source
stringlengths
3
92
c
stringlengths
26
2.25M
omp_ex_02.c
#include <stdio.h>
#include <omp.h>

/* MIT License

   Copyright (c) 2019 NOUREDDINE DAGHBOUDJ

   Permission is hereby granted, free of charge, to any person obtaining a copy
   of this software and associated documentation files (the "Software"), to deal
   in the Software without restriction, including without limitation the rights
   to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
   copies of the Software, and to permit persons to whom the Software is
   furnished to do so, subject to the following conditions:

   The above copyright notice and this permission notice shall be included in
   all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
   IN THE SOFTWARE. */

/*
 * OpenMP example 02: "Hello World!" printed from a parallel region.
 *
 * The `#pragma omp parallel` directive forks a team of threads; every
 * thread in the team executes the structured block once, so the message
 * appears once per thread. The interleaving of the lines is
 * nondeterministic, which is the point of the demo. Compiled without
 * OpenMP support the pragma is ignored and the message prints once.
 *
 * Returns 0 on success.
 */
int main(void)
{
    #pragma omp parallel
    {
        printf("Hello World!\n");
    }
    return 0;
}
@mropes.nim.c
/* Generated by Nim Compiler v1.0.11 */ /* (c) 2019 Andreas Rumpf */ /* The generated code is subject to the original license. */ #define NIM_INTBITS 64 #include "nimbase.h" #include <string.h> #include <stdio.h> #undef LANGUAGE_C #undef MIPSEB #undef MIPSEL #undef PPC #undef R3000 #undef R4000 #undef i386 #undef linux #undef mips #undef near #undef far #undef powerpc #undef unix #define nimfr_(x, y) #define nimln_(x, y) typedef struct tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA; typedef struct TNimType TNimType; typedef struct TNimNode TNimNode; typedef struct RootObj RootObj; typedef struct NimStringDesc NimStringDesc; typedef struct TGenericSeq TGenericSeq; typedef struct tySequence__WwUFq9cJ2xKRlsAWVEHyPRg tySequence__WwUFq9cJ2xKRlsAWVEHyPRg; typedef struct tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g; typedef struct tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w; typedef struct tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ; typedef struct tyObject_GcStack__7fytPA5bBsob6See21YMRA tyObject_GcStack__7fytPA5bBsob6See21YMRA; typedef struct tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg; typedef struct tyObject_SmallChunk__tXn60W2f8h3jgAYdEmy5NQ tyObject_SmallChunk__tXn60W2f8h3jgAYdEmy5NQ; typedef struct tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg; typedef struct tyObject_LLChunk__XsENErzHIZV9bhvyJx56wGw tyObject_LLChunk__XsENErzHIZV9bhvyJx56wGw; typedef struct tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA; typedef struct tyObject_Trunk__W0r8S0Y3UGke6T9bIUWnnuw tyObject_Trunk__W0r8S0Y3UGke6T9bIUWnnuw; typedef struct tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw; typedef struct tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg; 
typedef struct tyTuple__ujsjpB2O9cjj3uDHsXbnSzg tyTuple__ujsjpB2O9cjj3uDHsXbnSzg; typedef struct tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg; typedef struct tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ; typedef struct tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg; typedef tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* tyArray__USLYl0Lpkimm4FABiJ3ldA[4096]; typedef NU8 tyEnum_TNimKind__jIBKr1ejBgsfM33Kxw4j7A; typedef NU8 tySet_tyEnum_TNimTypeFlag__v8QUszD1sWlSIWZz7mC4bQ; typedef N_NIMCALL_PTR(void, tyProc__ojoeKfW4VYIm36I9cpDTQIg) (void* p, NI op); typedef N_NIMCALL_PTR(void*, tyProc__WSm2xU5ARYv9aAR4l0z9c9auQ) (void* p); struct TNimType { NI size; tyEnum_TNimKind__jIBKr1ejBgsfM33Kxw4j7A kind; tySet_tyEnum_TNimTypeFlag__v8QUszD1sWlSIWZz7mC4bQ flags; TNimType* base; TNimNode* node; void* finalizer; tyProc__ojoeKfW4VYIm36I9cpDTQIg marker; tyProc__WSm2xU5ARYv9aAR4l0z9c9auQ deepcopy; }; typedef NU8 tyEnum_TNimNodeKind__unfNsxrcATrufDZmpBq4HQ; struct TNimNode { tyEnum_TNimNodeKind__unfNsxrcATrufDZmpBq4HQ kind; NI offset; TNimType* typ; NCSTRING name; NI len; TNimNode** sons; }; struct RootObj { TNimType* m_type; }; struct TGenericSeq { NI len; NI reserved; }; struct NimStringDesc { TGenericSeq Sup; NIM_CHAR data[SEQ_DECL_SIZE]; }; struct tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA { RootObj Sup; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* left; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* right; NI L; NimStringDesc* data; }; typedef N_NIMCALL_PTR(void, tyProc__T4eqaYlFJYZUv9aG9b1TV0bQ) (void); struct tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g { NI refcount; TNimType* typ; }; struct tyObject_GcStack__7fytPA5bBsob6See21YMRA { void* bottom; }; struct tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w { NI len; NI cap; tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g** d; }; typedef tyObject_SmallChunk__tXn60W2f8h3jgAYdEmy5NQ* tyArray__SiRwrEKZdLgxqz9a9aoVBglg[512]; typedef NU32 
tyArray__BHbOSqU1t9b3Gt7K2c6fQig[24]; typedef tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg* tyArray__N1u1nqOgmuJN9cSZrnMHgOQ[32]; typedef tyArray__N1u1nqOgmuJN9cSZrnMHgOQ tyArray__B6durA4ZCi1xjJvRtyYxMg[24]; typedef tyObject_Trunk__W0r8S0Y3UGke6T9bIUWnnuw* tyArray__lh2A89ahMmYg9bCmpVaplLbA[256]; struct tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA { tyArray__lh2A89ahMmYg9bCmpVaplLbA data; }; typedef tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* tyArray__0aOLqZchNi8nWtMTi8ND8w[2]; struct tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw { tyArray__0aOLqZchNi8nWtMTi8ND8w link; NI key; NI upperBound; NI level; }; struct tyTuple__ujsjpB2O9cjj3uDHsXbnSzg { tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg* Field0; NI Field1; }; typedef tyTuple__ujsjpB2O9cjj3uDHsXbnSzg tyArray__LzOv2eCDGiceMKQstCLmhw[30]; struct tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg { NI len; tyArray__LzOv2eCDGiceMKQstCLmhw chunks; tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg* next; }; struct tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg { NI minLargeObj; NI maxLargeObj; tyArray__SiRwrEKZdLgxqz9a9aoVBglg freeSmallChunks; NU32 flBitmap; tyArray__BHbOSqU1t9b3Gt7K2c6fQig slBitmap; tyArray__B6durA4ZCi1xjJvRtyYxMg matrix; tyObject_LLChunk__XsENErzHIZV9bhvyJx56wGw* llmem; NI currMem; NI maxMem; NI freeMem; NI occ; NI lastSize; tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA chunkStarts; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* root; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* deleted; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* last; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* freeAvlNodes; NIM_BOOL locked; NIM_BOOL blockChunkSizeIncrease; NI nextChunkSize; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw bottomData; tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg heapLinks; }; struct tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg { NI stackScans; NI cycleCollections; NI maxThreshold; NI maxStackSize; NI maxStackCells; NI cycleTableSize; NI64 maxPause; }; struct tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ { NI counter; NI max; 
tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg* head; tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg** data; }; struct tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ { tyObject_GcStack__7fytPA5bBsob6See21YMRA stack; NI cycleThreshold; NI zctThreshold; tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w zct; tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w decStack; tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w tempStack; NI recGcLock; tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg region; tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg stat; tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ marked; tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w additionalRoots; NI gcThreadId; }; typedef NU8 tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg; typedef NIM_CHAR tyArray__9bKy7UA2LOi2vzOViufaW1Q[1024]; struct tySequence__WwUFq9cJ2xKRlsAWVEHyPRg { TGenericSeq Sup; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* data[SEQ_DECL_SIZE]; }; N_NIMCALL(void, nimGCvisit)(void* d, NI op); static N_NIMCALL(void, Marker_tyRef__4hi0XQqK9aLiPuWT9acsXm9aQ)(void* p, NI op); static N_NIMCALL(void, TM__Vw9cfUOQOae9b9bzZBlucMZQg_3)(void); N_NIMCALL(void, nimRegisterGlobalMarker)(tyProc__T4eqaYlFJYZUv9aG9b1TV0bQ markerProc); N_NIMCALL(NimStringDesc*, mnewString)(NI len); N_LIB_PRIVATE N_NIMCALL(NI, len__9b0YRltzV3kNSE9aQTsG82wg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a); N_NIMCALL(NimStringDesc*, setLengthStr)(NimStringDesc* s, NI newLen); N_NIMCALL(void*, newSeq)(TNimType* typ, NI len); static N_INLINE(void, asgnRef)(void** dest, void* src); static N_INLINE(void, incRef__AT1eRuflKWyTTBdLjEDZbg_3system)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c); static N_INLINE(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*, usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem)(void* usr); static N_INLINE(void, decRef__AT1eRuflKWyTTBdLjEDZbgsystem)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c); static N_INLINE(void, rtlAddZCT__AT1eRuflKWyTTBdLjEDZbg_2system)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c); N_LIB_PRIVATE N_NOINLINE(void, 
addZCT__Y66tOYFjgwJ0k4aLz4bc0Q)(tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w* s, tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c); static N_INLINE(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem)(tySequence__WwUFq9cJ2xKRlsAWVEHyPRg** s); N_NIMCALL(TGenericSeq*, setLengthSeqV2)(TGenericSeq* s, TNimType* typ, NI newLen); N_NIMCALL(void, unsureAsgnRef)(void** dest, void* src); N_NIMCALL(TGenericSeq*, incrSeqV3)(TGenericSeq* s, TNimType* typ); static N_INLINE(void, appendString)(NimStringDesc* dest, NimStringDesc* src); static N_INLINE(void, copyMem__i80o3k0SgEI5gTRCzYdyWAsystem)(void* dest, void* source, NI size); static N_INLINE(void, nimCopyMem)(void* dest, void* source, NI size); N_NIMCALL(NimStringDesc*, resizeString)(NimStringDesc* dest, NI addlen); N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA)(NimStringDesc* frmt, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0); N_LIB_PRIVATE N_NIMCALL(void, add__yG4AKzsBRS1W4MANDlXQeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, NimStringDesc* b); N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___Z7W1o5nPSc3ExfO5f7j1Gg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a, NimStringDesc* b); N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___ShdZ6VrAQkY0nWR9a39b9bGdQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b); N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, newRope__dBdikNFB2Y7QJ9aVJE7dGHg)(NimStringDesc* data); N_NIMCALL(void*, newObj)(TNimType* typ, NI size); N_NIMCALL(NimStringDesc*, copyStringRC1)(NimStringDesc* src); static N_INLINE(void, nimGCunrefNoCycle)(void* p); N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, rope__yShmEg9cffWxI7s5XzEKBow)(NimStringDesc* s); N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, insertInCache__yShmEg9cffWxI7s5XzEKBow_2)(NimStringDesc* s); N_LIB_PRIVATE 
N_NIMCALL(NI, hash__6PCYkKlCNhq9cnRLnqWKkwQ)(NimStringDesc* x); static N_INLINE(NIM_BOOL, eqStrings)(NimStringDesc* a, NimStringDesc* b); static N_INLINE(NIM_BOOL, equalMem__9bGgqEk7RXXl6eqM9c1HdELAsystem)(void* a, void* b, NI size); static N_INLINE(int, nimCmpMem)(void* a, void* b, NI size); N_LIB_PRIVATE N_NIMCALL(void, add__IM4kcMNkkOLJtqdEqSxR8A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b); N_LIB_PRIVATE N_NIMCALL(void, failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A)(NimStringDesc* msg); N_NIMCALL(NimStringDesc*, rawNewString)(NI space); N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, substr__2yh9cer0ymNRHlOOg8P7IuA)(NimStringDesc* s, NI first, NI last); N_NIMCALL(NimStringDesc*, nimInt64ToStr)(NI64 x); N_LIB_PRIVATE N_NIMCALL(void, write__PArlm09bKklm2BLsCg6YtaA)(FILE* f, NimStringDesc* s); N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, open__gq12VLhVO0NBzUTnGgz4nw)(FILE** f, NimStringDesc* filename, tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg mode, NI bufSize); N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, equalsFile__9bihNFg7Qajcg9arfx5cr9aHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, FILE* f); static N_INLINE(void, nimZeroMem)(void* p, NI size); static N_INLINE(void, nimSetMem__zxfKBYntu9cBapkhrCOk1fgmemory)(void* a, int v, NI size); N_LIB_PRIVATE N_NIMCALL(NI, readBuffer__Y9atVWUcVyKHG9aBP4D0P9czA)(FILE* f, void* buffer, NI len); static N_INLINE(NCSTRING, nimToCStringConv)(NimStringDesc* s); N_LIB_PRIVATE N_NIMCALL(void, close__fU6ZlJAtQ9bre04EDZLdGsA_3)(FILE* f); N_LIB_PRIVATE N_NIMCALL(void, writeRope__FwuzOBq6SLlanVUstm8q9cA)(FILE* f, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r); N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, equalsFile__Wiam9c8x73Mtmbj0r4Ppikg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, NimStringDesc* filename); N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, writeRope__LLRRC42xWBSkxzV9bsPu7lA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* head, NimStringDesc* filename); tyArray__USLYl0Lpkimm4FABiJ3ldA cache__WGMp5Wo1NlgbAMOysPIfmQ; extern TNimType 
NTI__ytyiCJqK439aF9cIibuRVpAg_; TNimType NTI__OFzf0kSiPTcNreUIeJgWVA_; extern TNimType NTI__rR5Bzr1D5krxoo1NcNyeMA_; extern TNimType NTI__77mFvmsOLKik79ci2hXkHEg_; TNimType NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_; TNimType NTI__USLYl0Lpkimm4FABiJ3ldA_; NI gCacheTries__5GfZTThHPBfB9bjRZdFluBw; NI gCacheMisses__fLRm9am8S0daYBVNK6JKyBg; NI gCacheIntTries__opyfsNv023Md1P05mqsDew; extern TNimType NTI__WwUFq9cJ2xKRlsAWVEHyPRg_; extern tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ gch__IcYaEuuWivYAS86vFMTS3Q; STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_4, "$", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_5, "ropes.nim(238, 20) `false` invalid format string: ", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_6, "ropes.nim(250, 20) `false` invalid format string: ", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_7, "ropes.nim(253, 20) `false` invalid format string: ", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_8, "\012", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_9, "ropes.nim(263, 18) `false` invalid format string: ", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_10, "[$1, $2, $3]", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_11, "FR_.len-=$1;$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_12, "} $1: ;$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_13, "}$n", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_14, "FR_.len+=$1;$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_15, "void", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_16, ", ", 2); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_17, "$1 $2;$n", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_18, "typedef $1 $2 $2;$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_19, "*", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_20, " ", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_21, ", NI $1Len_$2", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_22, " Result", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_23, "$1$2($3, $4)$5", 14); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_24, "(*$1)", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_25, "static TNimType* $1;$n", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_26, "\011$1 = (TNimType*)hcrGetGlobal($2, \"$1\");$n", 42); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_27, "extern TNimType $1;$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_28, "NTI$1_", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_29, "$1.size = sizeof($2);$n$1.kind = $3;$n$1.base = $4;$n", 53); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_30, "$1.flags = $2;$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_31, "$1.name = $2;$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_32, "$1.nextType = nimTypeRoot; nimTypeRoot=&$1;$n", 45); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_33, "\011hcrRegisterGlobal($2, \"$1\", sizeof(TNimType), NULL, (void**)&$" "1);$n", 68); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_34, "TNimType $1;$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_35, "$1[$2]", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_36, "static TNimNode** $1;$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_37, "\011hcrRegisterGlobal($3, \"$1\", sizeof(TNimNode*) * $2, NULL, (voi" "d**)&$1);$n", 74); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_38, "static TNimNode* $1[$2];$n", 26); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_39, "$1[$2] = &$3;$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_40, "$1.kind = 1;$n$1.offset = offsetof($2, Field$3);$n$1.typ = $4;$" "n$1.name = \"Field$3\";$n", 86); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_41, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n", 45); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_42, "$1.len = $2; $1.kind = 2;$n", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_43, "$1.node = &$2;$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_44, "static N_NIMCALL(void, $1)(void* p, NI op)", 42); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_45, "$1 a;$n", 7); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_46, "a = ($1)p;$n", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_47, "for ($1 = 0; $1 < $2; $1++) {$n", 31); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_48, "($1 \? $1->$2 : 0)", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_49, "$1.Sup", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_50, "#pragma pack(push, 1)$nstruct{", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_51, "};$n", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_52, "#pragma pack(pop)$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_53, "union{$n$1};$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_54, "$1 $2[SEQ_DECL_SIZE];$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_55, "$1 $2:$3;$n", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_56, "switch ($1.$2) {$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_57, "case $1 ... $2:$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_58, "(-2147483647 -1)", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_59, "IL64($1)", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_60, "(IL64(-9223372036854775807) - IL64(1))", 38); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_61, "NIM_TRUE", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_62, "NIM_FALSE", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_63, "(($1) $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_64, "static NIM_CONST $1 $2 = {NIM_NIL,NIM_NIL};$n", 45); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_65, "STRING_LITERAL($1, $2, $3);$n", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_66, "static const struct {$n NI cap; void* allocator; NIM_CHAR data" "[$2+1];$n} $1 = { $2, NIM_NIL, $3 };$n", 101); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_67, "static const NimStringV2 $1 = {$2, (NimStrPayload*)&$3};$n", 58); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_68, "case $1:$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_69, "default:$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_70, "break;$n", 8); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_71, "} $n", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_72, "$1.$2", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_73, "$1$3[$2]", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_74, "$1 {$n$2$3$4}\012", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_75, "$1;\012", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_76, "N_NIMCALL_PTR(void, $1)(void*, NI);\012", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_77, "\011$1 = (N_NIMCALL_PTR(void, )(void*, NI)) hcrRegisterProc($3, \"$" "1\", (void*)$2);\012", 79); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_78, "$1.marker = $2;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_79, "$1.len = $2; $1.kind = 0;$n$3.node = &$1;$n", 43); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_80, "$1.offset = $2;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_81, "NI $1;$n", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_82, "static char* NIM_CONST $1[$2] = {$n$3};$n", 41); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_83, "for ($1 = 0; $1 < $2; $1++) {$n$3[$1+$4].kind = 1;$n$3[$1+$4].o" "ffset = $1;$n$3[$1+$4].name = $5[$1];$n$6[$1] = &$3[$1+$4];$n}$n", 127); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_84, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n$4.node = &$1;$n", 61); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_85, "$1.flags = 1<<2;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_86, "$1.destructor = (void*)$2; $1.size = sizeof($3); $1.name = $4;$" "n", 64); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_87, "NimDT_$1_$2", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_88, "$1.kind = 3;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n" "ame = $5;$n$1.sons = &$6[0];$n$1.len = $7;$n", 107); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_89, "TNimNode* $1[$2];$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_90, "$1.kind = 1;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n" "ame = $5;$n", 74); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_91, 
"$1.deepcopy =(void* (N_RAW_NIMCALL*)(void*))$2;$n", 49); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_92, "Result", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_93, "$N#line $2 $1$N", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_94, "struct {$1} GCFRAME_;$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_95, "\011}BeforeRet_: ;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_96, "}$N", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_97, "\011$1 = ($3) hcrRegisterProc($4, \"$1\", (void*)$2);$n", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_98, "$1(*)$2", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_99, "static void* $1;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_100, "\011$1 = ($2) ($3$4));$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_101, "$2 $1;$n", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_102, "\011$1 = ($2) hcrRegisterProc($3, \"$1\", (void*)$1);$n", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_103, "\011$1 = ($2) hcrGetProc($3, \"$1\");$n", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_104, " $1;$n", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_105, "\011$1 = ($2*)hcrGetGlobal($3, \"$1\");$n", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_106, "NIM_CHECK_SIZE($1, $2);$n", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_107, "typedef NI32 $1;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_108, "typedef NU8 $1;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_109, "typedef NU16 $1;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_110, "typedef NI64 $1;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_111, "typedef $1_PTR($2, $3) $4;$n", 28); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_112, "typedef struct {$nN_NIMCALL_PTR($2, ClP_0) $3;$nvoid* ClE_0;$n}" " $1;$n", 69); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_113, "typedef $1 $2[1];$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_114, "typedef $1 $2[$3];$n", 20); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_115, " {$n", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_116, "char dummy;$n", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_117, "TY", 2); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_118, "typedef $1 $2;$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_119, "$1 $2 {$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_120, "$1 Field$2;$n", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_121, "typedef NU$2 $1;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_122, "typedef NU8 $1[$2];$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_123, "Field$1", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_124, "NIM_CONST $1 $2 = $3;$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_125, ",$n", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_126, "{$1, ($2*)&$3}", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_127, "{{$1, $1 | NIM_STRLIT_FLAG}", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_128, "(($1)&$2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_129, "{NIM_NIL,NIM_NIL}", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_130, "{(($1) $2),NIM_NIL}", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_131, "$1,$n", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_132, "$1", 2); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_133, "{{$1}}", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_134, "{$1}$n", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_135, "{$1, (NimStrPayload*)&$2}", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_136, "extern NIM_CONST $1 $2;$n", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_137, "goto NIMSTATE_$#;$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_138, "$2* $1;$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_139, "\011NimThreadVars* NimTV_;$n", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_140, "static N_NIMCALL(void, $1)(void)", 32); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_141, "$1 {$n$2$3$4}$n", 15); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_142, "$1;$n", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_143, "//", 2); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_144, "$#;$n", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_145, "$#($#);$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_146, "$# = $#;$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_147, "NULL", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_148, "((NU8)($1))", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_149, "($4*)(($1)+($2)), ($3)-($2)+1", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_150, "($5*)($1)+(($2)-($4)), ($3)-($2)+1", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_151, "($4*)($1)+($2), ($3)-($2)+1", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_152, "($5*)(*$1)$4+($2), ($3)-($2)+1", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_153, "($5*)$1$4+($2), ($3)-($2)+1", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_154, "$1, $1Len_0", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_155, "(*$1)$3, $2", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_156, "$1$3, $2", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_157, "$1, $2", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_158, "$1.ClP_0($3$1.ClE_0);$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_159, "$1.ClE_0\? $1.ClP_0($3$1.ClE_0):(($4)($1.ClP_0))($2);$n", 54); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_160, "$1.ClP_0($3$1.ClE_0)", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_161, "$1.ClE_0\? 
$1.ClP_0($3$1.ClE_0):(($4)($1.ClP_0))($2)", 51); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_162, "(", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_163, ")", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_164, ";$n", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_165, ");$n", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_166, "[", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_167, ": ", 2); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_168, "Result: ", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_169, "];$n", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_170, "]", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_171, "if ($1) goto $2;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_172, "if (!($1)) goto $2;$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_173, "$1: ;$n", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_174, "!($1)", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_175, "($3)((NU$2) ~($1))", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_176, "-($1)", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_177, "((NI$2)-($1))", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_178, "($1 > 0\? 
($1) : -($1))", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_179, "(($4)($1) + ($4)($2))", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_180, "(($4)($1) - ($4)($2))", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_181, "(($4)($1) * ($4)($2))", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_182, "(($4)($1) / ($4)($2))", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_183, "($4)((NU$5)($1) >> (NU$3)($2))", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_184, "($4)((NU$3)($1) << (NU$3)($2))", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_185, "($4)((NI$3)($1) >> (NU$3)($2))", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_186, "($4)($1 & $2)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_187, "($4)($1 | $2)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_188, "($4)($1 ^ $2)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_189, "(($1 <= $2) \? $1 : $2)", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_190, "(($1 >= $2) \? $1 : $2)", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_191, "($4)((NU$3)($1) + (NU$3)($2))", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_192, "($4)((NU$3)($1) - (NU$3)($2))", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_193, "($4)((NU$3)($1) * (NU$3)($2))", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_194, "($4)((NU$3)($1) / (NU$3)($2))", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_195, "($4)((NU$3)($1) % (NU$3)($2))", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_196, "($1 == $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_197, "($1 <= $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_198, "($1 < $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_199, "((NU$3)($1) <= (NU$3)($2))", 26); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_200, "((NU$3)($1) < (NU$3)($2))", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_201, "((NU64)($1) <= (NU64)($2))", 26); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_202, "((NU64)($1) < (NU64)($2))", 25); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_203, "((NU8)($1) == (NU8)($2))", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_204, "((NU8)($1) <= (NU8)($2))", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_205, "((NU8)($1) < (NU8)($2))", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_206, "($1 != $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_207, "($1.ClP_0 == $2.ClP_0 && $1.ClE_0 == $2.ClE_0)", 46); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_208, "($1)($2 $3 $4)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_209, "($#)($#)", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_210, ".Sup", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_211, "$1.m_type == $2", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_212, "static TNimType* $#[2];$n", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_213, "sizeof($1)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_214, "$1->finalizer = (void*)$2;$n", 28); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_215, "((NI)sizeof($1))", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_216, "((NI)alignof($1))", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_217, "((NI)offsetof($1, $2))", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_218, "(*($1*) ($2))", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_219, "(($1) ($2))", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_220, "(($1) (ptrdiff_t) ($2))", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_221, "(*($1*) (&$2))", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_222, "($1-1)", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_223, "$1 |= ((NU8)1)<<(($2) & 7);$n", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_224, "($1- $2)", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_225, "$1 |= ((NU16)1)<<(($2) & 15);$n", 31); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_226, "$1 |= ((NU32)1)<<(($2) & 31);$n", 31); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_227, "$1 |= ((NU64)1)<<(($2) & 63);$n", 31); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_228, "$1 &= ~(((NU8)1) << (($2) & 7));$n", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_229, "$1 &= ~(((NU16)1) << (($2) & 15));$n", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_230, "$1 &= ~(((NU32)1) << (($2) & 31));$n", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_231, "$1 &= ~(((NU64)1) << (($2) & 63));$n", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_232, "$1 >= $2 && $1 <= $3", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_233, "$1 == $2", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_234, "(($1 &((NU8)1<<((NU)($2)&7U)))!=0)", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_235, "(($1 &((NU16)1<<((NU)($2)&15U)))!=0)", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_236, "(($1 &((NU32)1<<((NU)($2)&31U)))!=0)", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_237, "(($1 &((NU64)1<<((NU)($2)&63U)))!=0)", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_238, "(($1[(NU)($2)>>3] &(1U<<((NU)($2)&7U)))!=0)", 43); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_239, "$1[(NU)($2)>>3] |=(1U<<($2&7U));$n", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_240, "$1[(NU)($2)>>3] &= ~(1U<<($2&7U));$n", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_241, "for ($1 = 0; $1 < $2; $1++) $n $3[$1] = $4[$1] $6 $5[$1];$n", 60); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_242, "static NIM_CONST $1 $2 = $3;$n", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_243, "for ($1 = $3; $1 <= $4; $1++) $n$2[(NU)($1)>>3] |=(1U<<((NU)($1" ")&7U));$n", 72); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_244, "$1[(NU)($2)>>3] |=(1U<<((NU)($2)&7U));$n", 40); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_245, "$1 = 0;$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_246, "for ($1 = $3; $1 <= $4; $1++) $n$2 |=(($5)(1)<<(($1)%(sizeof($5" ")*8)));$n", 72); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_247, "$1 |=(($3)(1)<<(($2)%(sizeof($3)*8)));$n", 40); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_248, "$1.Field$2", 10); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_249, "LOC$1.source", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_250, "LOC$#.dest", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_251, ".Field$1", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_252, ".$1", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_253, "TFrame $1;$n", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_254, "if (!$1) goto $2;$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_255, "goto $1;$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_256, "TMP$1_", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_257, "static void* $#[$#] = {", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_258, "&&TMP$#_, ", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_259, "&&TMP$#_};$n", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_260, "goto *$#[$#];$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_261, "TMP$#_:$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_262, "case $1: $n$2break;$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_263, "goto LA$1_;$n", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_264, "LA$1_: ;$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_265, "NIMSTATE_$#:$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_266, "switch ($1) {$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_267, "default: __assume(0);$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_268, "goto BeforeRet_;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_269, "throw;$n", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_270, "else", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_271, "throw $1;$n", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_272, "$n#pragma omp $4$nfor ($1 = $2; $1 <= $3; ++$1)", 47); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_273, "$n#pragma omp $5$nfor ($1 = $2; $1 <= $3; $1 += $4)", 51); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_274, "case -1:$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_275, " goto BeforeRet_;$n", 19); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_276, "case $2: goto $1$2;$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_277, "(((NI*) $1)[1] < 0)", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_278, "((((NI*) $1.ClE_0)[1]) < 0)", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_279, "$1 N_NIMCALL(void, $2)(void) {$N", 32); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_280, "\011int* nim_hcr_dummy_ = 0;$n\011NIM_BOOL nim_hcr_do_init_ = hcrRegi" "sterGlobal($1, \"module_initialized_\", 1, NULL, (void**)&nim_hcr_" "dummy_);$n", 137); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_281, "{$N", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_282, "\011TFrame FR_; FR_.len = 0;$N", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_283, "}$N$N", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_284, "N_LIB_EXPORT N_NIMCALL(void, $1)(void* handle, N_NIMCALL_PTR(vo" "id*, getProcAddr)(void*, char*)) {$N", 99); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_285, "static $2 $1;$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_286, "\011$1 = ($2) $3($4, $5);$n", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_287, "NIM_EXTERNC N_NIMCALL(void, nimLoadProcs$1)(void) {$2}$N$N", 58); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_288, "N_LIB_EXPORT N_NIMCALL(void, HcrCreateTypeInfos)(void) {$N", 58); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_289, "$nN_LIB_PRIVATE const char* hcr_module_list[] = {$n", 51); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_290, "\011$1,$n", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_291, "\011\"\"};$n", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_292, "$nN_LIB_EXPORT N_NIMCALL(void**, HcrGetImportedModules)() { ret" "urn (void**)hcr_module_list; }$n", 95); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_293, "$nN_LIB_EXPORT N_NIMCALL(char*, HcrGetSigHash)() { return \"$1\";" " }$n$n", 69); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_294, "static void* hcr_handle;$N", 26); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_295, "N_LIB_EXPORT 
N_NIMCALL(void, $1)(void);$N", 41); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_296, "N_LIB_EXPORT N_NIMCALL(void, $1)(void*, N_NIMCALL_PTR(void*, ge" "tProcAddr)(void*, char*));$N", 91); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_297, "N_LIB_EXPORT N_NIMCALL(void, HcrCreateTypeInfos)(void);$N", 57); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_298, "\011$1();$N", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_299, "\011hcrInit((void**)hcr_module_list, $1, $2, $3, hcr_handle, nimGe" "tProcAddr);$n", 76); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_300, "\011$1(hcr_handle, nimGetProcAddr);$N", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_301, "\011hcrAddModule($1);\012", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_302, "\011HcrCreateTypeInfos();$N", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_303, "\011hcrRegisterGlobal($1, \"cmdCount\", sizeof(cmd_count), NULL, (vo" "id**)&cmd_count);$N", 82); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_304, "\011hcrRegisterGlobal($1, \"cmdLine\", sizeof(cmd_line), NULL, (void" "**)&cmd_line);$N", 79); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_305, "N_LIB_PRIVATE N_NIMCALL(void, $1)(void);$N", 42); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_306, "$#NI NimThreadVarsSize(){return (NI)sizeof(NimThreadVars);}$n", 61); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_307, "/* Generated by Nim Compiler v$1 */$N/* (c) 2019 Andreas Rump" "f */$N/* The generated code is subject to the original license. " "*/$N", 131); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_308, "/* Generated by Nim Compiler v$1 */$N/* (c) 2019 Andreas Rump" "f */$N/* The generated code is subject to the original license. 
" "*/$N/* Compiled for: $2, $3, $4 */$N/* Command for C compiler:$n" " $5 */$N", 201); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_309, "#define NIM_INTBITS $1\012", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_310, "typedef struct {$1} NimThreadVars;$n", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_311, "#include \"$1\"$N", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_312, "#include $1$N", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_313, "--file:r\"$1\"$N", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_314, "\012[Symbols]$n$1", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_315, "/* Generated by Nim Compiler v$1 */$N/* (c) 2017 Andreas Rump" "f */$N/* The generated code is subject to the original license. " "*/$N", 131); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_316, "__$1__", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_317, "#ifndef $1$n#define $1$n", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_318, "N_CDECL(void, NimMain)(void);$n", 31); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_319, "#endif /* $1 */$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_320, "var F={procname:$1,prev:framePtr,filename:$2,line:0};$n", 55); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_321, "framePtr = F;$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_322, "var $1;$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_323, "if ($1 == undefined) {$n", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_324, "if ($1 === undefined) {$n", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_325, "var $1 = null;$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_326, "var $1_Idx = 0;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_327, "[$1]", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_328, "new $1($2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_329, "var $# = null;$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_330, "var $#_Idx = 0;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_331, "var $# = $#;$n", 
14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_332, "return [$#, $#];$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_333, "return $#;$n", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_334, "BeforeRet: do {$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_335, "} while (false);$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_336, "try {$n$1} catch (e) {$n alert(\"Unhandled exception:\\n\" + e.mes" "sage + \"\\n\"$n}", 77); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_337, "function $#() { return $#.apply(this, arguments); }$n", 53); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_338, "function $#($#) {$n$#$#$#$#$#", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_339, "arrayConstr($1, $2, $3)", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_340, "NTI$1", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_341, "var $1 = {size: 0,kind: $2,base: null,node: null,finalizer: nul" "l};$n", 68); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_342, "$1.base = $2;$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_343, "\"$1\": {kind: 1, offset: $1, typ: $2, name: $3, len: 0, sons: nu" "ll}", 66); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_344, "var NNI$1 = {kind: 2, offset: 0, typ: null, name: null, len: $2" ", sons: {$3}};$n", 79); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_345, "var $1 = {size: 0, kind: $2, base: null, node: null, finalizer:" " null};$n", 72); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_346, "$1.node = NNI$2;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_347, "var NNI$1 = $2;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_348, "{kind: 2, len: $1, offset: 0, typ: null, name: null, sons: [$2]" "}", 64); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_349, "{kind: 1, offset: \"$1\", len: 0, typ: $2, name: $3, sons: null}", 62); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_350, "[$1, $2]", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_351, "[setConstr($1), $2]", 19); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_352, "{kind: 3, offset: \"$1\", len: $3, typ: $2, name: $4, sons: [$5]}", 63); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_353, "{kind: 1, offset: \"Field$1\", len: 0, typ: $2, name: \"Field$1\", " "sons: null}", 74); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_354, "Field$1: $2", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_355, "m_type: $1", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_356, "$#: ", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_357, "({$1})", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_358, "nimCopy(null, $1, $2)", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_359, "Tmp$1", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_360, "var $1 = $2, $3 = $1[0], $3_Idx = $1[1];$n", 42); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_361, "$1 = nimCopy(null, $1, $2);$n", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_362, "$1[0][0]", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_363, "$1[0][1]", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_364, "$1[0]", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_365, "$1[1]", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_366, "makeNimstrLit($1)", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_367, "// line $2 \"$1\"$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_368, "F.line = $1;$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_369, "($1 || $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_370, "if ($1) $2 = true; else {", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_371, "$2 = $1;", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_372, "($1 && $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_373, "if (!$1) $2 = false; else {", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_374, "$1[0][$1[1]]", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_375, "($1 = $2, $1)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_376, "$1 = (($5 $2 $3) $4)", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_377, "(($1 
$2 $3) $4)", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_378, "addInt($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_379, "($1 + $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_380, "subInt($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_381, "($1 - $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_382, "mulInt($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_383, "($1 * $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_384, "divInt($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_385, "Math.trunc($1 / $2)", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_386, "modInt($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_387, "Math.trunc($1 % $2)", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_388, "($1 / $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_389, "($1 << $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_390, "($1 >> $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_391, "($1 & $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_392, "($1 | $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_393, "($1 ^ $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_394, "nimMin($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_395, "nimMax($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_396, "($1 % $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_397, "negInt($1)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_398, "negInt64($1)", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_399, "absInt($1)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_400, "Math.abs($1)", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_401, "+($1)", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_402, "~($1)", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_403, "nimCharToStr($1)", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_404, "nimBoolToStr($1)", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_405, "cstrToNimstr(($1)+\"\")", 21); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_406, "cstrToNimstr($1)", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_407, "(($1 $2) >>> $3)", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_408, "($# == $# && $# == $#)", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_409, "var $1 = $2; $2 = $3; $3 = $1;$n", 32); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_410, "var $1 = $2; $2 = $3; $3 = $1;", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_411, "$1 - 1", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_412, "subInt($1, 1)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_413, "if ($1 != null) { addChar($3, $2); } else { $3 = [$2]; }", 56); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_414, "if ($1 != null) { $4 += $2; } else { $4 = $2$3; }", 49); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_415, ".slice()", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_416, "if ($1 != null) { $4 = ($4).concat($2); } else { $4 = $2$3; }", 61); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_417, "if ($1 != null) { $3.push($2); } else { $3 = [$2]; }", 52); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_418, "var $1 = nimCopy(null, $2, $3);$n", 33); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_419, "[$1].concat(", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_420, "($1 || []).concat(", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_421, "[$1],", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_422, "$1 || [],", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_423, "[$1])", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_424, "$1 || [])", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_425, "eqStrings($1, $2)", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_426, "(cmpStrings($1, $2) <= 0)", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_427, "(cmpStrings($1, $2) < 0)", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_428, "($1 == null)", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_429, "($# == null && $# === 0)", 24); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_430, "$1 = $2;$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_431, "$1 = [$3]; $2 = 0;$n", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_432, "$1 = [[$2], 0];$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_433, "($1 \? 1:0)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_434, "($1 != null \? $2.length : 0)", 28); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_435, "$1.length", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_436, "($1 != null \? ($2.length-1) : -1)", 33); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_437, "$1 += $2", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_438, "$1 = addInt($3, $2)", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_439, "$1 -= $2", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_440, "$1 = subInt($3, $2)", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_441, "($1 == null \? $3 = mnewString($2) : $3.length = $2)", 51); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_442, "if ($1 === null) $4 = [];\012 if ($4.length < $2) { " "for (var i=$4.length;i<$5;++i) $4.push($3); }\012 els" "e { $4.length = $5; }", 148); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_443, "SetCard($1)", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_444, "SetLt($1, $2)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_445, "SetLe($1, $2)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_446, "SetEq($1, $2)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_447, "SetMul($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_448, "SetPlus($1, $2)", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_449, "SetMinus($1, $2)", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_450, "$1[$2] = true", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_451, "delete $1[$2]", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_452, "($1[$2] != undefined)", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_453, "$1 = new Array($2); for (var i=0;i<$2;++i) {$1[i]=$3;}", 54); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_454, "[]", 2); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_455, "($1.m_type == $2)", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_456, "isObj($1.m_type, $2)", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_457, "$1 = null, $2 = 0;$n", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_458, "$1 = genericReset($3, $2);$n", 28); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_459, "($1.slice($2))", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_460, "mnewString($1)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_461, "mnewString(0)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_462, "($1 = $2, $1[0]), $1[1]", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_463, "($1 = $2, $1)[0]", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_464, "($1.slice($2, $3+1))", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_465, "var $1 = $2;$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_466, "Field$#: [$#, $#]", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_467, "Field$#: $#", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_468, "$#: [$#, $#]", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_469, "$#: $#", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_470, "{$1}", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_471, "(!!($1))", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_472, "(($1)|0)", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_473, "if ($1[$2.$3]$4undefined) { raiseFieldError(makeNimstrLit($5));" " }$n", 67); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_474, "!==", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_475, "===", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_476, "chckIndx($1, $2, ($3 != null \? 
$3.length : 0)+$2-1)-$2", 54); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_477, "($1)-$2", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_478, "$1.charCodeAt($2)", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_479, "($1 $2)", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_480, "($1|0)", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_481, "($1 - ($2 $3))", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_482, "null", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_483, "chckRange($1, $2, $3)", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_484, "toJSStr($1)", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_485, "L$1: do {$n", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_486, "} while(false);$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_487, "else {$n", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_488, "if ($1) {$n", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_489, "L$1: while (true) {$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_490, "if (!$1) break L$2;$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_491, "switch (toJSStr($1)) {$n", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_492, "default: $n", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_493, "break BeforeRet;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_494, "break L$1;$n", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_495, "$1 = nimCopy(null, $2, $3);$n", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_496, "nimCopy($1, $2, $3);$n", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_497, "var $1 = $4; $2 = $1[0]; $3 = $1[1];$n", 38); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_498, "$# = [$#, $#];$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_499, "$1 = $2; $3 = $4;$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_500, "try {$n", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_501, "--excHandler;$n} catch (EXC) {$n var prevJSError = lastJSError;" "$n lastJSError = EXC;$n --excHandler;$n", 102); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_502, "framePtr = $1;$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_503, "lastJSError instanceof $1", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_504, "isObj(lastJSError.m_type, $1)", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_505, "if (lastJSError && ($1)) {$n", 28); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_506, "var $1 = lastJSError;$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_507, "lastJSError = prevJSError;$n", 28); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_508, "raiseException($1, $2);$n", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_509, "$1 = true;$n", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_510, "/* Generated by the Nim Compiler v$1 */$n/* (c) 2019 Andreas " "Rumpf */$n$nvar framePtr = null;$nvar excHandler = 0;$nvar lastJ" "SError = null;$nif (typeof Int8Array === \'undefined\') Int8Array " "= Array;$nif (typeof Int16Array === \'undefined\') Int16Array = Ar" "ray;$nif (typeof Int32Array === \'undefined\') Int32Array = Array;" "$nif (typeof Uint8Array === \'undefined\') Uint8Array = Array;$nif" " (typeof Uint16Array === \'undefined\') Uint16Array = Array;$nif (" "typeof Uint32Array === \'undefined\') Uint32Array = Array;$nif (ty" "peof Float32Array === \'undefined\') Float32Array = Array;$nif (ty" "peof Float64Array === \'undefined\') Float64Array = Array;$n", 633); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_511, "Deprecated", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_512, "Deprecated:", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_513, "\012<p><strong class=\"examples_text\">$1</strong></p>\012", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_514, "\012\\textbf{$1}\012", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_515, "<span class=\"Comment\">$1</span>", 31); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_516, "\\spanComment{$1}", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_517, "<span class=\"Keyword\">$1</span>", 31); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_518, "\\spanKeyword{$1}", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_519, "<span class=\"Operator\">$1</span>", 32); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_520, "\\spanOperator{$1}", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_521, "<span class=\"StringLit\">$1</span>", 33); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_522, "\\spanStringLit{$1}", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_523, "<span class=\"CharLit\">$1</span>", 31); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_524, "\\spanCharLit{$1}", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_525, "<span class=\"DecNumber\">$1</span>", 33); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_526, "\\spanDecNumber{$1}", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_527, "<span class=\"FloatNumber\">$1</span>", 35); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_528, "\\spanFloatNumber{$1}", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_529, "<a href=\"#$2\"><span class=\"Identifier\">$1</span></a>", 52); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_530, "\\spanIdentifier{$1}", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_531, "<a href=\"$1#$2\"><span class=\"Identifier\">$3</span></a>", 54); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_532, "<span class=\"Identifier\">$1</span>", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_533, "<span><span class=\"Other\">{</span><span class=\"Other pragmadots" "\">...</span><span class=\"Other\">}</span></span><span class=\"prag" "mawrap\"><span class=\"Other\">$1</span><span class=\"pragma\">", 185); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_534, "\\spanOther{$1}", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_535, "</span><span class=\"Other\">$1</span></span>", 43); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_536, "<span class=\"Other\">$1</span>", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_537, "<a class=\"reference external\" href=\"$2\">$1</a>", 46); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_538, "<a href=\"$2#$1\"><span class=\"Identifier\">$1</span></a>", 54); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_539, "$1 -> \"$2\";$n", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_540, "digraph $1 {$n$2}$n", 19); static N_NIMCALL(void, Marker_tyRef__4hi0XQqK9aLiPuWT9acsXm9aQ)(void* p, NI op) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a; a = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)p; nimGCvisit((void*)(*a).left, op); nimGCvisit((void*)(*a).right, op); nimGCvisit((void*)(*a).data, op); } static N_NIMCALL(void, TM__Vw9cfUOQOae9b9bzZBlucMZQg_3)(void) { NI T1_; T1_ = (NI)0; for (T1_ = 0; T1_ < 4096; T1_++) { nimGCvisit((void*)cache__WGMp5Wo1NlgbAMOysPIfmQ[T1_], 0); } } N_LIB_PRIVATE N_NIMCALL(NI, len__9b0YRltzV3kNSE9aQTsG82wg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a) { NI result; result = (NI)0; { if (!(a == NIM_NIL)) goto LA3_; result = ((NI) 0); } goto LA1_; LA3_: ; { result = ((*a).L > 0? ((*a).L) : -((*a).L)); } LA1_: ; return result; } static N_INLINE(void, incRef__AT1eRuflKWyTTBdLjEDZbg_3system)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c) { (*c).refcount = (NI)((NU64)((*c).refcount) + (NU64)(((NI) 8))); } static N_INLINE(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*, usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem)(void* usr) { tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* result; result = (tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*)0; result = ((tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*) ((NI)((NU64)(((NI) (ptrdiff_t) (usr))) - (NU64)(((NI) 16))))); return result; } static N_INLINE(void, rtlAddZCT__AT1eRuflKWyTTBdLjEDZbg_2system)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c) { addZCT__Y66tOYFjgwJ0k4aLz4bc0Q((&gch__IcYaEuuWivYAS86vFMTS3Q.zct), c); } static N_INLINE(void, decRef__AT1eRuflKWyTTBdLjEDZbgsystem)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c) { (*c).refcount = (NI)((NU64)((*c).refcount) - (NU64)(((NI) 8))); { if (!((NU64)((*c).refcount) < (NU64)(((NI) 8)))) goto LA3_; rtlAddZCT__AT1eRuflKWyTTBdLjEDZbg_2system(c); } LA3_: 
; } static N_INLINE(void, asgnRef)(void** dest, void* src) { { tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* T5_; if (!!((src == NIM_NIL))) goto LA3_; T5_ = (tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*)0; T5_ = usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem(src); incRef__AT1eRuflKWyTTBdLjEDZbg_3system(T5_); } LA3_: ; { tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* T10_; if (!!(((*dest) == NIM_NIL))) goto LA8_; T10_ = (tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*)0; T10_ = usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem((*dest)); decRef__AT1eRuflKWyTTBdLjEDZbgsystem(T10_); } LA8_: ; (*dest) = src; } static N_INLINE(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem)(tySequence__WwUFq9cJ2xKRlsAWVEHyPRg** s) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; NI L; NI T1_; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; T1_ = ((*s) ? (*s)->Sup.len : 0); L = (NI)(T1_ - ((NI) 1)); result = (*s)->data[L]; unsureAsgnRef((void**) (&(*s)), (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) setLengthSeqV2(&((*s))->Sup, (&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_), ((NI) (L)))); return result; } static N_INLINE(void, nimCopyMem)(void* dest, void* source, NI size) { void* T1_; T1_ = (void*)0; T1_ = memcpy(dest, source, ((size_t) (size))); } static N_INLINE(void, copyMem__i80o3k0SgEI5gTRCzYdyWAsystem)(void* dest, void* source, NI size) { nimCopyMem(dest, source, size); } static N_INLINE(void, appendString)(NimStringDesc* dest, NimStringDesc* src) { { if (!!((src == NIM_NIL))) goto LA3_; copyMem__i80o3k0SgEI5gTRCzYdyWAsystem(((void*) ((&(*dest).data[(*dest).Sup.len]))), ((void*) ((*src).data)), ((NI) ((NI)((*src).Sup.len + ((NI) 1))))); (*dest).Sup.len += (*src).Sup.len; } LA3_: ; } N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, dollar___mZ66tEveFIQokq3arf8Klw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r) { NimStringDesc* result; NI T1_; result = (NimStringDesc*)0; T1_ = (NI)0; T1_ = len__9b0YRltzV3kNSE9aQTsG82wg(r); result = mnewString(((NI) (T1_))); result = setLengthStr(result, ((NI) 0)); { NimStringDesc* 
s; s = (NimStringDesc*)0; { tySequence__WwUFq9cJ2xKRlsAWVEHyPRg* stack; if (!!((r == NIM_NIL))) goto LA5_; stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) newSeq((&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_), 1); asgnRef((void**) (&stack->data[0]), r); { while (1) { NI T9_; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* it; T9_ = (stack ? stack->Sup.len : 0); if (!(((NI) 0) < T9_)) goto LA8; it = pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem((&stack)); { while (1) { NI T12_; if (!!(((*it).left == NIM_NIL))) goto LA11; stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) incrSeqV3((TGenericSeq*)(stack), (&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_)); T12_ = stack->Sup.len++; asgnRef((void**) (&stack->data[T12_]), (*it).right); it = (*it).left; } LA11: ; } s = (*it).data; result = resizeString(result, (s ? s->Sup.len : 0) + 0); appendString(result, s); } LA8: ; } } LA5_: ; } return result; } static N_INLINE(void, nimGCunrefNoCycle)(void* p) { tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* T1_; T1_ = (tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*)0; T1_ = usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem(p); decRef__AT1eRuflKWyTTBdLjEDZbgsystem(T1_); } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, newRope__dBdikNFB2Y7QJ9aVJE7dGHg)(NimStringDesc* data) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; NimStringDesc* T1_; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*) newObj((&NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_), sizeof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA)); (*result).Sup.m_type = (&NTI__OFzf0kSiPTcNreUIeJgWVA_); (*result).L = ((NI64)-((data ? 
data->Sup.len : 0))); T1_ = (NimStringDesc*)0; T1_ = (*result).data; (*result).data = copyStringRC1(data); if (T1_) nimGCunrefNoCycle(T1_); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___ShdZ6VrAQkY0nWR9a39b9bGdQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; { if (!(a == NIM_NIL)) goto LA3_; result = b; } goto LA1_; LA3_: ; { if (!(b == NIM_NIL)) goto LA6_; result = a; } goto LA1_; LA6_: ; { result = newRope__dBdikNFB2Y7QJ9aVJE7dGHg(((NimStringDesc*) NIM_NIL)); (*result).L = (NI)(((*a).L > 0? ((*a).L) : -((*a).L)) + ((*b).L > 0? ((*b).L) : -((*b).L))); asgnRef((void**) (&(*result).left), a); asgnRef((void**) (&(*result).right), b); } LA1_: ; return result; } static N_INLINE(int, nimCmpMem)(void* a, void* b, NI size) { int result; result = (int)0; result = memcmp(a, b, ((size_t) (size))); return result; } static N_INLINE(NIM_BOOL, equalMem__9bGgqEk7RXXl6eqM9c1HdELAsystem)(void* a, void* b, NI size) { NIM_BOOL result; int T1_; result = (NIM_BOOL)0; T1_ = (int)0; T1_ = nimCmpMem(a, b, size); result = (T1_ == ((NI32) 0)); return result; } static N_INLINE(NIM_BOOL, eqStrings)(NimStringDesc* a, NimStringDesc* b) { NIM_BOOL result; NI alen; NI blen; { result = (NIM_BOOL)0; alen = (a ? a->Sup.len : 0); blen = (b ? 
b->Sup.len : 0); { if (!(alen == blen)) goto LA3_; { if (!(alen == ((NI) 0))) goto LA7_; result = NIM_TRUE; goto BeforeRet_; } LA7_: ; result = equalMem__9bGgqEk7RXXl6eqM9c1HdELAsystem(((void*) ((&a->data[((NI) 0)]))), ((void*) ((&b->data[((NI) 0)]))), ((NI) (alen))); goto BeforeRet_; } LA3_: ; }BeforeRet_: ; return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, insertInCache__yShmEg9cffWxI7s5XzEKBow_2)(NimStringDesc* s) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; NI h; NI T1_; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; gCacheTries__5GfZTThHPBfB9bjRZdFluBw += ((NI) 1); T1_ = (NI)0; T1_ = hash__6PCYkKlCNhq9cnRLnqWKkwQ(s); h = (NI)(T1_ & ((NI) 4095)); result = cache__WGMp5Wo1NlgbAMOysPIfmQ[(h)- 0]; { NIM_BOOL T4_; T4_ = (NIM_BOOL)0; T4_ = (result == 0); if (T4_) goto LA5_; T4_ = !(eqStrings((*result).data, s)); LA5_: ; if (!T4_) goto LA6_; gCacheMisses__fLRm9am8S0daYBVNK6JKyBg += ((NI) 1); result = newRope__dBdikNFB2Y7QJ9aVJE7dGHg(s); asgnRef((void**) (&cache__WGMp5Wo1NlgbAMOysPIfmQ[(h)- 0]), result); } LA6_: ; return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, rope__yShmEg9cffWxI7s5XzEKBow)(NimStringDesc* s) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; { if (!((s ? 
s->Sup.len : 0) == ((NI) 0))) goto LA3_; result = NIM_NIL; } goto LA1_; LA3_: ; { result = insertInCache__yShmEg9cffWxI7s5XzEKBow_2(s); } LA1_: ; return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___Z7W1o5nPSc3ExfO5f7j1Gg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a, NimStringDesc* b) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* T1_; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; T1_ = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; T1_ = rope__yShmEg9cffWxI7s5XzEKBow(b); result = amp___ShdZ6VrAQkY0nWR9a39b9bGdQ(a, T1_); return result; } N_LIB_PRIVATE N_NIMCALL(void, add__yG4AKzsBRS1W4MANDlXQeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, NimStringDesc* b) { unsureAsgnRef((void**) (&(*a)), amp___Z7W1o5nPSc3ExfO5f7j1Gg((*a), b)); } N_LIB_PRIVATE N_NIMCALL(void, add__IM4kcMNkkOLJtqdEqSxR8A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b) { unsureAsgnRef((void**) (&(*a)), amp___ShdZ6VrAQkY0nWR9a39b9bGdQ((*a), b)); } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA)(NimStringDesc* frmt, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; NI i; NI length; NI num; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; i = ((NI) 0); length = (frmt ? frmt->Sup.len : 0); result = NIM_NIL; num = ((NI) 0); { while (1) { NI start; if (!(i < length)) goto LA2; { if (!((NU8)(frmt->data[i]) == (NU8)(36))) goto LA5_; i += ((NI) 1); switch (((NU8)(frmt->data[i]))) { case 36: { add__yG4AKzsBRS1W4MANDlXQeg(&result, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_4)); i += ((NI) 1); } break; case 35: { i += ((NI) 1); add__IM4kcMNkkOLJtqdEqSxR8A(&result, args[num]); num += ((NI) 1); } break; case 48 ... 
57: { NI j; j = ((NI) 0); { while (1) { j = (NI)((NI)((NI)(j * ((NI) 10)) + ((NU8)(frmt->data[i]))) - ((NI) 48)); i += ((NI) 1); { NIM_BOOL T14_; T14_ = (NIM_BOOL)0; T14_ = ((frmt ? frmt->Sup.len : 0) <= i); if (T14_) goto LA15_; T14_ = !((((NU8)(frmt->data[i])) >= ((NU8)(48)) && ((NU8)(frmt->data[i])) <= ((NU8)(57)))); LA15_: ; if (!T14_) goto LA16_; goto LA10; } LA16_: ; } } LA10: ; num = j; { if (!((NI)((argsLen_0-1) + ((NI) 1)) < j)) goto LA20_; { NimStringDesc* T26_; if (!NIM_TRUE) goto LA24_; T26_ = (NimStringDesc*)0; T26_ = rawNewString((frmt ? frmt->Sup.len : 0) + 50); appendString(T26_, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_5)); appendString(T26_, frmt); failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A(T26_); } LA24_: ; } goto LA18_; LA20_: ; { add__IM4kcMNkkOLJtqdEqSxR8A(&result, args[(NI)(j - ((NI) 1))]); } LA18_: ; } break; case 123: { NI j_2; i += ((NI) 1); j_2 = ((NI) 0); { while (1) { if (!(((NU8)(frmt->data[i])) >= ((NU8)(48)) && ((NU8)(frmt->data[i])) <= ((NU8)(57)))) goto LA30; j_2 = (NI)((NI)((NI)(j_2 * ((NI) 10)) + ((NU8)(frmt->data[i]))) - ((NI) 48)); i += ((NI) 1); } LA30: ; } num = j_2; { if (!((NU8)(frmt->data[i]) == (NU8)(125))) goto LA33_; i += ((NI) 1); } goto LA31_; LA33_: ; { { NimStringDesc* T40_; if (!NIM_TRUE) goto LA38_; T40_ = (NimStringDesc*)0; T40_ = rawNewString((frmt ? frmt->Sup.len : 0) + 50); appendString(T40_, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_6)); appendString(T40_, frmt); failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A(T40_); } LA38_: ; } LA31_: ; { if (!((NI)((argsLen_0-1) + ((NI) 1)) < j_2)) goto LA43_; { NimStringDesc* T49_; if (!NIM_TRUE) goto LA47_; T49_ = (NimStringDesc*)0; T49_ = rawNewString((frmt ? 
frmt->Sup.len : 0) + 50); appendString(T49_, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_7)); appendString(T49_, frmt); failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A(T49_); } LA47_: ; } goto LA41_; LA43_: ; { add__IM4kcMNkkOLJtqdEqSxR8A(&result, args[(NI)(j_2 - ((NI) 1))]); } LA41_: ; } break; case 110: { add__yG4AKzsBRS1W4MANDlXQeg(&result, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_8)); i += ((NI) 1); } break; case 78: { add__yG4AKzsBRS1W4MANDlXQeg(&result, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_8)); i += ((NI) 1); } break; default: { { NimStringDesc* T58_; if (!NIM_TRUE) goto LA56_; T58_ = (NimStringDesc*)0; T58_ = rawNewString((frmt ? frmt->Sup.len : 0) + 50); appendString(T58_, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_9)); appendString(T58_, frmt); failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A(T58_); } LA56_: ; } break; } } LA5_: ; start = i; { while (1) { if (!(i < length)) goto LA60; { if (!!(((NU8)(frmt->data[i]) == (NU8)(36)))) goto LA63_; i += ((NI) 1); } goto LA61_; LA63_: ; { goto LA59; } LA61_: ; } LA60: ; } LA59: ; { NimStringDesc* T70_; if (!(start <= (NI)(i - ((NI) 1)))) goto LA68_; T70_ = (NimStringDesc*)0; T70_ = substr__2yh9cer0ymNRHlOOg8P7IuA(frmt, start, (NI)(i - ((NI) 1))); add__yG4AKzsBRS1W4MANDlXQeg(&result, T70_); } LA68_: ; } LA2: ; } return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UQfMnMPks8jKz20fTXQy9bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_10), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, rope__KOisMGxcPhz6CcSmxgwEQQ)(NI64 i) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; NimStringDesc* T1_; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; gCacheIntTries__opyfsNv023Md1P05mqsDew 
+= ((NI) 1); T1_ = (NimStringDesc*)0; T1_ = nimInt64ToStr(i); result = rope__yShmEg9cffWxI7s5XzEKBow(T1_); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KxpxlR6eqq3gRIOYTfR67w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_11), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___IFeEbVhQpPGgxkLehuSiBA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_12), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BYiowJAm8zF7RBRISElaLg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_13), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZkZcMxwzInnijXy5kz1K3A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_14), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(void, prepend__IM4kcMNkkOLJtqdEqSxR8A_2)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b) { unsureAsgnRef((void**) (&(*a)), amp___ShdZ6VrAQkY0nWR9a39b9bGdQ(b, (*a))); } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___4cYKitaHx6RQ9azRtQsZp6w)(NimStringDesc* a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* T1_; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; T1_ = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; T1_ = rope__yShmEg9cffWxI7s5XzEKBow(a); result = amp___ShdZ6VrAQkY0nWR9a39b9bGdQ(T1_, b); return result; } N_LIB_PRIVATE N_NIMCALL(void, writeRope__FwuzOBq6SLlanVUstm8q9cA)(FILE* f, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r) { { NimStringDesc* s; s = (NimStringDesc*)0; { tySequence__WwUFq9cJ2xKRlsAWVEHyPRg* stack; if (!!((r == NIM_NIL))) goto LA4_; stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) newSeq((&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_), 1); asgnRef((void**) (&stack->data[0]), r); { while (1) { NI T8_; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* it; T8_ = (stack ? stack->Sup.len : 0); if (!(((NI) 0) < T8_)) goto LA7; it = pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem((&stack)); { while (1) { NI T11_; if (!!(((*it).left == NIM_NIL))) goto LA10; stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) incrSeqV3((TGenericSeq*)(stack), (&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_)); T11_ = stack->Sup.len++; asgnRef((void**) (&stack->data[T11_]), (*it).right); it = (*it).left; } LA10: ; } s = (*it).data; write__PArlm09bKklm2BLsCg6YtaA(f, s); } LA7: ; } } LA4_: ; } } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___G9aA37gQrW88KHzpCAwhgjQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_15), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___PoDv5ydEvGdd9aiIF9cOiAPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_16), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vzbf0XksfaFTXNoTT6BCwA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_17), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lQVSDPkAFXHNoa1N7jYrNw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_18), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6d8an6hdqiIrRjPW1wEh5Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_19), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gMbiWAc0IjihIq46IYhmAw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_20), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___uHsu7fLXac4OhMNd79bSJwA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_21), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3WM9b4PeyDKoIDFMvYcQX3w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_22), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___p4LhaCxKpUERrq9cB9b8Mp9cw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_23), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TbMwXzwNL7txOQADiTjwKA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_24), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___E0nDsXp7tY4mC1BnrrjWmA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_25), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mbjeaBETPixw9bUvyk31B6g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_26), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AfR9bXoD9bcehKoM7F8O79bYA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_27), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nlZFDYB4M9bmBbYqEropRVw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_28), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dwsIkeXQe0E8HKrzN9aRE5A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_29), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___fIR1FG0QPRsKvEYKq4tJUQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_30), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jADQs38xm62v1oxF2cSvEw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_31), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___DZV83DjWnQ9a19atC2oeswXg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_32), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sfvTjNjtOC86mU9bHczF6ow)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_33), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9ab1aKSDn70Vte0NcIItnaQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_34), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jadqNPnY9aM3oxYK6jarLrA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_35), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LvsIDF8olc08xBiqCYIUog)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_36), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6Tfa1iP1ENVlWbe89cSELSQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_37), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hKg2Id9cvzE5Dgl9cU31c4Vw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_38), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___H3xXuIFdbz4MNb5T6BSfcQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_39), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ELXFo0GedkhGYj9bocTHZAg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_40), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9aLrcjgzGJE3f9ab2uR37jog)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_41), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3Q9c5iS9btBDBXZVoQktb1XQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_42), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MALQXTKXJv7x9a9c247satLQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_43), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0nBiBCva6YS9a9bSV2Vr7Zxw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_44), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yyhPPkMkLJqWG6p8HGn9aoA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_45), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___t8gRNGR1flvaCNlBxuLn1A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_46), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xQaqlAwFuwxqBFixw7ewLg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_47), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2SWcbuU7RHQR0b8y9aJ9a5VQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_48), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gSgutt9b7GMWVGBkCt0UHAQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_49), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___Vcuq0AWiVDndx4UH9cJ9cBRg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_50), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___l4wxq9cmPihXoF5xnDVNR1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_51), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zgEKWXsZtT6lqQ6XlgfrsA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_52), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uXZ30k0oJEqGPZW57O3dwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_53), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tTI9aMQiBZdiEeBIVh7QtYA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_54), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___VJBBlA9aMl5p0yYB1WzSMVg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_55), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jw4Sb0OSpKH1T5cLz7iyzA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_56), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0RQ2PINB4t8FjFlNUM6N9cQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_57), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LQ9bGxpANW8yeg5P9c0UYAaQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_58), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___f8tdlskieCnWysl9c9blzqZg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_59), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KbFpNe1pZ7hIuQi7dp1dSQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_60), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nunbo9aB0HmmYQJ3InIBEzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_61), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RBxLok7DyUB0aHl9bxPIl9bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_62), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NARRjCd1x5Fr7NTTcoPRrw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_63), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NlLLwmZHOiJUpZfuk00AWA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_64), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mF9aI9b3hDjj53TD2C2gTrHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_65), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___PafMws9cJ9arr9a0RVMoIHmAw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_66), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3lAlmrWiRqEg9a9cd9a8kNhig)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_67), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___f8NIixSwWrk6SXQ3BFamWw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_68), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TTRh79a14hh1gb0owIP1Y6Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_69), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TmeCjGna9cPfiHHcfqmKXjw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_70), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FsfRVuOOBePjn9cQ9aK7Vh1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_71), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___paA0sar8RKZqiwEaDfWo2A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_72), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jr9cXNQhhlLDfFJH4RSjeZg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_73), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___EnzikEr9bDhOR6GYxWuYSwQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_74), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QqzUiJcAEZE2azDhIWHrgg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_75), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___20ZujjIFPkyqvS2OmenEAA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_76), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Vxo9ayk1xB18if39aZ1TBnKA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_77), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NtQEfuK9bXszNTfYU57z19bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_78), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___AKNexo4CH8G2vDeWW34Vpg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_79), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LE3oWAmB5YDSDHm3LNHhCg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_80), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___W83I2xs7lC32PrMs9bq4P2w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_81), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JKMGBJtXtDvc0NwxujFmZQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_82), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TA8WFV49atYpIneJatQWALw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_83), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nPenDL3j2Q6A1an1Cl3oCA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_84), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TNkzce2Sd9bck2QRtketc8A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_85), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___kqRXw2WRJqDnfQK0N30ydw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_86), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BKnrQUIV2xGn2MO0RK09aUw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_87), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SCyrk9acEm3vLZhXCV1fGNg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_88), 
args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___erDe9aYc2BNxzH9brKlmtEBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_89), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HSAgkeH84eiEd8MfKIuBQA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_90), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1AD3Wp47Hcdfg6PO2ac0NQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_91), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___T11tCz9bIGT2CcftAwrDXZw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_92), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lS9bA1j3Ue6pp7sCliDsT8g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_93), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___M3h9cTlVBrj2vakKBqQRlMA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_94), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BBAyGuVoK6QA7nXfPUIYKA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_95), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___g9b9arp3BWCGRHDe21SJso6w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_96), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___09aVguRR64dWfw4b6fKBcqg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_97), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tgUnLdPVK0vRqC0pWxMClQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_98), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FBNsdfF5FNrY4P9cYQIfvZQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_99), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___cB7zULPbG5vWWdCukRjdqg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_100), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dpzmcz9a6kXbhFacdElIMOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_101), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AWFBEodxoi9a61KDUc9aiw1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_102), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vHbYzYlzLPcurSm0Hu8InQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_103), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nzT6Rke9c7tkW9b3XMmld2LA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_104), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9cCc2iMcL3MEBZTTL3LCW1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_105), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ahBYcGrhpPvM5dTdzCQBrQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_106), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___XI9awM9a9aQ9cB9bcS7uDRsa1Rw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_107), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___9cWNaGuyEpBbdBlD9b5nY1ug)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_108), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6P67I9czJ9aa9aZzVyYWUiGlw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_109), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S4jE5dFDtcCC8ODzxaJk6A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_110), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Msid9awGKVeVe7p3v7WfNQA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_111), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xyRsdWsGY1DVVispyn0Xeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_112), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___EPABzhs2B9atAvHV4CUTw2Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_113), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2MhCcipNmSHgcDtN4cr8ng)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_114), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0ul9cDZYl7YkH1RhZBTd9c6A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_115), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QFf4DPoOk6Jy59cL2OASJzw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_116), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___7yDHbEsisDNKcqQHIRgOuQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_117), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GwVmUG4AZCEAP8dBk4TGHg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_118), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___q7DaQZqCe0lRO0rhBWzM0w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_119), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hGIvKp3CGssDQ2vSvfksxQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_120), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9c1P82lz6H9anMKDbz1vYNpg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_121), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dbg9bsMENUwtF9aO45wEGG3Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_122), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ym0Pr6z8A9ajyOAgotpd9a9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_123), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___izqbVTMtpY7kMiTK4bPJ6Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_124), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rouofEnBX1ok9aMXmOsKdHg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_125), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___C3GQZbey70223GyG307UFg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_126), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yxmLIVRKySYknm2wSBp9cpg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_127), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8u7UPO7ZpaMkWoJRtZLlYQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_128), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xXT7cKE1NTiL4U2MdlA2yQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_129), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___44q9ak51X9b9bmuZ9cK4LsFWOg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_130), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___77dMna2dOod5LqwYkRMZGg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_131), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QXMcmOst45ThYFLo9cOKDiQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_132), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zldA3DCxzpAhONjlfz7iIg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_133), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dnB3So2xw9c189c09a9cc9b4hxA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_134), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___r2gXVULKoAtQjkgjf0Z4wg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_135), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___VsLzrOz1nS9cRBBz9ccZfETQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_136), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___tRSKshYob5uzZE3eBVe59cg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_137), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vcbf2lEZaiSjbAHwgt9aKXw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_138), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sb2NV56uvmvOtYkgVsaVQQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_139), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___7STLi75js8HXlmFg7Abt9bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_140), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5O50gePV9adn3wgFGWjlOLQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_141), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9a3Y7eeGNXkOCLUktwxzN9ag)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_142), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Ng8dczn37bLzoM9bsVdPwjQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_143), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___boICAAvO1zkTlYDOuEaj6g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_144), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LeuvM3mIc6pSNktpm9cHSVw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_145), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mxQQ2vwZhwfDagj5SEXeHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_146), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___x2NKZw9brJpylbwEtLfx9a9bg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_147), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TmT2Gs9cB7RN9cmo9c9cBpfKsA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_148), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RiPFNabSvay09bAW4Jic2ag)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_149), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___efSHgbCUYoX1lUK7M9aj4Pg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_150), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Vmgih7rhd9cXUC9cEBz2cwXQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; 
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_151), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rB3209aHcqpT39anNUezpSjg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_152), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___x85Q1O2QUnYbstPlxUCyAQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_153), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___L3AeZ1n9aK4C1jsBCeaCmlQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_154), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ebmRHYtM9cCbYF6WvKDfQ9cg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_155), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qE1JtEDDOvP6J49a9cv9aK1Dg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* 
result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_156), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ctvQ2lU9b9bnVVpNP4GhIo2Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_157), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8bHx2qDxS2yWIId1X52mqg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_158), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___kTDR7D9c9aomjcaUQOmKJ9csg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_159), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1tj59chZC08k4TWYeZiqDnQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_160), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___533QKY9a8quvLM1SsLE1JfQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, 
NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_161), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uFJUSitn9c1Tw6cF9cZf6x6Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_162), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___G8iCcDovsaw25PkF7wHs0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_163), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SY4U2QvmoQxocaG8MOmyHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_164), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bhkFYKbURxGcJnKpswdr2Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_165), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___lTsL0bi6njxzDh9c8A32r2w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_166), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___k4VEB3kaBL72FRQN8buzSg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_167), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YbQIA9cHUESCyYT1WEeIVbA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_168), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___66KauNYQRukYNgmb6bVXEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_169), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S550SlHmWbDpD7rs0J2lrA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_170), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sGnLi1DjaBomQ9c9a6MOCA5g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_171), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bEKtSmboScaCP8PPnlOWqw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_172), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZpwWwpfBXgcQ6xoLOH4CJw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_173), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GHW5yjG8N9c2BQBun6aBJzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_174), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Yup67SPGRVcwMdmZwc9cSag)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_175), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ec65mR1N7BSL9cmUa3z9czvA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_176), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ibyK70G44kCK9cN8nAkxyGA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_177), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___H9b69aGZGrLOiKWQdd30yQ9bg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_178), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Te7bvH18PbGe5siNJ9aDTTA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_179), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MUaBvSw0MHw3qQi9bYavAmg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_180), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bWYxjLMocXEvYgQQcC63rw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_181), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZpcNBrQMfioSvQNxKHhu9aw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_182), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gywCjjjPZobIva6liQWNLQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_183), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6PDHoyz05lEjxGNE0k0ikw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_184), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AXGsBlGV5DoEOwPJSl9bdJw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_185), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ygzR9aJ6oM1bZTq4Z2lNO3Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_186), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uYVc6UX8hcaEdrHosUQAOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_187), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AlV8xJkjCXujAUesHxezgw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_188), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___L9asecuKwevQN2h9cWzyv6oA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_189), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nZD9cadh12dcqTFsXBHbCRg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_190), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dz1JHdrf1p9bPB9ad2dZBtYw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_191), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0MUu7DVBoaLHTVUZe9bKoIA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_192), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___29aIWEGnJW0wnITIeSKWfFg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_193), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___n2CigWG38YNInkiL4n8g7A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_194), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___bb3v9bDRLv9c9bcQzGH9c5H4Gw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_195), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tkJq8W3gQVDjuu9aT3THC6Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_196), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___oyQkqbRkRzo43y6iRevkaA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_197), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YuphtPwdJHG6BUJOVa9bX3w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_198), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___EQxs5xa4FNWtMfcvmFZ9cMA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_199), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5YbjRZxm0g3SrdnL73aQaw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_200), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MEALpIIbc0cKMcjQ7Xckzg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_201), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yUc5o9ax9c9asIVNkfprLRPpA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_202), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4JrnABFfF3UTQ3nO9a6mXzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_203), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bkAwkKoaz09cAQo9arQjGA0A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_204), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___7N9bV9cjVBHs9ciAhz7vgdI9aw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_205), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QX9cU2fNK0jJrZNDQKnAycA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_206), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vTbVjc6faJqdBrTckFLLWQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_207), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___v4k9cDtOUzGyUHJbnJ7kQKg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_208), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0ym49cR6ES8k9bYWsnh1fELA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_209), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Jx78R9a9anGvjjocCaP8YgIg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_210), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___s0lnM9cZDB9bOREa4Fx1leBw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_211), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aT7p9bNEmP3LxrK3OhspnSw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_212), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mV75vMLuQ8rrQEUzNz6llA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_213), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jhVz7tKuf0heLM2D3nL0gw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_214), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___c4YKWXetPKpaUUF7Qft2gA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_215), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rCIIoKC0OrXhpuTFTIZn0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_216), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lXaYcLcHHuQ46VvpH6Qr2A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_217), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___duX6hgjmpJtFFdvJVuoafg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_218), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GNSb4l0oRsR1gu66azz1LQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_219), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LGbUtKnsZL8FcQiQN7sWEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_220), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___e8Xf9ajw9cRlpuqnFnlEuSpA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_221), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nVQhtKHyPC8pvPbUAUBU7A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_222), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9bI5GhokFUA9bgO9av819cgdBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_223), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___qTicKO8EMC9cWGOyybIz4WQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_224), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yZHx0qMqBvbhmZ0fMuAP6A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_225), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YQzyPnY5vKAqE2RyLX0cew)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_226), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___cIILAsA6BeRrvHfloZIscg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_227), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___IwDTuHqkGn7wW16ga2ktSg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_228), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lbkoHJP5AIgE86vP7MmlKw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_229), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9b84wNYrm79cLYfx9bsPNHjPQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_230), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___K5ihI3kW9cFBh6sKlfEpJwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_231), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nEiBK88oEGnvYfkiei9cyJA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_232), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Psy1qActyEYmIhrRo2KkJA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_233), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9cZzkwYphs086zWiuLotXLA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_234), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___kPsYd8d9cco3hhqO7CEAFeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_235), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BbOsdTh4ZRNKmiISHDyg3A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_236), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Py40oiVtYdIelNuiQQjpjw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_237), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QzVlk7tEXgagMWC19aLvbkg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_238), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qxufH5vUl9aY2l9cFq39bnVwA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_239), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jiTCvQQpgMU0bTrdVuECiw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_240), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___n4OrLXC1r9a83k5wz2NoWxQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_241), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bJpxHYPJaxWBQn6QxwBA4w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_242), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___fOn9b5Ij3ytw2Ui9a2CPI5zw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result 
= (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_243), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zJU3FoYOdJ9bmuODPmqtgdQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_244), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1MXpJAdeOMc2XMg5H7t9aSg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_245), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___VNAv31sqVgxrd9aXeFF5wYw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_246), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MULS9c8dKz2mJ1U9a9cMyTCYw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_247), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5TB09c2Iz60T0YagbSbI5RQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_248), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NIzUqj4Mr1E3EKy0AkJaXQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_249), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yQdCkIARIVr9aqI8oVxi9cQw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_250), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___WYvjnWcyRjjjI0lasIi1YA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_251), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hR4oq6WdDjEl0JIvQtvUlg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_252), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___84GQPNcrIJtbrzuA7JnMPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_253), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SqZEI7bxySjmJX4GsXyvKw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_254), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___c1f569aWpTd825BTnv9bq4Xg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_255), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ibl3qMPOrpGT2x8X7vmbeQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_256), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bBcuDHMXr6Kz1tr7BzD9aKw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_257), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aDvifvZOUmduC6Unfm69bKA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_258), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5kuxCbMO8PVJc9aJbXScUOQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_259), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Uu9cBz7dxPVDFhF9aLzWecyQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_260), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___WWt3il4CHPiYP10KdNLrWw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_261), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hc7hMh137dtaNdd3qw28EQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_262), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___XWz49cQA2QiZaLkqHBU5L3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_263), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Au81R9a68Rv3gwlPtvDarPg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_264), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Yw741acxvsUs9cOX9cuiDj9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_265), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___T9caGByKkBhaXSZ6fCJLIdQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_266), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JmTWN8YiVKTZuvCYW2XNZA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_267), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Kbv8OIo8zpawh7SNMbfgkA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_268), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___B0OBOTOJQENvDd71LJ9b19bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_269), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___22ELRKd9bDuNug6qvIihS3A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_270), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ddrHnMlEhcHznkXv27msmQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_271), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yhJ9aDxHfJqHvWO0i6N9bukQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_272), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MLJpsW0DAZYB8lAgq09cUjg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_273), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8tWfSjtTOlDafxpQPvChAA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_274), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xKLwwPkFSVy2Dtn9cuJ78xw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_275), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hdRijZdoPR3UGq9aUw2zFDQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_276), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZjQc8bFVF8ePFYxjN0iVVg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_277), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SiqB8gWmdYKb4vtgqYrrMA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_278), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2Ixv9aZ9bvpNaVAVzYBJlUPg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_279), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HoXSbgR7plMG7Fef0fcy9aw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_280), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___H1Ma2EXqegHnMqzJZ4SA1g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_281), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___jpXTCDNVjIi5r4hbHN5SVQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_282), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4L62Yp9bLO2ZDcvBG9bSvP9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_283), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MCSdS9cTdQvttqiM9azLzkDg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_284), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___E9bSTz8DQ4tgiLV9avQjFgFA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_285), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3CQpPXVDiNqC3jKO8Juliw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_286), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___w50CkyHBltcyR8rWxttZCg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_287), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___fmEfDTfNDkVDxWi9c0O6D2g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_288), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___k9bgPIs43oLgxnk1l4TNQaw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_289), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5MqeIopvDuA9aozxL79cQ88g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_290), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Zp9bMZDO5tEkvVLTxiKsBkA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_291), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___j5FZyaqnqjc2dcsUkAp28Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_292), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___EbvvG9awBeRKzx8xuBIb7TA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_293), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9a8besSQa09cOOt9b9cgdVwY9aQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_294), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___oVKF7oq59cRGAaMpvWzNWbw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_295), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___7ru3bwKuSx4Sc8ilsBmX3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_296), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MDIdJXTVckPj57aO7LMVgw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_297), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vQDE0VOBftnrpkVsM9cme4w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_298), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9bmR9bM9b0qqEqU0QJKnmLQnA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_299), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___88tWbH31SmOWJjgJ7RnfHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_300), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___t1CB59bEwlxfHZhNwNNz1bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_301), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YbLM7ZajsWOFLl4iSo0Krg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_302), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rH7Ns9bqAnnfkukwBIlz9bKg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_303), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zx9ctq3Ffe9aysjoWhZOzevQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_304), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___T9a21DAzFCa3OqRooKKtkqw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_305), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Y4DThr9bpMbmoKpvgT1rYwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_306), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___811qrD9bMr21weOkImaKvIA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_307), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YNifhKTQWQRf1atK7E3Qmg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_308), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YfbBxPLyPvVS6F2y9bSUFIA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_309), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___OBvl4G6evYkvK9b9bClFGqNw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_310), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___pHsLkkx9bTDctZjmJqwCYRA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_311), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ksH6NowTz9bh4eMOdyaiR1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_312), args, argsLen_0); return result; } static N_INLINE(void, nimSetMem__zxfKBYntu9cBapkhrCOk1fgmemory)(void* a, int v, NI size) { void* T1_; T1_ = (void*)0; T1_ = memset(a, v, ((size_t) (size))); } static N_INLINE(void, nimZeroMem)(void* p, NI size) { nimSetMem__zxfKBYntu9cBapkhrCOk1fgmemory(p, ((int) 0), size); } static N_INLINE(NCSTRING, nimToCStringConv)(NimStringDesc* s) { NCSTRING result; result = (NCSTRING)0; { NIM_BOOL T3_; T3_ = (NIM_BOOL)0; T3_ = (s == NIM_NIL); if (T3_) goto LA4_; T3_ = ((*s).Sup.len == ((NI) 0)); LA4_: ; if (!T3_) goto LA5_; result = ""; } goto LA1_; LA5_: ; { result = ((NCSTRING) ((*s).data)); } LA1_: ; return result; } N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, equalsFile__9bihNFg7Qajcg9arfx5cr9aHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, FILE* f) { NIM_BOOL result; tyArray__9bKy7UA2LOi2vzOViufaW1Q buf; NI bpos; NI blen; NI btotal; NI rtotal; NIM_BOOL T27_; NI T28_; { result = (NIM_BOOL)0; nimZeroMem((void*)buf, sizeof(tyArray__9bKy7UA2LOi2vzOViufaW1Q)); bpos = ((NI) 1024); blen = ((NI) 1024); btotal = ((NI) 0); rtotal = ((NI) 0); { NimStringDesc* s; s = (NimStringDesc*)0; { tySequence__WwUFq9cJ2xKRlsAWVEHyPRg* stack; if (!!((r == NIM_NIL))) goto LA4_; stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) 
newSeq((&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_), 1); asgnRef((void**) (&stack->data[0]), r); { while (1) { NI T8_; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* it; NI spos; NI slen; T8_ = (stack ? stack->Sup.len : 0); if (!(((NI) 0) < T8_)) goto LA7; it = pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem((&stack)); { while (1) { NI T11_; if (!!(((*it).left == NIM_NIL))) goto LA10; stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) incrSeqV3((TGenericSeq*)(stack), (&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_)); T11_ = stack->Sup.len++; asgnRef((void**) (&stack->data[T11_]), (*it).right); it = (*it).left; } LA10: ; } s = (*it).data; spos = ((NI) 0); slen = (s ? s->Sup.len : 0); rtotal += slen; { while (1) { NI n; if (!(spos < slen)) goto LA13; { if (!(bpos == blen)) goto LA16_; bpos = ((NI) 0); blen = readBuffer__Y9atVWUcVyKHG9aBP4D0P9czA(f, ((void*) ((&buf[(((NI) 0))- 0]))), ((NI) 1024)); btotal += blen; { if (!(blen == ((NI) 0))) goto LA20_; result = NIM_FALSE; goto BeforeRet_; } LA20_: ; } LA16_: ; n = (((NI)(blen - bpos) <= (NI)(slen - spos)) ? 
(NI)(blen - bpos) : (NI)(slen - spos)); { NIM_BOOL T24_; T24_ = (NIM_BOOL)0; T24_ = equalMem__9bGgqEk7RXXl6eqM9c1HdELAsystem(((void*) ((&buf[(bpos)- 0]))), ((void*) ((NI)(((NI) (nimToCStringConv(s))) + spos))), ((NI) (n))); if (!!(T24_)) goto LA25_; result = NIM_FALSE; goto BeforeRet_; } LA25_: ; spos += n; bpos += n; } LA13: ; } } LA7: ; } } LA4_: ; } T27_ = (NIM_BOOL)0; T28_ = (NI)0; T28_ = readBuffer__Y9atVWUcVyKHG9aBP4D0P9czA(f, ((void*) ((&buf[(((NI) 0))- 0]))), ((NI) 1)); T27_ = (T28_ == ((NI) 0)); if (!(T27_)) goto LA29_; T27_ = (btotal == rtotal); LA29_: ; result = T27_; }BeforeRet_: ; return result; } N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, equalsFile__Wiam9c8x73Mtmbj0r4Ppikg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, NimStringDesc* filename) { NIM_BOOL result; FILE* f; result = (NIM_BOOL)0; f = (FILE*)0; result = open__gq12VLhVO0NBzUTnGgz4nw(&f, filename, ((tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg) 0), ((NI) -1)); { if (!result) goto LA3_; result = equalsFile__9bihNFg7Qajcg9arfx5cr9aHA(r, f); close__fU6ZlJAtQ9bre04EDZLdGsA_3(f); } LA3_: ; return result; } N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, writeRope__LLRRC42xWBSkxzV9bsPu7lA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* head, NimStringDesc* filename) { NIM_BOOL result; FILE* f; result = (NIM_BOOL)0; f = (FILE*)0; { NIM_BOOL T3_; T3_ = (NIM_BOOL)0; T3_ = open__gq12VLhVO0NBzUTnGgz4nw(&f, filename, ((tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg) 1), ((NI) -1)); if (!T3_) goto LA4_; { if (!!((head == NIM_NIL))) goto LA8_; writeRope__FwuzOBq6SLlanVUstm8q9cA(f, head); } LA8_: ; close__fU6ZlJAtQ9bre04EDZLdGsA_3(f); result = NIM_TRUE; } goto LA1_; LA4_: ; { result = NIM_FALSE; } LA1_: ; return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___T3CpMgcFHzYracJ80CUZBQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_313), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6wQcdZnh9aH29ay5rwY6M5fA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_314), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___y39ant8iE9bjKB0kbkRCAibQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_315), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RKXvZR1cmZW5dfjtFQCG3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_316), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nEA33x9cMfuJw3ZiGbn25iw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_317), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0xK6HolrLvVFWil73hZYbA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_318), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Z2c9cvs0wVVVqTEZ3Qwe9bfw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_319), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AxDJCYpgPoquRsZtiOnpRw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_320), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dU9cenGIcVUltUO1088LhYQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_321), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TLpRy9aDJ1Ni4vccOIoiMbA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_322), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RzB0z3UV9bb4kXUEGyS9crRA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_323), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Z1QwTAihBHnxe59cytXnhmw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_324), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___XZnCV59at0sqX6ShEjlFLgw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_325), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YLzwVVtf4fuPYZVeMQOa0Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_326), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___CtS8L8cOLTsSuQ10mtHsvw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) NIM_NIL), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___mPpmmd13MIZLTbd1oOdSkw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_327), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Th3qC4WgcAhWPSlLw7vZ9cg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_328), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3RPy0XXevrEBMts1Mb9arGw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_329), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gqwqalZtiJtCgAF9bY5S6qQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_330), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___G9bYX9bu7ufcttiARCDUJ0qg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_331), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___W0CV9bE9bNiLgazfFZjoQCBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_332), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ecC7jlB6gBWrt0K9byHohPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_333), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hFzCKQOJ8Eao2AJk5HOvxA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_334), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___62079cK9bsws1aAJqEmAGo6w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_335), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hO1UTpWJhaojnhUyqfmgPQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_336), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___wlKCT75QSpBNooI9a2xvWeQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_8), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uD0SC9bUeWpB9cK7V1aBT9aNQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_337), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Uez7zQbKzeDFToq2Yh43bA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_338), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JbygmsEkVsyK85BPVFvwbg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_339), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FLXrAGf7HFTHIGh8Xuickg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_340), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hmfCuT8fgBmRlPR25L7ZOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_341), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HUHatwko3S0fuszXQAOSQQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_342), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gGKEcvCOVzpTQoSXzO01Dw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_343), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LMnNsJkYlruXHnF5LV9c3pA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_344), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uJ11bTQ8dBBAX88A2cyICw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_345), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2D3IUNoEAKKLxuRqVNosPQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_346), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___o7SGM9buciKf5BOjTvMKA7w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_347), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Ht0mWR3LosfEZ8SopJcmEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_348), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GweM9byC8cQI9cehUzlYVs5A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_349), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Xnze9a4kYSwHurdPnhyNGzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_350), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sGaOrvR5YSM9cGUajaqcNOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_351), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GF60428RM29aXV0LYutm9aOA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_352), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ciTj4q9cGhcXiXY9bPemZVvw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_353), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HLoe040Vi0LPzmTid9aLGdw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_354), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___tnP9cO5PduJRSEeqtm9bocEg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_355), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S6XcU2shl8EfYxL7utXbwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_356), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3GvB8fuMNh8BXF8IoORCxw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_357), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RhAtD9c9aECDorIc8rDhMF9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_358), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___CSdlEV0i9aXEHNuC1G9aIEbw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_359), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4SLS9cx2c8VCFIilepFlOeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_360), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___amX0pef5rA4JAmWZ6ZB2Nw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_361), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xAta147ahLKNrJMPPP5B6g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_362), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sshAiIx49ba6saVSAWuyFuA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_363), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TmulmJw2SZspd0rz2PYvQw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_364), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UFeu00R8dNoyzL8vy54mnQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_365), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qYiwFpynEwFeSf3Aa2sS0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_366), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6xseTZmgyslBQb6RMm9b4wA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_367), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KsZXXO4zKP47iruPcSEryQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_368), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TUxzei0sBfo3GESRTg1T5w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_369), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ikDBM4Dyw9c2kuwAAswRyOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_370), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ht9cduX4yJQKi2Gi685ag5A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_371), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Wsnl5zC9cCEBdwJcHgpLf0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_372), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___deWmrKhbFG0MxH9cDr9cnhfQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_373), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HiCTlq0dXhMZvpDtUGWGQA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_374), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aagcnoz4kFWlzsoVgR9b0NQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_375), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___oYhFcOWR4tEylepRJJLrlA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_376), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3RBmOS8xzFTxpuGVryQycg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_377), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___apXghcMDCUp9col7jN5spHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_378), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9cNvJ1SVovK9b29bKmwKyiijw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_379), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0mbMVYCe5Qwl9aQOKV3sh3w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_380), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___03lrwELd9clj29bFkdXAVxkw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_381), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8croAZ6oMdSPXHbIisuppw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_382), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TDLJ9ciKDBoW4ouZs855Csg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_383), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___Mk2KRdMWX4H3L9aBEG2elgQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_384), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___pFXgvxsz2L5f27ImZwJwzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_385), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___n9aTlv49bCxoRKQNZiWsaW2g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_386), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___y3oNivo8px1XzxmB9b2OY5g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_387), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Pnqkcr360suaX84kwXMuCA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_388), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FA4ohw0aOufzzLhmw9aUAhA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_389), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SWZi8EY4Pz39bBPSp9cbtZMg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_390), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___XaBXRInsoVU7DBc2WK8dzg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_391), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NdMO5d09brFwLfDc8ciTSqQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_392), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___E62TlyqwqpEwqcA0YTjttw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_393), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___m4T7v0qnGpOgwmMenKcgwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_394), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SKTmZPSgcdPr3Du3ia9b9czg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_395), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ItxAXpnPzfUbYRPsHgKrPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_396), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ggqZXIgPaS71ubw22cYODw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_397), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LLnl4aDVJynim7LQvfJKLQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_398), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Ob6yLhv7QvbU9bdZj8Nw2kA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_399), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qfsHROU9aHSaYGq3tpw1XDg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_400), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___j9bcJJvtd9bur0VZUQL3ibgA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_401), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___32ITt7hKDrhn9bXvKbmnE9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_402), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZAOkVi5SmgPcGpCSuSRXVA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result 
= (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_403), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___smDIOmjGgf8ZP9bfDyv43bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_404), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1jtIbjhXi2wH1iWPyC9bgAQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_405), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NPgb4kECDcV8MICSil6Rjw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_406), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___cQHGAtgSLYV7mm9bnVGYGRA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_407), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Q4LBu2cVl8IcNTrtxd6B6A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_408), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___M36w8F9bFwighD3K39bvtVWw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_409), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Wm11wQtuJBQgTy9a39apz0eA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_410), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0bUw514mSumiNnSjkD0bqw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_411), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6hxDi5nlebu1DFLqpYq5lw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_412), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___GkWgkK8SyjrFfWjGRwKWrw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_413), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___oubCLvBtU9aRB9bhG2vbCDeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_414), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KTcAQx04UE87HYZ48ZBm2A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_415), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Y6zpqvbZwK8tJZiKs9agbGw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_416), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2OGTIxEeE0xFVRpz5TxKyg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_417), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0xZtTB2eXM1dRd9aneL5VPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_418), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___amO46kEKgIeOmW50ayV6nA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_419), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3lABfXU9aXZsyfylYizY8KA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_420), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5JCQx3oDHEcLdsEz6Rx0Rw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_421), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dTtf7fil83VcW2Mkkr7scw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_422), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___88NG6Rr5xfTcA6hqLfZ2iw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_423), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1DWSTPxvqlc4A2xRDmjZDw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_424), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___y5Z6ewsHLxj9ctzxTLPCLmw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_425), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___CHBd5pGE9c8nq4KNqM8K48g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_426), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___y2h2X887dhz5sEoD4C8ezQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_427), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dQfg2HrsVY6E7P22Nis1MA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_428), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0b2Bm7vpM8YAMKp9cuAwg3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_429), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1Hh3EN9c4pkzdKB09bo9c9aTBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_430), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AOSgPOjXfsLWEICRXv3U2g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_431), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gN4yb6p4ql6iVJOPAjLEJQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_432), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___WIg2bxfQLkmzIdOv1JkRqw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_433), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3Klw9agVDELeF44OQ6PnRiA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_434), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LL6jCaqBGLwC1sCgmCAEhQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_435), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S9b9bs03lj0NJlhXUmrylsnA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_436), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___fphSfWWyYSWLARtGIpYB9aw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_437), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___As9aDT7fkqstj16MQnIGPhA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_438), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___eAZ21NmzzIsugeSSkcxIkQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_439), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___D2dSwFjTnRSmeKOoMm6w0Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_440), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HlU9bV2X0HOPcGJnQlGm9c9aQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_441), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___p2lIQAdDBUpuVZML6ecUOg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_442), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5hzyGWCNjqgqPj0O7sSnkg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_443), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___l1wvVBeU1Nnie8cWddgPCA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_444), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yVZN2jQzbJwg3E9cehLff9cg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_445), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___E9br9b8BVYaWzg6CXcn9c6EXw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_446), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qPugJ1Nc2L1EdGwEF0AJ0Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_447), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HzZyrXo2QFynm1T8X76cCw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_448), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___O2nyVw4tGD6MMc6u7I9bH9cA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_449), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___IQYZUimFiAV9axFM9c64hKjA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_450), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RCJU8UTq9cE0Jsi59anAbTIQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_451), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S6vmSaSCgC4V2L5H7OWeZg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_452), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Eqr9cgWCkrZrUG3sg0CawIQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_453), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9c1lq60gbfPY9cyjQN4YouTQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_454), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1nMXoOe6cENU7004pnh6wQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_455), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ALynLzo8zWvno8ZxASdm4A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_456), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tlkWMVJPsx9aWUbp8FMjQ4w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_457), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xPW5KjObCPL2lJmHFoqfjg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_458), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mTh2rYVPWUnI8B7kU3NWUg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_459), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aoMj8hrcFi4HlPDZ9a9alpig)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_460), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Yj64cHk9ajrzJI39bfpBfOVA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_461), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9bY6R9buTsrqJYQAuD39cegOA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_462), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___U9b6hkqS6N7XIWr0gy8z9bug)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_463), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2MwhwhkHOiavfXQl9aey8nA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_464), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JRV6DlpqdegYGLcFjNPv0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_465), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ryMkoQkM4zAjyp0800DrDQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_466), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___iW9bjdQoXkul7L0e76qo8XQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_467), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___i3z9am8Hzy69bSo575pRdzGw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_468), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SkAQPSnCyiRvin57XULW4A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_469), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Bym8FwH29aQE8fth9ar38yJQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_470), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___CbbQqCp6itJgwKVRfTr69ag)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_471), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HWgoOloM1oqcI9aZ9bEkoBhg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_472), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Anf1UHjOzz9aHgMOgtnEPZA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_473), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tDrtnFWakp63hyE9cfImgZw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_474), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JwpI2xnYNfR68HstfDi1yQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_475), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___23SvbIxPpf5MIOga79arr6g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_476), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uVZXJGmbOGIG9bfkI4ZDwJQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_477), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UxL9a0Hh7Km0Z0DIk7hp9cBA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_478), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QxiH9aM0po7vA19b2s1CjdEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_479), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FZt89ajG3TKAhfL9aW4s7hcA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_480), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5GaE39bOOeQZy3EFOEIy5QA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_481), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SA9cvbR3uc9cP50nnaEBJctw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_482), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KweYGQ9bFYg76nmoxpk8ksA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_483), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AhY63HjLy2bPe9bslUNBuBQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_484), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3m7YwdrxIvOkmvfnm5JYSA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_485), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TEWiK8QWtRTCIQ9av7sW8LA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_486), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9an6bUHwpxqyL2kgNHX3MEg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_487), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___kLwAORKb0c4oFgFTN9aEN8Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_488), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Qm29ctdy9c4sqKctTsqiBWIg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_489), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UyNt2Asj9aa2ScoGVo9cCnNw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result 
= (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_490), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xXvQyblNYV215UGR9cTka7Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_491), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LYjQOKn1i9ccw8AFlvPGkCg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_492), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___THj0xNXkqJf6reD7exsGbA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_493), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3oFXAbir9c7XcKzu9bpgAM9bA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_494), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4sbi76q7ZLqpKbD3pwJ59bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_495), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Q9cOQGrP4lOdbYHXMQ1yZtg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_496), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0AX4Q6cA8nOXUagvzFqt0A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_497), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qQ3g8SwjZoIFAay85NaiEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_498), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___M0TByFCTj9bbOkDSRpFz3LA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_499), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___OikfyLf8HmjI9auYLFoaVqg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_500), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3KVF9aLACI1h11BqZrkzjNg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_501), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ial810twbEzfkHaHMFYNCg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_502), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Z7wCJf0WipOQOQ4ZZNBIEw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_503), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Xpm9cGf2grEXdjAQV9arqWBQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_504), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sqxyWwlLrfrdyc9b3BINcXQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_505), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ztLQ2Orupb9b9b3KrCvoK9cbQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_506), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___PI6febxsdTbySkLsIEqHKw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_507), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sGRyuC9caCxfdM1i8W4fjgw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_508), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vWWA89aSvs5QwAFN4Jdr2IA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_509), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, writeRopeIfNotEqual__Wiam9c8x73Mtmbj0r4Ppikg_2)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, NimStringDesc* filename) { NIM_BOOL result; result = (NIM_BOOL)0; { NIM_BOOL T3_; T3_ = (NIM_BOOL)0; T3_ = equalsFile__Wiam9c8x73Mtmbj0r4Ppikg(r, filename); if (!!(T3_)) goto LA4_; result = writeRope__LLRRC42xWBSkxzV9bsPu7lA(r, filename); } goto LA1_; LA4_: ; { result = NIM_FALSE; } LA1_: ; return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lQuk161wRVxbYxfH80Iwcw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_510), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UQrwMIIitnm9cEflSXdCkPg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_511), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___A9aKFJUF6ZjJQfrcPHJigOQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_512), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8ehuHmXS8omgqFrdYMsPBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_513), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2Opo6JkHmCRmDA87qcGfvg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_514), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___C7jQ1fH79bR8HRQrbJjFKDg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_515), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2eu2gmgXiDUZkBgTVqD7pg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_516), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___cCI1wZSoDB14achJW7ZFSQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_517), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dkLAWa1dMAcGEAyfUZ59bRA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_518), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___DuvwOyJJ9b2gpVM9cV7DCFSQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_519), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4MBgNtJLOyqbjfGytl2OTw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_520), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___336bx9aXX7GZckfWQE5Jy3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_521), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___IbsmsXdtDOH7pLpzh9cmAOA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_522), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___9cGelOO9b6sliTnobJf6XAsg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_523), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aNorSJCSJyyDo7w0s6eynA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_524), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BYRFs7dwiqyMIzbsx9cDq8Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_525), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TavFv5xK0dxxJCk9b4v34zg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_526), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9aAWQyBOqadJYgBT29bzliAw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_527), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zpFS2Xy9cmoAoqCFSUQj1gg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_528), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Nz9cwOtMmcX2gklRogKhyEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_529), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YGYo0XYmypYw3N26AYh7ug)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_530), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___e8Z4ajz6IErIB0a6mpq4Wg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_531), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___eqn09cqDPu9csxGUOSa2untg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_532), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rZ5o6ziDKz4d3bfaN54Dgg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_533), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YGa4o1aenD9cjoU03CAgtqQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_534), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___b2PLtFwpZkVmYhHWvW4i1Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_535), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ctY4Nx9aQFC9bl9c2wbRLoFYA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_536), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xsFAphqq4CRpmuZ79bXVLrA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_537), args, argsLen_0);
	return result;
}
/* NOTE(review): everything below is Nim-compiler-generated C (see file header);
   do not edit by hand — regenerate from the Nim source instead.  The
   percent___* functions are monomorphized `%` (rope formatting) entry points
   that forward to runtimeFormat with a per-call-site format-string constant. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SSpcZv60d0mAp5H4Mb5hpg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	/* format constant TM..._538 */
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_538), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TtzOadDB4I9a89cWej19a2PNg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	/* format constant TM..._539 */
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_539), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KKiSvh9a121M0uSQjcJhhMg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	/* format constant TM..._540 */
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_540), args, argsLen_0);
	return result;
}
/* Module init: registers the module's global GC marker and zero-initializes
   the rope cache statistics counters. */
N_LIB_PRIVATE N_NIMCALL(void, compiler_ropesInit000)(void) {
{
	nimRegisterGlobalMarker(TM__Vw9cfUOQOae9b9bzZBlucMZQg_3);
	gCacheTries__5GfZTThHPBfB9bjRZdFluBw = ((NI) 0);
	gCacheMisses__fLRm9am8S0daYBVNK6JKyBg = ((NI) 0);
	gCacheIntTries__opyfsNv023Md1P05mqsDew = ((NI) 0);
}
}
/* Data init: builds the runtime type information (RTTI) node tree for
   RopeObj (fields: left, right, L, data), the ref-to-RopeObj type, and an
   array-of-ref type, by filling the static TNimType/TNimNode tables. */
N_LIB_PRIVATE N_NIMCALL(void, compiler_ropesDatInit000)(void) {
static TNimNode* TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[4];
static TNimNode TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[5];
NTI__OFzf0kSiPTcNreUIeJgWVA_.size = sizeof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA);
NTI__OFzf0kSiPTcNreUIeJgWVA_.kind = 17;
NTI__OFzf0kSiPTcNreUIeJgWVA_.base = (&NTI__ytyiCJqK439aF9cIibuRVpAg_);
/* node [0] is the record header; [1]..[4] describe the four fields */
TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[0] = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1];
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1].kind = 1;
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1].offset = offsetof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA, left);
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1].typ = (&NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_);
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1].name = "left";
TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[1] = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2];
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2].kind = 1;
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2].offset = offsetof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA, right);
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2].typ = (&NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_);
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2].name = "right";
TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[2] = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3];
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3].kind = 1;
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3].offset = offsetof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA, L);
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3].typ = (&NTI__rR5Bzr1D5krxoo1NcNyeMA_);
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3].name = "L";
TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[3] = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4];
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4].kind = 1;
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4].offset = offsetof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA, data);
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4].typ = (&NTI__77mFvmsOLKik79ci2hXkHEg_);
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4].name = "data";
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[0].len = 4;
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[0].kind = 2;
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[0].sons = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[0];
NTI__OFzf0kSiPTcNreUIeJgWVA_.node = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[0];
/* ref RopeObj: pointer-sized, kind 22 (tyRef), with a GC marker proc */
NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_.size = sizeof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*);
NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_.kind = 22;
NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_.base = (&NTI__OFzf0kSiPTcNreUIeJgWVA_);
NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_.marker = Marker_tyRef__4hi0XQqK9aLiPuWT9acsXm9aQ;
NTI__USLYl0Lpkimm4FABiJ3ldA_.size = sizeof(tyArray__USLYl0Lpkimm4FABiJ3ldA);
NTI__USLYl0Lpkimm4FABiJ3ldA_.kind = 16;
NTI__USLYl0Lpkimm4FABiJ3ldA_.base = (&NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_);
}
/* shear.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS H H EEEEE AAA RRRR % % SS H H E A A R R % % SSS HHHHH EEE AAAAA RRRR % % SS H H E A A R R % % SSSSS H H EEEEE A A R R % % % % % % MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The XShearImage() and YShearImage() methods are based on the paper "A Fast % Algorithm for General Raster Rotation" by Alan W. Paeth, Graphics % Interface '86 (Vancouver). ShearRotateImage() is adapted from a similar % method based on the Paeth paper written by Michael Halle of the Spatial % Imaging Group, MIT Media Lab. % */ /* Include declarations. 
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob-private.h"
#include "magick/cache-private.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/decorate.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/list.h"
#include "magick/matrix.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/nt-base-private.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/shear.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/token.h"
#include "magick/transform.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C r o p T o F i t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropToFitImage() crops the sheared image as determined by the bounding box
%  as defined by width and height and shearing angles.
%
%  The format of the CropToFitImage method is:
%
%      MagickBooleanType CropToFitImage(Image **image,
%        const MagickRealType x_shear,const MagickRealType y_shear,
%        const MagickRealType width,const MagickRealType height,
%        const MagickBooleanType rotate,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o x_shear, y_shear, width, height: Defines a region of the image to crop.
%
%    o exception: return any errors or warnings in this structure.
% */ static MagickBooleanType CropToFitImage(Image **image, const MagickRealType x_shear,const MagickRealType y_shear, const MagickRealType width,const MagickRealType height, const MagickBooleanType rotate,ExceptionInfo *exception) { Image *crop_image; PointInfo extent[4], min, max; RectangleInfo geometry, page; register ssize_t i; /* Calculate the rotated image size. */ extent[0].x=(double) (-width/2.0); extent[0].y=(double) (-height/2.0); extent[1].x=(double) width/2.0; extent[1].y=(double) (-height/2.0); extent[2].x=(double) (-width/2.0); extent[2].y=(double) height/2.0; extent[3].x=(double) width/2.0; extent[3].y=(double) height/2.0; for (i=0; i < 4; i++) { extent[i].x+=x_shear*extent[i].y; extent[i].y+=y_shear*extent[i].x; if (rotate != MagickFalse) extent[i].x+=x_shear*extent[i].y; extent[i].x+=(double) (*image)->columns/2.0; extent[i].y+=(double) (*image)->rows/2.0; } min=extent[0]; max=extent[0]; for (i=1; i < 4; i++) { if (min.x > extent[i].x) min.x=extent[i].x; if (min.y > extent[i].y) min.y=extent[i].y; if (max.x < extent[i].x) max.x=extent[i].x; if (max.y < extent[i].y) max.y=extent[i].y; } geometry.x=(ssize_t) ceil(min.x-0.5); geometry.y=(ssize_t) ceil(min.y-0.5); geometry.width=(size_t) floor(max.x-min.x+0.5); geometry.height=(size_t) floor(max.y-min.y+0.5); page=(*image)->page; (void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page); crop_image=CropImage(*image,&geometry,exception); if (crop_image == (Image *) NULL) return(MagickFalse); crop_image->page=page; *image=DestroyImage(*image); *image=crop_image; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s k e w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DeskewImage() removes skew from the image. 
Skew is an artifact that
%  occurs in scanned images because of the camera being misaligned,
%  imperfections in the scanning or surface, or simply because the paper was
%  not placed completely flat when scanned.
%
%  The amount of rotation calculated to deskew the image is saved in the
%  artifact "deskew:angle".
%
%  If the artifact "deskew:auto-crop" is given the image will be automatically
%  cropped of the excess background.
%
%  The format of the DeskewImage method is:
%
%      Image *DeskewImage(const Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: separate background from foreground.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  RadonProjection() computes, in place over two scratch matrices, the discrete
  Radon transform of the bit-count matrix and accumulates the sum of squared
  differences of vertically adjacent elements of the final matrix into
  `projection` (indexed by column, direction selected by `sign`, which is
  -1 or +1 as passed by RadonTransform()).
  Assumes the matrix width is a power of two (RadonTransform() sizes it so).
*/
static void RadonProjection(const Image *image,MatrixInfo *source_matrix,
  MatrixInfo *destination_matrix,const ssize_t sign,size_t *projection)
{
  MatrixInfo
    *swap;

  register MatrixInfo
    *p,
    *q;

  register ssize_t
    x;

  size_t
    step;

  p=source_matrix;
  q=destination_matrix;
  /* Butterfly passes: double the merge width each step, ping-ponging
     between the two matrices. */
  for (step=1; step < GetMatrixColumns(p); step*=2)
  {
    for (x=0; x < (ssize_t) GetMatrixColumns(p); x+=2*(ssize_t) step)
    {
      register ssize_t
        i;

      ssize_t
        y;

      unsigned short
        element,
        neighbor;

      for (i=0; i < (ssize_t) step; i++)
      {
        for (y=0; y < (ssize_t) (GetMatrixRows(p)-i-1); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i+1,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i+1,y,&neighbor) == MagickFalse)
            continue;
        }
        /* Bottom rows where the shifted partner (y+i+1) runs off the end. */
        for ( ; y < (ssize_t) (GetMatrixRows(p)-i); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
            continue;
        }
        /* Rows with no partner at all: copy the element through. */
        for ( ; y < (ssize_t) GetMatrixRows(p); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i,y,&element) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
            continue;
        }
      }
    }
    swap=p;
    p=q;
    q=swap;
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_number_threads(image,image,GetMatrixColumns(p),1)
#endif
  for (x=0; x < (ssize_t) GetMatrixColumns(p); x++)
  {
    register ssize_t
      y;

    size_t
      sum;

    sum=0;
    /* Sharpness measure: sum of squared vertical first differences. */
    for (y=0; y < (ssize_t) (GetMatrixRows(p)-1); y++)
    {
      ssize_t
        delta;

      unsigned short
        element,
        neighbor;

      if (GetMatrixElement(p,x,y,&element) == MagickFalse)
        continue;
      if (GetMatrixElement(p,x,y+1,&neighbor) == MagickFalse)
        continue;
      delta=(ssize_t) element-(ssize_t) neighbor;
      sum+=delta*delta;
    }
    projection[GetMatrixColumns(p)+sign*x-1]=sum;
  }
}

/*
  RadonTransform() thresholds the image into a packed per-byte bit-count
  matrix and fills `projection` (length 2*width-1) with Radon projections in
  both directions.  Returns MagickFalse only if the scratch matrices cannot
  be allocated or zeroed.
  NOTE(review): `status` from the parallel pixel loops is never propagated —
  the function returns MagickTrue even if a row of pixels could not be read;
  confirm this best-effort behavior is intentional.
*/
static MagickBooleanType RadonTransform(const Image *image,
  const double threshold,size_t *projection,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MatrixInfo
    *destination_matrix,
    *source_matrix;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    count,
    width;

  ssize_t
    y;

  unsigned char
    byte;

  unsigned short
    bits[256];

  /* Round the byte-width of a row up to the next power of two. */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  source_matrix=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short),
    exception);
  destination_matrix=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short),
    exception);
  if ((source_matrix == (MatrixInfo *) NULL) ||
      (destination_matrix == (MatrixInfo *) NULL))
    {
      if (destination_matrix != (MatrixInfo *) NULL)
        destination_matrix=DestroyMatrixInfo(destination_matrix);
      if (source_matrix != (MatrixInfo *) NULL)
        source_matrix=DestroyMatrixInfo(source_matrix);
      return(MagickFalse);
    }
  if (NullMatrix(source_matrix) == MagickFalse)
    {
      destination_matrix=DestroyMatrixInfo(destination_matrix);
      source_matrix=DestroyMatrixInfo(source_matrix);
      return(MagickFalse);
    }
  /* Popcount lookup table for all byte values. */
  for (i=0; i < 256; i++)
  {
    byte=(unsigned char) i;
    for (count=0; byte != 0; byte>>=1)
      count+=byte & 0x01;
    bits[i]=(unsigned short) count;
  }
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  /* First pass: pack bytes right-to-left (mirrored) for the -1 projection. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      i,
      x;

    size_t
      bit,
      byte;

    unsigned short
      value;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    bit=0;
    byte=0;
    i=(ssize_t) (image->columns+7)/8;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte<<=1;
      /* A pixel is "foreground" if any channel falls below the threshold. */
      if (((MagickRealType) GetPixelRed(p) < threshold) ||
          ((MagickRealType) GetPixelGreen(p) < threshold) ||
          ((MagickRealType) GetPixelBlue(p) < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          value=bits[byte];
          (void) SetMatrixElement(source_matrix,--i,y,&value);
          bit=0;
          byte=0;
        }
      p++;
    }
    if (bit != 0)
      {
        byte<<=(8-bit);
        value=bits[byte];
        (void) SetMatrixElement(source_matrix,--i,y,&value);
      }
  }
  RadonProjection(image,source_matrix,destination_matrix,-1,projection);
  (void) NullMatrix(source_matrix);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  /* Second pass: pack bytes left-to-right for the +1 projection. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      i,
      x;

    size_t
      bit,
      byte;

    unsigned short
      value;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    bit=0;
    byte=0;
    i=0;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte<<=1;
      if (((MagickRealType) GetPixelRed(p) < threshold) ||
          ((MagickRealType) GetPixelGreen(p) < threshold) ||
          ((MagickRealType) GetPixelBlue(p) < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          value=bits[byte];
          (void) SetMatrixElement(source_matrix,i++,y,&value);
          bit=0;
          byte=0;
        }
      p++;
    }
    if (bit != 0)
      {
        byte<<=(8-bit);
        value=bits[byte];
        (void) SetMatrixElement(source_matrix,i++,y,&value);
      }
  }
  RadonProjection(image,source_matrix,destination_matrix,1,projection);
  image_view=DestroyCacheView(image_view);
  destination_matrix=DestroyMatrixInfo(destination_matrix);
  source_matrix=DestroyMatrixInfo(source_matrix);
  return(MagickTrue);
}

/*
  GetImageBackgroundColor() averages the pixels inside an `offset`-wide border
  of the image and stores the result as image->background_color.
  No-op when offset <= 0.
  NOTE(review): if every border row fails to read, `count` stays 0 and the
  final divisions are by zero — presumably unreachable in practice; confirm.
*/
static void GetImageBackgroundColor(Image *image,const ssize_t offset,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickPixelPacket
    background;

  MagickRealType
    count;

  ssize_t
    y;

  /*
    Compute average background color.
  */
  if (offset <= 0)
    return;
  GetMagickPixelPacket(image,&background);
  count=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    /* Skip interior rows entirely. */
    if ((y >= offset) && (y < ((ssize_t) image->rows-offset)))
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      continue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Skip interior columns. */
      if ((x >= offset) && (x < ((ssize_t) image->columns-offset)))
        continue;
      background.red+=QuantumScale*GetPixelRed(p);
      background.green+=QuantumScale*GetPixelGreen(p);
      background.blue+=QuantumScale*GetPixelBlue(p);
      background.opacity+=QuantumScale*GetPixelOpacity(p);
      count++;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  image->background_color.red=ClampToQuantum((MagickRealType) QuantumRange*
    background.red/count);
  image->background_color.green=ClampToQuantum((MagickRealType) QuantumRange*
    background.green/count);
  image->background_color.blue=ClampToQuantum((MagickRealType) QuantumRange*
    background.blue/count);
  image->background_color.opacity=ClampToQuantum((MagickRealType) QuantumRange*
    background.opacity/count);
}

MagickExport Image *DeskewImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
  AffineMatrix
    affine_matrix;

  const char
    *artifact;

  double
    degrees;

  Image
    *clone_image,
    *crop_image,
    *deskew_image,
    *median_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  register ssize_t
    i;

  size_t
    max_projection,
    *projection,
    width;

  ssize_t
    skew;

  /*
    Compute deskew angle: the skew is the projection index with the sharpest
    Radon response, converted to an angle over the byte-packed width.
  */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1),
    sizeof(*projection));
  if (projection == (size_t *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  status=RadonTransform(image,threshold,projection,exception);
  if (status == MagickFalse)
    {
      /* RadonTransform() only fails on scratch-matrix allocation. */
      projection=(size_t *) RelinquishMagickMemory(projection);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  max_projection=0;
  skew=0;
  for (i=0; i < (ssize_t) (2*width-1); i++)
  {
    if (projection[i] > max_projection)
      {
        skew=i-(ssize_t) width+1;
        max_projection=projection[i];
      }
  }
  projection=(size_t *) RelinquishMagickMemory(projection);
  degrees=RadiansToDegrees(-atan((double) skew/width/8));
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),
      " Deskew angle: %g",degrees);
  /*
    Deskew image.
  */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  {
    char
      angle[MaxTextExtent];

    /* Record the computed rotation in the "deskew:angle" artifact. */
    (void) FormatLocaleString(angle,MaxTextExtent,"%g",degrees);
    (void) SetImageArtifact(clone_image,"deskew:angle",angle);
  }
  (void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod);
  /* Pure rotation matrix by `degrees`. */
  affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0))));
  affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.tx=0.0;
  affine_matrix.ty=0.0;
  artifact=GetImageArtifact(image,"deskew:auto-crop");
  if (IsMagickTrue(artifact) == MagickFalse)
    {
      deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
      clone_image=DestroyImage(clone_image);
      return(deskew_image);
    }
  /*
    Auto-crop image: estimate the background from the border (border width
    taken from the artifact value), rotate, then crop to the bounding box of
    a median-filtered copy.
  */
  GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact),
    exception);
  deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
  clone_image=DestroyImage(clone_image);
  if (deskew_image == (Image *) NULL)
    return((Image *) NULL);
  median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception);
  if (median_image == (Image *) NULL)
    {
      deskew_image=DestroyImage(deskew_image);
      return((Image *) NULL);
    }
  geometry=GetImageBoundingBox(median_image,exception);
  median_image=DestroyImage(median_image);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule()," Deskew geometry: "
      "%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
      geometry.height,(double) geometry.x,(double) geometry.y);
  crop_image=CropImage(deskew_image,&geometry,exception);
  deskew_image=DestroyImage(deskew_image);
  return(crop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e g r a l R o t a t e I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IntegralRotateImage() rotates the image an integral of 90 degrees.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the rotated image.
%
%  The format of the IntegralRotateImage method is:
%
%      Image *IntegralRotateImage(const Image *image,size_t rotations,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o rotations: Specifies the number of 90 degree rotations.
%
*/
MagickExport Image *IntegralRotateImage(const Image *image,size_t rotations,
  ExceptionInfo *exception)
{
#define RotateImageTag "Rotate/Image"

  CacheView
    *image_view,
    *rotate_view;

  Image
    *rotate_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  /*
    Initialize rotated image attributes.
  */
  assert(image != (Image *) NULL);
  page=image->page;
  rotations%=4;
  if (rotations == 0)
    return(CloneImage(image,0,0,MagickTrue,exception));
  /* 90/270: dimensions swap; 180: dimensions unchanged. */
  if ((rotations == 1) || (rotations == 3))
    rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue,
      exception);
  else
    rotate_image=CloneImage(image,image->columns,image->rows,MagickTrue,
      exception);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Integral rotate the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  rotate_view=AcquireAuthenticCacheView(rotate_image,exception);
  switch (rotations)
  {
    case 1:
    {
      size_t
        tile_height,
        tile_width;

      ssize_t
        tile_y;

      /*
        Rotate 90 degrees.
      */
      GetPixelCacheTileSize(image,&tile_width,&tile_height);
      /* Tiles span the full row width; only the height is tiled. */
      tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_number_threads(image,image,image->rows/tile_height,1)
#endif
      for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
      {
        register ssize_t
          tile_x;

        if (status == MagickFalse)
          continue;
        for (tile_x=0; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
        {
          MagickBooleanType
            sync;

          register const IndexPacket
            *magick_restrict indexes;

          register const PixelPacket
            *magick_restrict p;

          register IndexPacket
            *magick_restrict rotate_indexes;

          register PixelPacket
            *magick_restrict q;

          register ssize_t
            y;

          size_t
            height,
            width;

          /* Clamp the tile at the right/bottom image edges. */
          width=tile_width;
          if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
            width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
          height=tile_height;
          if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
            height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
          p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
            exception);
          if (p == (const PixelPacket *) NULL)
            {
              status=MagickFalse;
              break;
            }
          indexes=GetCacheViewVirtualIndexQueue(image_view);
          /* Each source tile column becomes a destination row. */
          for (y=0; y < (ssize_t) width; y++)
          {
            register const PixelPacket
              *magick_restrict tile_pixels;

            register ssize_t
              x;

            if (status == MagickFalse)
              continue;
            q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t)
              (rotate_image->columns-(tile_y+height)),y+tile_x,height,1,
              exception);
            if (q == (PixelPacket *) NULL)
              {
                status=MagickFalse;
                continue;
              }
            /* Walk the source column bottom-to-top. */
            tile_pixels=p+(height-1)*width+y;
            for (x=0; x < (ssize_t) height; x++)
            {
              *q++=(*tile_pixels);
              tile_pixels-=width;
            }
            rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
            if ((indexes != (IndexPacket *) NULL) &&
                (rotate_indexes != (IndexPacket *) NULL))
              {
                register const IndexPacket
                  *magick_restrict tile_indexes;

                tile_indexes=indexes+(height-1)*width+y;
                for (x=0; x < (ssize_t) height; x++)
                {
                  *rotate_indexes++=(*tile_indexes);
                  tile_indexes-=width;
                }
              }
            sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
            if (sync == MagickFalse)
              status=MagickFalse;
          }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_IntegralRotateImage)
#endif
            proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      Swap(page.width,page.height);
      Swap(page.x,page.y);
      if (page.width != 0)
        page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
      break;
    }
    case 2:
    {
      /*
        Rotate 180 degrees: reverse each row into the mirrored row.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const IndexPacket
          *magick_restrict indexes;

        register const PixelPacket
          *magick_restrict p;

        register IndexPacket
          *magick_restrict rotate_indexes;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
          exception);
        q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y-
          1),image->columns,1,exception);
        if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
        q+=image->columns;
        for (x=0; x < (ssize_t) image->columns; x++)
          *--q=(*p++);
        if ((indexes != (IndexPacket *) NULL) &&
            (rotate_indexes != (IndexPacket *) NULL))
          for (x=0; x < (ssize_t) image->columns; x++)
            SetPixelIndex(rotate_indexes+image->columns-x-1,
              GetPixelIndex(indexes+x));
        sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_IntegralRotateImage)
#endif
            proceed=SetImageProgress(image,RotateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      if (page.width != 0)
        page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
      if (page.height != 0)
        page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
      break;
    }
    case 3:
    {
      size_t
        tile_height,
        tile_width;

      ssize_t
        tile_y;

      /*
        Rotate 270 degrees.
      */
      GetPixelCacheTileSize(image,&tile_width,&tile_height);
      tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_number_threads(image,image,image->rows/tile_height,1)
#endif
      for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
      {
        register ssize_t
          tile_x;

        if (status == MagickFalse)
          continue;
        for (tile_x=0; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
        {
          MagickBooleanType
            sync;

          register const IndexPacket
            *magick_restrict indexes;

          register const PixelPacket
            *magick_restrict p;

          register IndexPacket
            *magick_restrict rotate_indexes;

          register PixelPacket
            *magick_restrict q;

          register ssize_t
            y;

          size_t
            height,
            width;

          width=tile_width;
          if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
            width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
          height=tile_height;
          if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
            height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
          p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
            exception);
          if (p == (const PixelPacket *) NULL)
            {
              status=MagickFalse;
              break;
            }
          indexes=GetCacheViewVirtualIndexQueue(image_view);
          for (y=0; y < (ssize_t) width; y++)
          {
            register const PixelPacket
              *magick_restrict tile_pixels;

            register ssize_t
              x;

            if (status == MagickFalse)
              continue;
            q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+
              rotate_image->rows-(tile_x+width)),height,1,exception);
            if (q == (PixelPacket *) NULL)
              {
                status=MagickFalse;
                continue;
              }
            /* Walk the source column top-to-bottom (mirror of case 1). */
            tile_pixels=p+(width-1)-y;
            for (x=0; x < (ssize_t) height; x++)
            {
              *q++=(*tile_pixels);
              tile_pixels+=width;
            }
            rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
            if ((indexes != (IndexPacket *) NULL) &&
                (rotate_indexes != (IndexPacket *) NULL))
              {
                register const IndexPacket
                  *magick_restrict tile_indexes;

                tile_indexes=indexes+(width-1)-y;
                for (x=0; x < (ssize_t) height; x++)
                {
                  *rotate_indexes++=(*tile_indexes);
                  tile_indexes+=width;
                }
              }
            sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
            if (sync == MagickFalse)
              status=MagickFalse;
          }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_IntegralRotateImage)
#endif
            proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      Swap(page.width,page.height);
      Swap(page.x,page.y);
      if (page.height != 0)
        page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
      break;
    }
    default:
      break;
  }
  rotate_view=DestroyCacheView(rotate_view);
  image_view=DestroyCacheView(image_view);
  rotate_image->type=image->type;
  rotate_image->page=page;
  if (status == MagickFalse)
    rotate_image=DestroyImage(rotate_image);
  return(rotate_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   X S h e a r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  XShearImage() shears the image in the X direction with a shear angle of
%  'degrees'.  Positive angles shear counter-clockwise (right-hand rule), and
%  negative angles shear clockwise.  Angles are measured relative to a vertical
%  Y-axis.
X shears will widen an image creating 'empty' triangles on the left
%  and right sides of the source image.
%
%  The format of the XShearImage method is:
%
%      MagickBooleanType XShearImage(Image *image,const MagickRealType degrees,
%        const size_t width,const size_t height,
%        const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: A MagickRealType representing the shearing angle along the X
%      axis.
%
%    o width, height, x_offset, y_offset: Defines a region of the image
%      to shear.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType XShearImage(Image *image,const MagickRealType degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define XShearImageTag "XShear/Image"

  typedef enum
  {
    LEFT,
    RIGHT
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    background;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(IndexPacket *) NULL,
    &background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  /*
    X shear image: each row of the region is shifted horizontally by an
    amount proportional to its distance from the region's vertical center,
    with the fractional part area-blended (Paeth shear).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    MagickPixelPacket
      pixel,
      source,
      destination;

    MagickRealType
      area,
      displacement;

    register IndexPacket
      *magick_restrict indexes,
      *magick_restrict shear_indexes;

    register PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i;

    ShearDirection
      direction;

    ssize_t
      step;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1,
      exception);
    if (p == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    p+=x_offset;
    indexes+=x_offset;
    /* Shift distance for this row; sign selects the transfer direction. */
    displacement=degrees*(MagickRealType) (y-height/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=RIGHT;
    else
      {
        displacement*=(-1.0);
        direction=LEFT;
      }
    /* Integer step plus fractional `area` used for edge blending. */
    step=(ssize_t) floor((double) displacement);
    area=(MagickRealType) (displacement-step);
    step++;
    pixel=background;
    GetMagickPixelPacket(image,&source);
    GetMagickPixelPacket(image,&destination);
    switch (direction)
    {
      case LEFT:
      {
        /*
          Transfer pixels left-to-right.
        */
        if (step > x_offset)
          break;
        q=p-step;
        shear_indexes=indexes-step;
        for (i=0; i < (ssize_t) width; i++)
        {
          if ((x_offset+i) < step)
            {
              /* Destination still left of the image; just advance. */
              SetMagickPixelPacket(image,++p,++indexes,&pixel);
              q++;
              shear_indexes++;
              continue;
            }
          SetMagickPixelPacket(image,p,indexes,&source);
          MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
            &source,(MagickRealType) GetPixelOpacity(p),area,&destination);
          SetPixelPacket(image,&destination,q++,shear_indexes++);
          SetMagickPixelPacket(image,p++,indexes++,&pixel);
        }
        /* Blend the trailing edge, then backfill with background. */
        MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
          &background,(MagickRealType) background.opacity,area,&destination);
        SetPixelPacket(image,&destination,q++,shear_indexes++);
        for (i=0; i < (step-1); i++)
          SetPixelPacket(image,&background,q++,shear_indexes++);
        break;
      }
      case RIGHT:
      {
        /*
          Transfer pixels right-to-left.
        */
        p+=width;
        indexes+=width;
        q=p+step;
        shear_indexes=indexes+step;
        for (i=0; i < (ssize_t) width; i++)
        {
          p--;
          indexes--;
          q--;
          shear_indexes--;
          if ((size_t) (x_offset+width+step-i) > image->columns)
            continue;
          SetMagickPixelPacket(image,p,indexes,&source);
          MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
            &source,(MagickRealType) GetPixelOpacity(p),area,&destination);
          SetPixelPacket(image,&destination,q,shear_indexes);
          SetMagickPixelPacket(image,p,indexes,&pixel);
        }
        MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
          &background,(MagickRealType) background.opacity,area,&destination);
        SetPixelPacket(image,&destination,--q,--shear_indexes);
        for (i=0; i < (step-1); i++)
          SetPixelPacket(image,&background,--q,--shear_indexes);
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_XShearImage)
#endif
        proceed=SetImageProgress(image,XShearImageTag,progress++,height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Y S h e a r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  YShearImage shears the image in the Y direction with a shear angle of
%  'degrees'.  Positive angles shear counter-clockwise (right-hand rule), and
%  negative angles shear clockwise.  Angles are measured relative to a
%  horizontal X-axis.  Y shears will increase the height of an image creating
%  'empty' triangles on the top and bottom of the source image.
%
%  The format of the YShearImage method is:
%
%      MagickBooleanType YShearImage(Image *image,const MagickRealType degrees,
%        const size_t width,const size_t height,
%        const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: A MagickRealType representing the shearing angle along the Y
%      axis.
%
%    o width, height, x_offset, y_offset: Defines a region of the image
%      to shear.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType YShearImage(Image *image,const MagickRealType degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define YShearImageTag "YShear/Image"

  typedef enum
  {
    UP,
    DOWN
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    background;

  ssize_t
    x;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(IndexPacket *) NULL,
    &background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  /*
    Y Shear image.
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,image,width,1) #endif for (x=0; x < (ssize_t) width; x++) { ssize_t step; MagickPixelPacket pixel, source, destination; MagickRealType area, displacement; register IndexPacket *magick_restrict indexes, *magick_restrict shear_indexes; register ssize_t i; register PixelPacket *magick_restrict p, *magick_restrict q; ShearDirection direction; if (status == MagickFalse) continue; p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows, exception); if (p == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); p+=y_offset; indexes+=y_offset; displacement=degrees*(MagickRealType) (x-width/2.0); if (displacement == 0.0) continue; if (displacement > 0.0) direction=DOWN; else { displacement*=(-1.0); direction=UP; } step=(ssize_t) floor((double) displacement); area=(MagickRealType) (displacement-step); step++; pixel=background; GetMagickPixelPacket(image,&source); GetMagickPixelPacket(image,&destination); switch (direction) { case UP: { /* Transfer pixels top-to-bottom. 
*/ if (step > y_offset) break; q=p-step; shear_indexes=indexes-step; for (i=0; i < (ssize_t) height; i++) { if ((y_offset+i) < step) { SetMagickPixelPacket(image,++p,++indexes,&pixel); q++; shear_indexes++; continue; } SetMagickPixelPacket(image,p,indexes,&source); MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity, &source,(MagickRealType) GetPixelOpacity(p),area,&destination); SetPixelPacket(image,&destination,q++,shear_indexes++); SetMagickPixelPacket(image,p++,indexes++,&pixel); } MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity, &background,(MagickRealType) background.opacity,area,&destination); SetPixelPacket(image,&destination,q++,shear_indexes++); for (i=0; i < (step-1); i++) SetPixelPacket(image,&background,q++,shear_indexes++); break; } case DOWN: { /* Transfer pixels bottom-to-top. */ p+=height; indexes+=height; q=p+step; shear_indexes=indexes+step; for (i=0; i < (ssize_t) height; i++) { p--; indexes--; q--; shear_indexes--; if ((size_t) (y_offset+height+step-i) > image->rows) continue; SetMagickPixelPacket(image,p,indexes,&source); MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity, &source,(MagickRealType) GetPixelOpacity(p),area,&destination); SetPixelPacket(image,&destination,q,shear_indexes); SetMagickPixelPacket(image,p,indexes,&pixel); } MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity, &background,(MagickRealType) background.opacity,area,&destination); SetPixelPacket(image,&destination,--q,--shear_indexes); for (i=0; i < (step-1); i++) SetPixelPacket(image,&background,--q,--shear_indexes); break; } } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_YShearImage) #endif proceed=SetImageProgress(image,YShearImageTag,progress++,image->rows); if (proceed == MagickFalse) 
status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h e a r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShearImage() creates a new image that is a shear_image copy of an existing % one. Shearing slides one edge of an image along the X or Y axis, creating % a parallelogram. An X direction shear slides an edge along the X axis, % while a Y direction shear slides an edge along the Y axis. The amount of % the shear is controlled by a shear angle. For X direction shears, x_shear % is measured relative to the Y axis, and similarly, for Y direction shears % y_shear is measured relative to the X axis. Empty triangles left over from % shearing the image are filled with the background color defined by member % 'background_color' of the image.. ShearImage() allocates the memory % necessary for the new Image structure and returns a pointer to the new image. % % ShearImage() is based on the paper "A Fast Algorithm for General Raster % Rotatation" by Alan W. Paeth. % % The format of the ShearImage method is: % % Image *ShearImage(const Image *image,const double x_shear, % const double y_shear,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o x_shear, y_shear: Specifies the number of degrees to shear the image. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ShearImage(const Image *image,const double x_shear,
  const double y_shear,ExceptionInfo *exception)
{
  Image
    *integral_image,
    *shear_image;

  MagickBooleanType
    status;

  PointInfo
    shear;

  RectangleInfo
    border_info,
    bounds;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Shear angles at non-zero multiples of 90 degrees hit a discontinuity of
    tan() below; reject them.
  */
  if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  /*
    Initialize shear angle.  The clone is returned unchanged when both shear
    slopes are zero.
  */
  integral_image=CloneImage(image,0,0,MagickTrue,exception);
  if (integral_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0))));
  shear.y=tan(DegreesToRadians(fmod(y_shear,360.0)));
  if ((shear.x == 0.0) && (shear.y == 0.0))
    return(integral_image);
  if (SetImageStorageClass(integral_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&integral_image->exception);
      integral_image=DestroyImage(integral_image);
      return(integral_image);
    }
  if (integral_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel);
  /*
    Compute image size.  The offsets center the source inside the enlarged
    sheared canvas.
  */
  bounds.width=image->columns+(ssize_t) floor(fabs(shear.x)*image->rows+0.5);
  bounds.x=(ssize_t) ceil((double) image->columns+((fabs(shear.x)*image->rows)-
    image->columns)/2.0-0.5);
  bounds.y=(ssize_t) ceil((double) image->rows+((fabs(shear.y)*bounds.width)-
    image->rows)/2.0-0.5);
  /*
    Surround image with border.  Ownership of integral_image ends here;
    BorderImage returns a new image.
  */
  integral_image->border_color=integral_image->background_color;
  integral_image->compose=CopyCompositeOp;
  border_info.width=(size_t) bounds.x;
  border_info.height=(size_t) bounds.y;
  shear_image=BorderImage(integral_image,&border_info,exception);
  integral_image=DestroyImage(integral_image);
  if (shear_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Shear the image: an X shear followed by a Y shear, each filling vacated
    triangles with the background color.
  */
  if (shear_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel);
  status=XShearImage(shear_image,shear.x,image->columns,image->rows,bounds.x,
    (ssize_t) (shear_image->rows-image->rows)/2,exception);
  if (status == MagickFalse)
    {
      shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  status=YShearImage(shear_image,shear.y,bounds.width,image->rows,(ssize_t)
    (shear_image->columns-bounds.width)/2,bounds.y,exception);
  if (status == MagickFalse)
    {
      shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  /* Crop the working canvas back to the sheared parallelogram's bounds. */
  status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType)
    image->columns,(MagickRealType) image->rows,MagickFalse,exception);
  shear_image->matte=image->matte;
  shear_image->compose=image->compose;
  shear_image->page.width=0;
  shear_image->page.height=0;
  if (status == MagickFalse)
    shear_image=DestroyImage(shear_image);
  return(shear_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S h e a r R o t a t e I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShearRotateImage() creates a new image that is a rotated copy of an existing
%  one.  Positive angles rotate counter-clockwise (right-hand rule), while
%  negative angles rotate clockwise.  Rotated images are usually larger than
%  the originals and have 'empty' triangular corners.  X axis.  Empty
%  triangles left over from shearing the image are filled with the background
%  color defined by member 'background_color' of the image.
ShearRotateImage
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  ShearRotateImage() is based on the paper "A Fast Algorithm for General
%  Raster Rotation" by Alan W. Paeth.  ShearRotateImage is adapted from a
%  similar method based on the Paeth paper written by Michael Halle of the
%  Spatial Imaging Group, MIT Media Lab.
%
%  The format of the ShearRotateImage method is:
%
%      Image *ShearRotateImage(const Image *image,const double degrees,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: Specifies the number of degrees to rotate the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShearRotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *integral_image,
    *rotate_image;

  MagickBooleanType
    status;

  MagickRealType
    angle;

  PointInfo
    shear;

  RectangleInfo
    border_info,
    bounds;

  size_t
    height,
    rotations,
    shear_width,
    width;

  /*
    Adjust rotation angle: reduce to a residual angle in (-45,45] plus a
    count of whole 90-degree turns handled by IntegralRotateImage().
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  angle=degrees-360.0*(ssize_t) (degrees/360.0);
  if (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /*
    Calculate shear equations.  Paeth decomposition of the residual rotation
    into three shears: shear.x=-tan(angle/2), shear.y=sin(angle).
  */
  integral_image=IntegralRotateImage(image,rotations,exception);
  if (integral_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((shear.x == 0.0) && (shear.y == 0.0))
    return(integral_image);
  if (SetImageStorageClass(integral_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&integral_image->exception);
      integral_image=DestroyImage(integral_image);
      return(integral_image);
    }
  if (integral_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel);
  /*
    Compute maximum bounds for 3 shear operations.
  */
  width=integral_image->columns;
  height=integral_image->rows;
  bounds.width=(size_t) floor(fabs((double) height*shear.x)+width+0.5);
  bounds.height=(size_t) floor(fabs((double) bounds.width*shear.y)+height+0.5);
  shear_width=(size_t) floor(fabs((double) bounds.height*shear.x)+
    bounds.width+0.5);
  bounds.x=(ssize_t) floor((double) ((shear_width > bounds.width) ? width :
    bounds.width-shear_width+2)/2.0+0.5);
  bounds.y=(ssize_t) floor(((double) bounds.height-height+2)/2.0+0.5);
  /*
    Surround image with a border.  Ownership of integral_image ends here;
    BorderImage returns a new image.
  */
  integral_image->border_color=integral_image->background_color;
  integral_image->compose=CopyCompositeOp;
  border_info.width=(size_t) bounds.x;
  border_info.height=(size_t) bounds.y;
  rotate_image=BorderImage(integral_image,&border_info,exception);
  integral_image=DestroyImage(integral_image);
  if (rotate_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Rotate the image: three-pass shear rotation (X shear, Y shear, X shear).
  */
  status=XShearImage(rotate_image,shear.x,width,height,bounds.x,(ssize_t)
    (rotate_image->rows-height)/2,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  status=YShearImage(rotate_image,shear.y,bounds.width,height,(ssize_t)
    (rotate_image->columns-bounds.width)/2,bounds.y,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  status=XShearImage(rotate_image,shear.x,bounds.width,bounds.height,(ssize_t)
    (rotate_image->columns-bounds.width)/2,(ssize_t) (rotate_image->rows-
    bounds.height)/2,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  /* Crop the working canvas back to the rotated image's bounds. */
  status=CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width,
    (MagickRealType) height,MagickTrue,exception);
  rotate_image->matte=image->matte;
  rotate_image->compose=image->compose;
  rotate_image->page.width=0;
  rotate_image->page.height=0;
  if (status == MagickFalse)
    rotate_image=DestroyImage(rotate_image);
  return(rotate_image);
}
/* attribute.c -- MagickCore image-attribute methods (file boundary marker) */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/identify.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/magick.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/segment.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. % % The format of the GetImageBoundingBox method is: % % RectangleInfo GetImageBoundingBox(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o bounds: Method GetImageBoundingBox returns the bounding box of an % image canvas. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ typedef struct _EdgeInfo { double left, right, top, bottom; } EdgeInfo; static double GetEdgeBackgroundFactor(const Image *image, const CacheView *image_view,const GravityType gravity,const size_t width, const size_t height,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double factor; Image *edge_image; PixelInfo background, pixel; RectangleInfo edge_geometry; register const Quantum *p; ssize_t y; /* Determine the percent of image background for this edge. 
*/ switch (gravity) { case NorthWestGravity: case NorthGravity: default: { p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); break; } case NorthEastGravity: case EastGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); break; } case SouthEastGravity: case SouthGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); break; } case SouthWestGravity: case WestGravity: { p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); break; } } GetPixelInfoPixel(image,p,&background); artifact=GetImageArtifact(image,"trim:background-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&background,exception); edge_geometry.width=width; edge_geometry.height=height; edge_geometry.x=x_offset; edge_geometry.y=y_offset; GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry); edge_image=CropImage(image,&edge_geometry,exception); if (edge_image == (Image *) NULL) return(0.0); factor=0.0; edge_view=AcquireVirtualCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { register ssize_t x; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) edge_image->columns; x++) { GetPixelInfoPixel(edge_image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse) factor++; p+=GetPixelChannels(edge_image); } } factor/=((double) edge_image->columns*edge_image->rows); edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); return(factor); } static inline double GetMinEdgeBackgroundFactor(const EdgeInfo *edge) { double factor; factor=MagickMin(MagickMin(MagickMin(edge->left,edge->right),edge->top), edge->bottom); return(factor); } static RectangleInfo GetEdgeBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *edge_view; const char 
*artifact; double background_factor, percent_background; EdgeInfo edge, vertex; Image *edge_image; RectangleInfo bounds; /* Get the image bounding box. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); SetGeometry(image,&bounds); edge_image=CloneImage(image,0,0,MagickTrue,exception); if (edge_image == (Image *) NULL) return(bounds); (void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page); memset(&vertex,0,sizeof(vertex)); edge_view=AcquireVirtualCacheView(edge_image,exception); edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,WestGravity, 1,0,0,0,exception); edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,EastGravity, 1,0,0,0,exception); edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,NorthGravity, 0,1,0,0,exception); edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,SouthGravity, 0,1,0,0,exception); percent_background=1.0; artifact=GetImageArtifact(edge_image,"trim:percent-background"); if (artifact != (const char *) NULL) percent_background=StringToDouble(artifact,(char **) NULL)/100.0; percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon), 1.0); background_factor=GetMinEdgeBackgroundFactor(&edge); for ( ; background_factor < percent_background; background_factor=GetMinEdgeBackgroundFactor(&edge)) { if ((bounds.width == 0) || (bounds.height == 0)) break; if (fabs(edge.left-background_factor) < MagickEpsilon) { /* Trim left edge. 
*/ vertex.left++; bounds.width--; edge.left=GetEdgeBackgroundFactor(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundFactor(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.right-background_factor) < MagickEpsilon) { /* Trim right edge. */ vertex.right++; bounds.width--; edge.right=GetEdgeBackgroundFactor(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundFactor(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.top-background_factor) < MagickEpsilon) { /* Trim top edge. */ vertex.top++; bounds.height--; edge.left=GetEdgeBackgroundFactor(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundFactor(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundFactor(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); continue; } if (fabs(edge.bottom-background_factor) < MagickEpsilon) { /* Trim bottom edge. 
*/ vertex.bottom++; bounds.height--; edge.left=GetEdgeBackgroundFactor(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundFactor(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } } edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); bounds.x=(ssize_t) vertex.left; bounds.y=(ssize_t) vertex.top; if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return(bounds); } MagickExport RectangleInfo GetImageBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *image_view; const char *artifact; MagickBooleanType status; PixelInfo target[3], zero; RectangleInfo bounds; register const Quantum *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); artifact=GetImageArtifact(image,"trim:percent-background"); if (artifact != (const char *) NULL) return(GetEdgeBoundingBox(image,exception)); bounds.width=0; bounds.height=0; bounds.x=(ssize_t) image->columns; bounds.y=(ssize_t) image->rows; GetPixelInfo(image,&target[0]); image_view=AcquireVirtualCacheView(image,exception); p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); return(bounds); } GetPixelInfoPixel(image,p,&target[0]); GetPixelInfo(image,&target[1]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[1]); 
GetPixelInfo(image,&target[2]); p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[2]); status=MagickTrue; GetPixelInfo(image,&zero); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; RectangleInfo bounding_box; register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif bounding_box=bounds; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,p,&pixel); if ((x < bounding_box.x) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.x=x; if ((x > (ssize_t) bounding_box.width) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse)) bounding_box.width=(size_t) x; if ((y < bounding_box.y) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.y=y; if ((y > (ssize_t) bounding_box.height) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse)) bounding_box.height=(size_t) y; p+=GetPixelChannels(image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif { if (bounding_box.x < bounds.x) bounds.x=bounding_box.x; if (bounding_box.y < bounds.y) bounds.y=bounding_box.y; if (bounding_box.width > bounds.width) bounds.width=bounding_box.width; if (bounding_box.height > bounds.height) bounds.height=bounding_box.height; } } image_view=DestroyCacheView(image_view); if ((bounds.width == 0) || (bounds.height == 0)) { /*(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, 
"GeometryDoesNotContainImage","`%s'",image->filename);*/ } else { bounds.width-=(bounds.x-1); bounds.height-=(bounds.y-1); } return(bounds); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDepth() returns the depth of a particular image channel. % % The format of the GetImageDepth method is: % % size_t GetImageDepth(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t *current_depth, depth, number_threads; ssize_t y; /* Compute image depth. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); number_threads=(size_t) GetMagickResourceLimit(ThreadResource); current_depth=(size_t *) AcquireQuantumMemory(number_threads, sizeof(*current_depth)); if (current_depth == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); status=MagickTrue; for (i=0; i < (ssize_t) number_threads; i++) current_depth[i]=1; if ((image->storage_class == PseudoClass) && (image->alpha_trait == UndefinedPixelTrait)) { for (i=0; i < (ssize_t) image->colors; i++) { const int id = GetOpenMPThreadId(); while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && 
(GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && (GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse)) break; current_depth[id]++; } } depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } image_view=AcquireVirtualCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) if ((1UL*QuantumRange) <= MaxMap) { size_t *depth_map; /* Scale pixels to desired (optimized with depth map). */ depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) { unsigned int depth; for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++) { Quantum pixel; QuantumAny range; range=GetQuantumRange(depth); pixel=(Quantum) i; if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range)) break; } depth_map[i]=depth; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & 
UpdatePixelTrait) == 0) continue; if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(p[i])]; } p+=GetPixelChannels(image); } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; depth_map=(size_t *) RelinquishMagickMemory(depth_map); current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } #endif /* Compute pixel depth. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { QuantumAny range; range=GetQuantumRange(current_depth[id]); if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),range)) break; current_depth[id]++; } } p+=GetPixelChannels(image); } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e 
t I m a g e Q u a n t u m D e p t h                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantumDepth() returns the depth of the image rounded up to a
%  legal quantum depth: 8, 16, 32, or 64.  A stored depth greater than 64
%  is returned unchanged.
%
%  The format of the GetImageQuantumDepth method is:
%
%      size_t GetImageQuantumDepth(const Image *image,
%        const MagickBooleanType constrain)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o constrain: a value other than MagickFalse constrains the depth to
%      a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  static const size_t
    legal_depths[] = { 8, 16, 32, 64 };

  register ssize_t
    i;

  size_t
    depth;

  /*
    Round the stored depth up to the smallest legal quantum depth that can
    hold it; leave it untouched when it exceeds every legal value.
  */
  depth=image->depth;
  for (i=0; i < (ssize_t) (sizeof(legal_depths)/sizeof(legal_depths[0])); i++)
    if (depth <= legal_depths[i])
      {
        depth=legal_depths[i];
        break;
      }
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e T y p e                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageType() returns the type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  The format of the GetImageType method is:
%
%      ImageType GetImageType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */
MagickExport ImageType GetImageType(const Image *image)
{
  MagickBooleanType
    alpha;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /*
    Classify from the stored flags only; no pixel data is inspected here.
  */
  alpha=image->alpha_trait != UndefinedPixelTrait ? MagickTrue : MagickFalse;
  if (image->colorspace == CMYKColorspace)
    return(alpha == MagickFalse ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IsImageMonochrome(image) != MagickFalse)
    return(BilevelType);
  if (IsImageGray(image) != MagickFalse)
    return(alpha != MagickFalse ? GrayscaleAlphaType : GrayscaleType);
  if (IsPaletteImage(image) != MagickFalse)
    return(alpha != MagickFalse ? PaletteAlphaType : PaletteType);
  return(alpha != MagickFalse ? TrueColorAlphaType : TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I d e n t i f y I m a g e G r a y                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageGray() returns grayscale if all the pixels in the image have
%  the same red, green, and blue intensities, and bi-level if the intensity
%  is either 0 or QuantumRange.  Otherwise undefined is returned.
%
%  The format of the IdentifyImageGray method is:
%
%      ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A stored gray-family type is authoritative: answer without scanning.
  */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(image->type);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  /*
    Scan pixels: start optimistic (bi-level) and demote as evidence appears;
    the loop condition exits as soon as a non-gray pixel is found.
  */
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (y < (ssize_t) image->rows) && (type != UndefinedType); y++)
  {
    register const Quantum
      *q;

    register ssize_t
      x;

    q=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelGray(image,q) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      if ((type == BilevelType) &&
          (IsPixelMonochrome(image,q) == MagickFalse))
        type=GrayscaleType;
      q+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait))
    type=GrayscaleAlphaType;
  return(type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I d e n t i f y I m a g e M o n o c h r o m e                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageMonochrome() returns MagickTrue if all the pixels in the
%  image have the same red, green, and blue intensities and the intensity is
%  either 0 or QuantumRange.
%
%  The format of the IdentifyImageMonochrome method is:
%
%      MagickBooleanType IdentifyImageMonochrome(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    bilevel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A stored bi-level type is authoritative: answer without scanning.
  */
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /*
    Assume bi-level until a pixel disproves it; the loop condition exits
    early once the flag drops.
  */
  bilevel=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (y < (ssize_t) image->rows) && (bilevel != MagickFalse); y++)
  {
    register const Quantum
      *q;

    register ssize_t
      x;

    q=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelMonochrome(image,q) == MagickFalse)
        {
          bilevel=MagickFalse;
          break;
        }
      q+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  return(bilevel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I d e n t i f y I m a g e T y p e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageType() returns the potential type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  To ensure the image type matches its potential, use SetImageType():
%
%    (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
%  The format of the IdentifyImageType method is:
%
%      ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport ImageType IdentifyImageType(const Image *image,
  ExceptionInfo *exception)
{
  MagickBooleanType
    alpha;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Unlike GetImageType(), this classifies by inspecting pixel data via the
    Identify...() helpers rather than trusting the stored type flags.
  */
  alpha=image->alpha_trait != UndefinedPixelTrait ? MagickTrue : MagickFalse;
  if (image->colorspace == CMYKColorspace)
    return(alpha == MagickFalse ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IdentifyImageMonochrome(image,exception) != MagickFalse)
    return(BilevelType);
  if (IdentifyImageGray(image,exception) != UndefinedType)
    return(alpha != MagickFalse ? GrayscaleAlphaType : GrayscaleType);
  if (IdentifyPaletteImage(image,exception) != MagickFalse)
    return(alpha != MagickFalse ? PaletteAlphaType : PaletteType);
  return(alpha != MagickFalse ? TrueColorAlphaType : TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I s I m a g e G r a y                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageGray() returns MagickTrue if the type of the image is grayscale or
%  bi-level.
%
%  The format of the IsImageGray method is:
%
%      MagickBooleanType IsImageGray(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickExport MagickBooleanType IsImageGray(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if ((image->type == BilevelType) || (image->type == GrayscaleType) || (image->type == GrayscaleAlphaType)) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageMonochrome() returns MagickTrue if type of the image is bi-level. % % The format of the IsImageMonochrome method is: % % MagickBooleanType IsImageMonochrome(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType IsImageMonochrome(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type == BilevelType) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e O p a q u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageOpaque() returns MagickTrue if none of the pixels in the image have % an alpha value other than OpaqueAlpha (QuantumRange). % % Will return true immediatally is alpha channel is not available. % % The format of the IsImageOpaque method is: % % MagickBooleanType IsImageOpaque(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsImageOpaque(const Image *image, ExceptionInfo *exception) { CacheView *image_view; register const Quantum *p; register ssize_t x; ssize_t y; /* Determine if image is opaque. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->alpha_trait == UndefinedPixelTrait) return(MagickTrue); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelAlpha(image,p) != OpaqueAlpha) break; p+=GetPixelChannels(image); } if (x < (ssize_t) image->columns) break; } image_view=DestroyCacheView(image_view); return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageDepth() sets the depth of the image. % % The format of the SetImageDepth method is: % % MagickBooleanType SetImageDepth(Image *image,const size_t depth, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o depth: the image depth. % % o exception: return any errors or warnings in this structure. 
% */
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* A depth at or above the build-time quantum depth needs no requantizing:
     just record it. */
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /* Requantize the colormap entries as well, per enabled channel.
         NOTE(review): `status` is listed in shared() before it is assigned;
         harmless here since the loop body never touches it — confirm. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->colors,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].red),range),range);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].green),range),range);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].blue),range),range);
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].alpha),range),range);
      }
    }
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /* Fast path (integer quantum builds only): when every quantum value fits
     in the MaxMap lookup table, precompute the depth reduction once and
     apply it as a table lookup per sample. */
  if ((1UL*QuantumRange) <= MaxMap)
    {
      Quantum
        *depth_map;

      register ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map).
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=depth_map[ScaleQuantumToMap(q[i])];
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      /* Only record the new depth when every row synced successfully. */
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* Round-trip through the reduced range to drop the extra bits. */
        q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType)
          q[i]),range),range);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t I m a g e T y p e                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageType() sets the type of image.  Choose from these types:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%        OptimizeType
%
%  The format of the SetImageType method is:
%
%      MagickBooleanType SetImageType(Image *image,const ImageType type,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: Image type.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  image_info=AcquireImageInfo();
  image_info->dither=image->dither;
  /* Honor a per-image "dither" artifact override when quantizing. */
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      /* Gray, normalize, then quantize down to a 2-color gray palette. */
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      (void) NormalizeImage(image,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->number_colors=2;
      quantize_info->colorspace=GRAYColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleAlphaType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case PaletteType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      /* Only quantize when the image is not already a small palette. */
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image,exception);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case PaletteBilevelAlphaType:
    {
      ChannelType
        channel_mask;

      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      /* Threshold only the alpha channel to 0/QuantumRange, then quantize. */
      channel_mask=SetImageChannelMask(image,AlphaChannel);
      (void) BilevelImage(image,(double) QuantumRange/2.0,exception);
      (void) SetImageChannelMask(image,channel_mask);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case TrueColorAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case ColorSeparationType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case ColorSeparationAlphaType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(status);
  /* Record the new type only on success. */
  image->type=type;
  return(MagickTrue);
}
tstile.h
/*
 * tstile(): tiled dynamic-programming loop nest over the triangular table S.
 * The structure (c0..c12 iterators, floord/min/max bounds, 16x16 tiles)
 * indicates machine-generated code from a polyhedral compiler — do not edit
 * the loop bounds by hand.
 *
 * NOTE(review): the use of S, RNA, and can_pair() suggests a Nussinov-style
 * RNA secondary-structure recurrence — TODO confirm against the generator
 * input.  N, S, RNA, MAX, min, max, floord, and can_pair are assumed to be
 * defined by the including translation unit.
 */
void tstile() {
  int c0,c1,c2,c3,c5,c6,c7,c9,c11,c10,c4,c12;
  if(1==1)
    /* Outer wavefront over tile diagonals; tiles on a diagonal are
       independent, hence the parallel loop over c1 below. */
    for( c0 = 0; c0 <= floord(N - 2, 8); c0 += 1)
#pragma omp parallel for schedule(dynamic, 1) shared(c0) private(c1,c3,c4,c6,c10)
      for( c1 = (c0 + 1) / 2; c1 <= min(c0, (N - 1) / 16); c1 += 1)
        for( c3 = 16 * c0 - 16 * c1 + 1; c3 <= min(min(N - 1, 16 * c1 + 15), 16 * c0 - 16 * c1 + 16); c3 += 1) {
          /* Reduction over split points c10 within earlier tiles. */
          for( c4 = 0; c4 <= c0 - c1; c4 += 1)
            for( c6 = max(-N + 16 * c1 + 1, -N + c3 + 1); c6 <= min(0, -N + 16 * c1 + 16); c6 += 1) {
              for( c10 = 16 * c4; c10 <= min(c3 - 1, 16 * c4 + 15); c10 += 1)
                S[(-c6)][(c3-c6)] = MAX(S[(-c6)][c10+(-c6)] + S[c10+(-c6)+1][(c3-c6)], S[(-c6)][(c3-c6)]);
              /* Pairing term applied once, on the last contributing tile. */
              if (c1 + c4 == c0 && 16 * c0 + c6 + 15 >= 16 * c1 + c3)
                S[(-c6)][(c3-c6)] = MAX(S[(-c6)][(c3-c6)], S[(-c6)+1][(c3-c6)-1] + can_pair(RNA, (-c6), (c3-c6)));
            }
          /* Remainder iterations where only the pairing term applies. */
          for( c4 = max(c0 - c1 + 1, -c1 + (N + c3) / 16 - 1); c4 <= min((N - 1) / 16, -c1 + (N + c3 - 1) / 16); c4 += 1)
            for( c6 = max(max(-N + 16 * c1 + 1, -N + c3 + 1), c3 - 16 * c4 - 15); c6 <= min(-N + 16 * c1 + 16, c3 - 16 * c4); c6 += 1)
              S[(-c6)][(c3-c6)] = MAX(S[(-c6)][(c3-c6)], S[(-c6)+1][(c3-c6)-1] + can_pair(RNA, (-c6), (c3-c6)));
        }
}
updater_basemaker-inl.h
/*! * Copyright 2014 by Contributors * \file updater_basemaker-inl.h * \brief implement a common tree constructor * \author Tianqi Chen */ #ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_ #define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_ #include "xgboost/base.h" #include "xgboost/tree_updater.h" #include <vector> #include <algorithm> #include <string> #include <limits> #include <utility> #include "./param.h" #include "../common/sync.h" #include "../common/io.h" #include "../common/random.h" #include "../common/quantile.h" namespace xgboost { namespace tree { /*! * \brief base tree maker class that defines common operation * needed in tree making */ class BaseMaker: public TreeUpdater { public: void Init(const std::vector<std::pair<std::string, std::string> >& args) override { param.InitAllowUnknown(args); } protected: // helper to collect and query feature meta information struct FMetaHelper { public: /*! \brief find type of each feature, use column format */ inline void InitByCol(DMatrix* p_fmat, const RegTree& tree) { fminmax.resize(tree.param.num_feature * 2); std::fill(fminmax.begin(), fminmax.end(), -std::numeric_limits<bst_float>::max()); // start accumulating statistics dmlc::DataIter<ColBatch>* iter = p_fmat->ColIterator(); iter->BeforeFirst(); while (iter->Next()) { const ColBatch& batch = iter->Value(); for (bst_uint i = 0; i < batch.size; ++i) { const bst_uint fid = batch.col_index[i]; const ColBatch::Inst& c = batch[i]; if (c.length != 0) { fminmax[fid * 2 + 0] = std::max(-c[0].fvalue, fminmax[fid * 2 + 0]); fminmax[fid * 2 + 1] = std::max(c[c.length - 1].fvalue, fminmax[fid * 2 + 1]); } } } rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax), fminmax.size()); } // get feature type, 0:empty 1:binary 2:real inline int Type(bst_uint fid) const { CHECK_LT(fid * 2 + 1, fminmax.size()) << "FeatHelper fid exceed query bound "; bst_float a = fminmax[fid * 2]; bst_float b = fminmax[fid * 2 + 1]; if (a == -std::numeric_limits<bst_float>::max()) return 0; if (-a 
== b) { return 1; } else { return 2; } } inline bst_float MaxValue(bst_uint fid) const { return fminmax[fid *2 + 1]; } inline void SampleCol(float p, std::vector<bst_uint> *p_findex) const { std::vector<bst_uint> &findex = *p_findex; findex.clear(); for (size_t i = 0; i < fminmax.size(); i += 2) { const bst_uint fid = static_cast<bst_uint>(i / 2); if (this->Type(fid) != 0) findex.push_back(fid); } unsigned n = static_cast<unsigned>(p * findex.size()); std::shuffle(findex.begin(), findex.end(), common::GlobalRandom()); findex.resize(n); // sync the findex if it is subsample std::string s_cache; common::MemoryBufferStream fc(&s_cache); dmlc::Stream& fs = fc; if (rabit::GetRank() == 0) { fs.Write(findex); } rabit::Broadcast(&s_cache, 0); fs.Read(&findex); } private: std::vector<bst_float> fminmax; }; // ------static helper functions ------ // helper function to get to next level of the tree /*! \brief this is helper function for row based data*/ inline static int NextLevel(const RowBatch::Inst &inst, const RegTree &tree, int nid) { const RegTree::Node &n = tree[nid]; bst_uint findex = n.split_index(); for (unsigned i = 0; i < inst.length; ++i) { if (findex == inst[i].index) { if (inst[i].fvalue < n.split_cond()) { return n.cleft(); } else { return n.cright(); } } } return n.cdefault(); } /*! \brief get number of omp thread in current context */ inline static int get_nthread() { int nthread; #pragma omp parallel { nthread = omp_get_num_threads(); } return nthread; } // ------class member helpers--------- /*! 
\brief initialize temp data structure */ inline void InitData(const std::vector<bst_gpair> &gpair, const DMatrix &fmat, const RegTree &tree) { CHECK_EQ(tree.param.num_nodes, tree.param.num_roots) << "TreeMaker: can only grow new tree"; const std::vector<unsigned> &root_index = fmat.info().root_index; { // setup position position.resize(gpair.size()); if (root_index.size() == 0) { std::fill(position.begin(), position.end(), 0); } else { for (size_t i = 0; i < position.size(); ++i) { position[i] = root_index[i]; CHECK_LT(root_index[i], (unsigned)tree.param.num_roots) << "root index exceed setting"; } } // mark delete for the deleted datas for (size_t i = 0; i < position.size(); ++i) { if (gpair[i].hess < 0.0f) position[i] = ~position[i]; } // mark subsample if (param.subsample < 1.0f) { std::bernoulli_distribution coin_flip(param.subsample); auto& rnd = common::GlobalRandom(); for (size_t i = 0; i < position.size(); ++i) { if (gpair[i].hess < 0.0f) continue; if (!coin_flip(rnd)) position[i] = ~position[i]; } } } { // expand query qexpand.reserve(256); qexpand.clear(); for (int i = 0; i < tree.param.num_roots; ++i) { qexpand.push_back(i); } this->UpdateNode2WorkIndex(tree); } } /*! \brief update queue expand add in new leaves */ inline void UpdateQueueExpand(const RegTree &tree) { std::vector<int> newnodes; for (size_t i = 0; i < qexpand.size(); ++i) { const int nid = qexpand[i]; if (!tree[nid].is_leaf()) { newnodes.push_back(tree[nid].cleft()); newnodes.push_back(tree[nid].cright()); } } // use new nodes for qexpand qexpand = newnodes; this->UpdateNode2WorkIndex(tree); } // return decoded position inline int DecodePosition(bst_uint ridx) const { const int pid = position[ridx]; return pid < 0 ? ~pid : pid; } // encode the encoded position value for ridx inline void SetEncodePosition(bst_uint ridx, int nid) { if (position[ridx] < 0) { position[ridx] = ~nid; } else { position[ridx] = nid; } } /*! 
* \brief this is helper function uses column based data structure, * reset the positions to the lastest one * \param nodes the set of nodes that contains the split to be used * \param p_fmat feature matrix needed for tree construction * \param tree the regression tree structure */ inline void ResetPositionCol(const std::vector<int> &nodes, DMatrix *p_fmat, const RegTree &tree) { // set the positions in the nondefault this->SetNonDefaultPositionCol(nodes, p_fmat, tree); this->SetDefaultPostion(p_fmat, tree); } /*! * \brief helper function to set the non-leaf positions to default direction. * This function can be applied multiple times and will get the same result. * \param p_fmat feature matrix needed for tree construction * \param tree the regression tree structure */ inline void SetDefaultPostion(DMatrix *p_fmat, const RegTree &tree) { // set rest of instances to default position const RowSet &rowset = p_fmat->buffered_rowset(); // set default direct nodes to default // for leaf nodes that are not fresh, mark then to ~nid, // so that they are ignored in future statistics collection const bst_omp_uint ndata = static_cast<bst_omp_uint>(rowset.size()); #pragma omp parallel for schedule(static) for (bst_omp_uint i = 0; i < ndata; ++i) { const bst_uint ridx = rowset[i]; const int nid = this->DecodePosition(ridx); if (tree[nid].is_leaf()) { // mark finish when it is not a fresh leaf if (tree[nid].cright() == -1) { position[ridx] = ~nid; } } else { // push to default branch if (tree[nid].default_left()) { this->SetEncodePosition(ridx, tree[nid].cleft()); } else { this->SetEncodePosition(ridx, tree[nid].cright()); } } } } /*! * \brief this is helper function uses column based data structure, * to CORRECT the positions of non-default directions that WAS set to default * before calling this function. * \param batch The column batch * \param sorted_split_set The set of index that contains split solutions. 
* \param tree the regression tree structure */ inline void CorrectNonDefaultPositionByBatch( const ColBatch& batch, const std::vector<bst_uint> &sorted_split_set, const RegTree &tree) { for (size_t i = 0; i < batch.size; ++i) { ColBatch::Inst col = batch[i]; const bst_uint fid = batch.col_index[i]; auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid); if (it != sorted_split_set.end() && *it == fid) { const bst_omp_uint ndata = static_cast<bst_omp_uint>(col.length); #pragma omp parallel for schedule(static) for (bst_omp_uint j = 0; j < ndata; ++j) { const bst_uint ridx = col[j].index; const float fvalue = col[j].fvalue; const int nid = this->DecodePosition(ridx); CHECK(tree[nid].is_leaf()); int pid = tree[nid].parent(); // go back to parent, correct those who are not default if (!tree[nid].is_root() && tree[pid].split_index() == fid) { if (fvalue < tree[pid].split_cond()) { this->SetEncodePosition(ridx, tree[pid].cleft()); } else { this->SetEncodePosition(ridx, tree[pid].cright()); } } } } } } /*! * \brief this is helper function uses column based data structure, * \param nodes the set of nodes that contains the split to be used * \param tree the regression tree structure * \param out_split_set The split index set */ inline void GetSplitSet(const std::vector<int> &nodes, const RegTree &tree, std::vector<unsigned>* out_split_set) { std::vector<unsigned>& fsplits = *out_split_set; fsplits.clear(); // step 1, classify the non-default data into right places for (size_t i = 0; i < nodes.size(); ++i) { const int nid = nodes[i]; if (!tree[nid].is_leaf()) { fsplits.push_back(tree[nid].split_index()); } } std::sort(fsplits.begin(), fsplits.end()); fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin()); } /*! 
* \brief this is helper function uses column based data structure, * update all positions into nondefault branch, if any, ignore the default branch * \param nodes the set of nodes that contains the split to be used * \param p_fmat feature matrix needed for tree construction * \param tree the regression tree structure */ virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes, DMatrix *p_fmat, const RegTree &tree) { std::vector<unsigned> fsplits; this->GetSplitSet(nodes, tree, &fsplits); dmlc::DataIter<ColBatch> *iter = p_fmat->ColIterator(fsplits); while (iter->Next()) { const ColBatch &batch = iter->Value(); for (size_t i = 0; i < batch.size; ++i) { ColBatch::Inst col = batch[i]; const bst_uint fid = batch.col_index[i]; const bst_omp_uint ndata = static_cast<bst_omp_uint>(col.length); #pragma omp parallel for schedule(static) for (bst_omp_uint j = 0; j < ndata; ++j) { const bst_uint ridx = col[j].index; const float fvalue = col[j].fvalue; const int nid = this->DecodePosition(ridx); // go back to parent, correct those who are not default if (!tree[nid].is_leaf() && tree[nid].split_index() == fid) { if (fvalue < tree[nid].split_cond()) { this->SetEncodePosition(ridx, tree[nid].cleft()); } else { this->SetEncodePosition(ridx, tree[nid].cright()); } } } } } } /*! 
\brief helper function to get statistics from a tree */ template<typename TStats> inline void GetNodeStats(const std::vector<bst_gpair> &gpair, const DMatrix &fmat, const RegTree &tree, std::vector< std::vector<TStats> > *p_thread_temp, std::vector<TStats> *p_node_stats) { std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp; const MetaInfo &info = fmat.info(); thread_temp.resize(this->get_nthread()); p_node_stats->resize(tree.param.num_nodes); #pragma omp parallel { const int tid = omp_get_thread_num(); thread_temp[tid].resize(tree.param.num_nodes, TStats(param)); for (size_t i = 0; i < qexpand.size(); ++i) { const unsigned nid = qexpand[i]; thread_temp[tid][nid].Clear(); } } const RowSet &rowset = fmat.buffered_rowset(); // setup position const bst_omp_uint ndata = static_cast<bst_omp_uint>(rowset.size()); #pragma omp parallel for schedule(static) for (bst_omp_uint i = 0; i < ndata; ++i) { const bst_uint ridx = rowset[i]; const int nid = position[ridx]; const int tid = omp_get_thread_num(); if (nid >= 0) { thread_temp[tid][nid].Add(gpair, info, ridx); } } // sum the per thread statistics together for (size_t j = 0; j < qexpand.size(); ++j) { const int nid = qexpand[j]; TStats &s = (*p_node_stats)[nid]; s.Clear(); for (size_t tid = 0; tid < thread_temp.size(); ++tid) { s.Add(thread_temp[tid][nid]); } } } /*! \brief common helper data structure to build sketch */ struct SketchEntry { /*! \brief total sum of amount to be met */ double sum_total; /*! \brief statistics used in the sketch */ double rmin, wmin; /*! \brief last seen feature value */ bst_float last_fvalue; /*! \brief current size of sketch */ double next_goal; // pointer to the sketch to put things in common::WXQuantileSketch<bst_float, bst_float> *sketch; // initialize the space inline void Init(unsigned max_size) { next_goal = -1.0f; rmin = wmin = 0.0f; sketch->temp.Reserve(max_size + 1); sketch->temp.size = 0; } /*! 
 * \brief push a new element to sketch
 * \param fvalue feature value, comes in sorted ascending order
 * \param w weight
 * \param max_size maximum number of summary entries the sketch may hold
 */
inline void Push(bst_float fvalue, bst_float w, unsigned max_size) {
  // First call: record the value, defer any emission until the value changes.
  if (next_goal == -1.0f) {
    next_goal = 0.0f;
    last_fvalue = fvalue;
    wmin = w;
    return;
  }
  if (last_fvalue != fvalue) {
    // rmin/rmax bracket the rank range of last_fvalue; wmin is its weight.
    double rmax = rmin + wmin;
    if (rmax >= next_goal && sketch->temp.size != max_size) {
      // Only emit if strictly increasing, to keep the summary sorted.
      if (sketch->temp.size == 0 ||
          last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
        // push to sketch
        sketch->temp.data[sketch->temp.size] =
            common::WXQuantileSketch<bst_float, bst_float>::
            Entry(static_cast<bst_float>(rmin),
                  static_cast<bst_float>(rmax),
                  static_cast<bst_float>(wmin), last_fvalue);
        CHECK_LT(sketch->temp.size, max_size)
            << "invalid maximum size max_size=" << max_size
            << ", stemp.size" << sketch->temp.size;
        ++sketch->temp.size;
      }
      if (sketch->temp.size == max_size) {
        // Sketch full: set an unreachable goal so nothing more is emitted.
        next_goal = sum_total * 2.0f + 1e-5f;
      } else {
        // Next goal: evenly spaced rank targets over the total weight.
        next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size);
      }
    } else {
      if (rmax >= next_goal) {
        // NOTE(review): log message contains typo "naxt_goal" (next_goal).
        LOG(TRACKER) << "INFO: rmax=" << rmax
                     << ", sum_total=" << sum_total
                     << ", naxt_goal=" << next_goal
                     << ", size=" << sketch->temp.size;
      }
    }
    // Advance the running rank window to the new value.
    rmin = rmax;
    wmin = w;
    last_fvalue = fvalue;
  } else {
    // Same value as before: merge weights.
    wmin += w;
  }
}
/*! \brief push final unfinished value to the sketch */
inline void Finalize(unsigned max_size) {
  double rmax = rmin + wmin;
  if (sketch->temp.size == 0 ||
      last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
    CHECK_LE(sketch->temp.size, max_size)
        << "Finalize: invalid maximum size, max_size=" << max_size
        << ", stemp.size=" << sketch->temp.size;
    // push to sketch
    sketch->temp.data[sketch->temp.size] =
        common::WXQuantileSketch<bst_float, bst_float>::
        Entry(static_cast<bst_float>(rmin),
              static_cast<bst_float>(rmax),
              static_cast<bst_float>(wmin), last_fvalue);
    ++sketch->temp.size;
  }
  // Merge the temporary summary into the sketch proper.
  sketch->PushTemp();
}
};
/*! \brief training parameter of tree grower */
TrainParam param;
/*!
\brief queue of nodes to be expanded */ std::vector<int> qexpand; /*! * \brief map active node to is working index offset in qexpand, * can be -1, which means the node is node actively expanding */ std::vector<int> node2workindex; /*! * \brief position of each instance in the tree * can be negative, which means this position is no longer expanding * see also Decode/EncodePosition */ std::vector<int> position; private: inline void UpdateNode2WorkIndex(const RegTree &tree) { // update the node2workindex std::fill(node2workindex.begin(), node2workindex.end(), -1); node2workindex.resize(tree.param.num_nodes); for (size_t i = 0; i < qexpand.size(); ++i) { node2workindex[qexpand[i]] = static_cast<int>(i); } } }; } // namespace tree } // namespace xgboost #endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
GB_unop__trunc_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__trunc_fp64_fp64)
// op(A') function:  GB (_unop_tran__trunc_fp64_fp64)

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = trunc (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = trunc (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    double aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ;                \
    Cx [pC] = trunc (z) ;           \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TRUNC || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__trunc_fp64_fp64)
(
    double *Cx,                     // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,      // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // loop index declared at function scope for the OpenMP "for" pragmas
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense/sparse case: every entry 0..anz-1 is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = trunc (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries absent from the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = trunc (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__trunc_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is textually included; it expands using the
    // GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
thread_thread_threadid.c
// RUN: %libomp-compile-and-run // REQUIRES: abt #include "omp_testsuite.h" #include <string.h> #include <stdio.h> int test_thread_thread_threadid(int num_threads) { int i, vals[num_threads]; memset(vals, 0, sizeof(int) * num_threads); omp_set_max_active_levels(2); #pragma omp parallel for num_threads(num_threads) for (i = 0; i < num_threads; i++) { int omp_thread_id = omp_get_thread_num(); ABT_thread abt_thread; ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread)); int local_vals[num_threads]; memset(local_vals, 0, sizeof(int) * num_threads); int j; #pragma omp parallel for num_threads(num_threads) for (j = 0; j < num_threads; j++) { int l2_omp_thread_id = omp_get_thread_num(); ABT_thread l2_abt_thread; ABT_EXIT_IF_FAIL(ABT_thread_self(&l2_abt_thread)); // Context switching in OpenMP. #pragma omp taskyield int l2_omp_thread_id2 = omp_get_thread_num(); if (l2_omp_thread_id == l2_omp_thread_id2) { local_vals[j] += 1; } ABT_thread l2_abt_thread2; ABT_EXIT_IF_FAIL(ABT_thread_self(&l2_abt_thread2)); ABT_bool l2_abt_thread_equal; ABT_EXIT_IF_FAIL(ABT_thread_equal(l2_abt_thread, l2_abt_thread2, &l2_abt_thread_equal)); if (l2_abt_thread_equal == ABT_TRUE) { local_vals[j] += 2; } // Context switching in Argobots. ABT_EXIT_IF_FAIL(ABT_thread_yield()); int l2_omp_thread_id3 = omp_get_thread_num(); if (l2_omp_thread_id2 == l2_omp_thread_id3) { local_vals[j] += 4; } } // Check child threads. 
int child_fail = 0; for (j = 0; j < num_threads; j++) { if (local_vals[i] != 7) { child_fail = 1; } } if (!child_fail) { vals[i] += 1; } int omp_thread_id2 = omp_get_thread_num(); if (omp_thread_id == omp_thread_id2) { vals[i] += 2; } ABT_thread abt_thread2; ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread2)); ABT_bool abt_thread_equal; ABT_EXIT_IF_FAIL(ABT_thread_equal(abt_thread, abt_thread2, &abt_thread_equal)); if (abt_thread_equal == ABT_TRUE) { vals[i] += 4; } } for (i = 0; i < num_threads; i++) { if (vals[i] != 7) { printf("vals[%d] == %d\n", i, vals[i]); return 0; } } return 1; } int main() { int i, num_failed = 0; for (i = 0; i < REPETITIONS; i++) { if (!test_thread_thread_threadid(i + 1)) { num_failed++; } } return num_failed; }
impliedBarrier.c
// OpenMP Implied Barrier Example

// Inclusions
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

// Demonstrates the implied barrier at the end of each "omp for" worksharing
// construct by timing two parallel loops inside a single parallel region.
//
// Usage: ./impliedBarrier [n]
//   n - number of loop iterations (defaults to 8 if not given)
int main( int argc, char** argv )
{
    int i = 0;                                  // Loop Iterator
    // BUG FIX: n was declared as 0 and never assigned, so both loops ran
    // zero iterations.  Read it from the command line, defaulting to 8.
    int n = ( argc > 1 ) ? atoi( argv[1] ) : 8; // Number of Iterations
    double start  = 0.0;                        // Start Time
    double middle = 0.0;                        // Middle Time
    double end    = 0.0;                        // End Time
    double for1   = 0.0;                        // For Loop 1 Time
    double for2   = 0.0;                        // For Loop 2 Time
    double total  = 0.0;                        // Total Time

    // Parallel Region
    #pragma omp parallel \
        shared( n, start, middle, end ) \
        private( i )
    {
        // BUG FIX: previously every thread wrote the shared timing variables
        // concurrently (a data race).  "single" lets exactly one thread take
        // the timestamp, and its own implied barrier keeps all threads
        // aligned at each measurement point.
        #pragma omp single
        start = omp_get_wtime( );

        #pragma omp for // Parallelize For Loop
        for( i = 0; i < n; i++ ) {              // Iterate Through
            printf( "Thread %d of %d - Iteration %d\n",
                    omp_get_thread_num( ), omp_get_max_threads( ), i );
        } // <-- implied barrier at the end of the worksharing loop

        #pragma omp single
        middle = omp_get_wtime( );

        #pragma omp for // Parallelize For Loop
        for( i = 0; i < n; i++ ) {              // Iterate Through
            printf( "Thread %d of %d - Iteration %d\n",
                    omp_get_thread_num( ), omp_get_max_threads( ), i );
        } // <-- implied barrier at the end of the worksharing loop

        #pragma omp single
        end = omp_get_wtime( );
    }

    // Calculate Time
    for1  = middle - start;
    for2  = end - middle;
    total = end - start;

    // Display Time
    printf( "For Loop 1: %0.9lf\n", for1 );
    printf( "For Loop 2: %0.9lf\n", for2 );
    printf( "Total Time: %0.9lf\n", total );

    return 0;
}
// End impliedBarrier.c - EWG SDG
sw-full-ls.c
/* $Id: sw-full-ls.c,v 1.16 2009/06/12 21:27:35 rumble Exp $ */ #include <assert.h> #include <ctype.h> #include <errno.h> #include <math.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <unistd.h> #include <zlib.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/time.h> #include <limits.h> #include "../common/fasta.h" #include "../common/sw-full-common.h" #include "../common/sw-full-ls.h" #include "../common/util.h" #include "../common/time_counter.h" typedef struct swcell { int score_north; int score_west; int score_northwest; int8_t back_north; int8_t back_west; int8_t back_northwest; } swcell; #define FROM_NORTH_NORTH 0x1 #define FROM_NORTH_NORTHWEST 0x2 #define FROM_WEST_NORTHWEST 0x3 #define FROM_WEST_WEST 0x4 #define FROM_NORTHWEST_NORTH 0x5 #define FROM_NORTHWEST_NORTHWEST 0x6 #define FROM_NORTHWEST_WEST 0x7 #define BACK_INSERTION 0x1 #define BACK_DELETION 0x2 #define BACK_MATCH_MISMATCH 0x3 static int initialised; static int8_t *db, *qr; static int dblen, qrlen; static int a_gap_open, a_gap_ext; static int b_gap_open, b_gap_ext; static int match, mismatch; static struct swcell *swmatrix; static int8_t *backtrace; static char *dbalign, *qralign; static int anchor_width; /* statistics */ static uint64_t swcells, swinvocs; static time_counter sw_tc; #pragma omp threadprivate(initialised,db,qr,dblen,qrlen,a_gap_open,a_gap_ext,b_gap_open,b_gap_ext,\ match,mismatch,swmatrix,backtrace,dbalign,qralign,anchor_width,sw_tc,swcells,swinvocs) inline static void init_cell(int idx, int local_alignment) { if (local_alignment) { swmatrix[idx].score_northwest = 0; swmatrix[idx].score_north = -b_gap_open; swmatrix[idx].score_west = -a_gap_open; } else { swmatrix[idx].score_northwest = -INT_MAX/2; swmatrix[idx].score_north = -INT_MAX/2; swmatrix[idx].score_west = -INT_MAX/2; } swmatrix[idx].back_northwest = 0; swmatrix[idx].back_north = 0; swmatrix[idx].back_west = 0; } #ifdef DEBUG_SW static void 
print_sw(int lena, int lenb) { int i,j; printf(" %5s ","-"); for (j=1; j< lenb+1; j++) { printf("%5c ",base_translate(qr[j-1],false)); } printf("\n"); //rows for (i=0; i<lena+1; i++) { //cols if (i==0) { printf(" - "); } else { printf("%5c ",base_translate(db[i-1],false)); } for (j=0; j<lenb+1; j++) { swcell curr=swmatrix[j*(lena+1)+i]; int tmp=0; tmp=MAX(curr.score_north,curr.score_west); tmp=MAX(tmp,curr.score_northwest); if (tmp<-1000) { printf("%5d ",-99); } else { //printf("%5d ",tmp); printf("%5d/%5d/%5d ", curr.score_north,curr.score_west,curr.score_northwest); } } printf("\n"); } } static void print_sw_backtrace(int lena, int lenb) { int i,j; printf(" %5s ","-"); for (j=1; j< lenb+1; j++) { printf("%5c ",base_translate(qr[j-1],false)); } printf("\n"); //rows for (i=0; i<lena+1; i++) { //cols if (i==0) { printf(" - "); } else { printf("%5c ",base_translate(db[i-1],false)); } for (j=0; j<lenb+1; j++) { swcell curr=swmatrix[j*(lena+1)+i]; int btrace[3]={0,0,0}; int maxscore=0; maxscore=MAX(curr.score_north,curr.score_west); maxscore=MAX(maxscore,curr.score_northwest); if (curr.score_west==maxscore) { btrace[0]=curr.back_west; } if (curr.score_northwest==maxscore) { btrace[1]=curr.back_northwest; } if (curr.score_north==maxscore) { btrace[2]=curr.back_north; } printf("%d/%d/%d ",btrace[0],btrace[1],btrace[2]); } printf("\n"); } } #endif static int full_sw(int lena, int lenb, int threshscore, int maxscore, int *iret, int *jret, bool revcmpl, struct anchor * anchors, int anchors_cnt, int local_alignment) { //fprintf(stderr,"Executing full_sw\n"); int max_i=0; int max_j=0; int i, j; //int sw_band, ne_band; int score, ms, a_go, a_ge, b_go, b_ge, tmp; int8_t tmp2; struct anchor rectangle; /* shut up gcc */ j = 0; score = 0; a_go = a_gap_open; a_ge = a_gap_ext; b_go = b_gap_open; b_ge = b_gap_ext; if (anchors != NULL && anchor_width >= 0) { anchor_join(anchors, anchors_cnt, &rectangle); anchor_widen(&rectangle, anchor_width); } else { struct anchor tmp_anchors[2]; 
tmp_anchors[0].x = 0; tmp_anchors[0].y = (lenb * match - threshscore) / match; tmp_anchors[0].length = 1; tmp_anchors[0].width = 1; tmp_anchors[1].x = lena-1; tmp_anchors[1].y = lenb-1-tmp_anchors[0].y; tmp_anchors[1].length = 1; tmp_anchors[1].width = 1; anchor_join(tmp_anchors, 2, &rectangle); } for (j = 0; j < lena + 1; j++) { init_cell(j,1); } /* for (i = 0; i < lenb + 1; i++) { int idx = i * (lena + 1); swmatrix[idx].score_northwest = 0; swmatrix[idx].score_north = 0; swmatrix[idx].score_west = 0; swmatrix[idx].back_northwest = 0; swmatrix[idx].back_north = 0; swmatrix[idx].back_west = 0; } */ /* * Figure out our band. * We can actually skip computation of a significant number of * cells, which could never be part of an alignment corresponding * to our threshhold score. */ //sw_band = ((lenb * match - threshscore + match - 1) / match) + 1; //ne_band = lena - (lenb - sw_band); for (i = 0; i < lenb; i++) { /* * computing row i of virtual matrix, stored in row i+1 */ int x_min, x_max; anchor_get_x_range(&rectangle, lena, lenb, i, &x_min, &x_max); if (!local_alignment) { //init_cell((i + 1) * (lena + 1) + (x_min - 1) + 1, x_min == 0 ? 1 : 0); init_cell((i + 1) * (lena + 1) + (x_min - 1) + 1, 0); } else { init_cell((i + 1) * (lena + 1) + (x_min - 1) + 1,1); } //if (x_min > 0) { //} swcells += x_max - x_min + 1; for (j = x_min; j <= x_max; j++) { /* * computing column j of virtual matrix, stored in column j+1 */ struct swcell *cell_nw, *cell_n, *cell_w, *cell_cur; cell_nw = &swmatrix[i * (lena + 1) + j]; cell_n = cell_nw + 1; cell_w = cell_nw + (lena + 1); cell_cur = cell_w + 1; /* banding */ //if (i >= sw_band + j) { //memset(cell_cur, 0, sizeof(*cell_cur)); //continue; //} //if (j >= ne_band + i) { //memset(cell_cur, 0, sizeof(*cell_cur)); //break; //} /* * northwest */ ms = (db[j] == qr[i]) ? 
match : mismatch; if (!revcmpl) { tmp = cell_nw->score_northwest + ms; tmp2 = FROM_NORTHWEST_NORTHWEST; if (cell_nw->score_north + ms > tmp) { tmp = cell_nw->score_north + ms; tmp2 = FROM_NORTHWEST_NORTH; } if (cell_nw->score_west + ms > tmp) { tmp = cell_nw->score_west + ms; tmp2 = FROM_NORTHWEST_WEST; } } else { tmp = cell_nw->score_west + ms; tmp2 = FROM_NORTHWEST_WEST; if (cell_nw->score_north + ms > tmp) { tmp = cell_nw->score_north + ms; tmp2 = FROM_NORTHWEST_NORTH; } if (cell_nw->score_northwest + ms > tmp) { tmp = cell_nw->score_northwest + ms; tmp2 = FROM_NORTHWEST_NORTHWEST; } } if (tmp <= 0 && local_alignment) tmp = tmp2 = 0; cell_cur->score_northwest = tmp; cell_cur->back_northwest = tmp2; /* * north */ if (!revcmpl) { tmp = cell_n->score_northwest - b_go - b_ge; tmp2 = FROM_NORTH_NORTHWEST; if (cell_n->score_north - b_ge > tmp) { tmp = cell_n->score_north - b_ge; tmp2 = FROM_NORTH_NORTH; } } else { tmp = cell_n->score_north - b_ge; tmp2 = FROM_NORTH_NORTH; if (cell_n->score_northwest - b_go - b_ge > tmp) { tmp = cell_n->score_northwest - b_go - b_ge; tmp2 = FROM_NORTH_NORTHWEST; } } if (tmp <= 0 && local_alignment) tmp = tmp2 = 0; cell_cur->score_north = tmp; cell_cur->back_north = tmp2; /* * west */ if (!revcmpl) { tmp = cell_w->score_northwest - a_go - a_ge; tmp2 = FROM_WEST_NORTHWEST; if (cell_w->score_west - a_ge > tmp) { tmp = cell_w->score_west - a_ge; tmp2 = FROM_WEST_WEST; } } else { tmp = cell_w->score_west - a_ge; tmp2 = FROM_WEST_WEST; if (cell_w->score_northwest - a_go - a_ge > tmp) { tmp = cell_w->score_northwest - a_go - a_ge; tmp2 = FROM_WEST_NORTHWEST; } } if (tmp <= 0 && local_alignment) tmp = tmp2 = 0; cell_cur->score_west = tmp; cell_cur->back_west = tmp2; /* * max score */ if (local_alignment || i==lenb-1) { int tmp; tmp = MAX(cell_cur->score_north, cell_cur->score_northwest); tmp = MAX(tmp, cell_cur->score_west); if (tmp>score) { score=tmp; max_i = i; max_j = j; } } if (score == maxscore && local_alignment) break; } if (score == 
maxscore && local_alignment) break; if (i+1 < lenb) { int next_x_min, next_x_max; anchor_get_x_range(&rectangle, lena, lenb, i+1, &next_x_min, &next_x_max); for (j = x_max + 1; j <= next_x_max; j++) { init_cell((i + 1) * (lena + 1) + (j + 1),local_alignment); } } } *iret = max_i; *jret = max_j; #ifdef DEBUG_SW fprintf(stderr,"Returning i = %d, j= %d, score= %d , maxscore=%d\n",i,j,score,maxscore); print_sw(lena,lenb); print_sw_backtrace(lena,lenb); fprintf(stderr,"Final score is %d\n",score); #endif if (score == maxscore || !local_alignment) return score; else if (anchors != NULL) return full_sw(lena, lenb, threshscore, maxscore, iret, jret, revcmpl, NULL, 0,local_alignment); else { assert(0); return 0; } } /* * Fill in the backtrace in order to do a pretty printout. * * Returns the beginning matrix cell (i, j) in 'sfr->read_start' and * 'sfr->genome_start'. * * The return value is the first valid offset in the backtrace buffer. */ static int do_backtrace(int lena, int i, int j, struct sw_full_results *sfr) { struct swcell *cell; int k, from, fromscore; cell = &swmatrix[(i + 1) * (lena + 1) + j + 1]; from = cell->back_northwest; fromscore = cell->score_northwest; if (cell->score_west > fromscore) { from = cell->back_west; fromscore = cell->score_west; } if (cell->score_north > fromscore) from = cell->back_north; assert(from != 0); /* fill out the backtrace */ k = (dblen + qrlen) - 1; while (i >= 0 && j >= 0) { //printf("Got cell %d , %d for backtrace\n",i+1,j+1); assert(k >= 0); cell = NULL; /* common operations first */ switch (from) { case FROM_NORTH_NORTH: case FROM_NORTH_NORTHWEST: backtrace[k] = BACK_DELETION; sfr->deletions++; sfr->read_start = i--; break; case FROM_WEST_WEST: case FROM_WEST_NORTHWEST: backtrace[k] = BACK_INSERTION; sfr->insertions++; sfr->genome_start = j--; break; case FROM_NORTHWEST_NORTH: case FROM_NORTHWEST_NORTHWEST: case FROM_NORTHWEST_WEST: backtrace[k] = BACK_MATCH_MISMATCH; if (db[j] == qr[i]) sfr->matches++; else sfr->mismatches++; 
sfr->read_start = i--; sfr->genome_start = j--; break; default: fprintf(stderr, "INTERNAL ERROR: from = %d\n", from); assert(0); } /* continue backtrace (nb: i and j have already been changed) */ cell = &swmatrix[(i + 1) * (lena + 1) + j + 1]; switch (from) { case FROM_NORTH_NORTH: from = cell->back_north; break; case FROM_NORTH_NORTHWEST: from = cell->back_northwest; break; case FROM_WEST_WEST: from = cell->back_west; break; case FROM_WEST_NORTHWEST: from = cell->back_northwest; break; case FROM_NORTHWEST_NORTH: from = cell->back_north; break; case FROM_NORTHWEST_NORTHWEST: from = cell->back_northwest; break; case FROM_NORTHWEST_WEST: from = cell->back_west; break; default: fprintf(stderr, "INTERNAL ERROR: from = %d\n", from); assert(0); } k--; if (from == 0) break; } return (k + 1); } /* * Pretty print our alignment of 'db' and 'qr' in 'dbalign' and 'qralign'. * * i, j represent the beginning cell in the matrix. * k is the first valid offset in the backtrace buffer. */ static void pretty_print(int i, int j, int k) { char *d, *q; int l, done; d = dbalign; q = qralign; done = 0; for (l = k; l < (dblen + qrlen); l++) { switch (backtrace[l]) { case BACK_DELETION: *d++ = '-'; *q++ = base_translate(qr[i++], false); break; case BACK_INSERTION: *d++ = base_translate(db[j++], false); *q++ = '-'; break; case BACK_MATCH_MISMATCH: *d++ = base_translate(db[j++], false); *q++ = base_translate(qr[i++], false); break; default: done = 1; } if (done) break; } *d = *q = '\0'; } int sw_full_ls_cleanup(void) { free(db); free(qr); free(swmatrix); free(backtrace); free(dbalign); free(qralign); return (0); } int sw_full_ls_setup(int _dblen, int _qrlen, int _a_gap_open, int _a_gap_ext, int _b_gap_open, int _b_gap_ext, int _match, int _mismatch, bool reset_stats, int _anchor_width) { dblen = _dblen; db = (int8_t *)malloc(dblen * sizeof(db[0])); if (db == NULL) return (1); qrlen = _qrlen; qr = (int8_t *)malloc(qrlen * sizeof(qr[0])); if (qr == NULL) return (1); swmatrix = (struct swcell 
*)malloc((dblen + 1) * (qrlen + 1) * sizeof(swmatrix[0])); if (swmatrix == NULL) return (1); backtrace = (int8_t *)malloc((dblen + qrlen) * sizeof(backtrace[0])); if (backtrace == NULL) return (1); dbalign = (char *)malloc((dblen + qrlen + 1) * sizeof(dbalign[0])); if (dbalign == NULL) return (1); qralign = (char *)malloc((dblen + qrlen + 1) * sizeof(dbalign[0])); if (qralign == NULL) return (1); a_gap_open = -(_a_gap_open); a_gap_ext = -(_a_gap_ext); b_gap_open = -(_b_gap_open); b_gap_ext = -(_b_gap_ext); match = _match; mismatch = _mismatch; if (reset_stats) { swcells = swinvocs = 0; sw_tc.type = DEF_FAST_TIME_COUNTER; sw_tc.counter = 0; } anchor_width = _anchor_width; initialised = 1; return (0); } void sw_full_ls_stats(uint64_t *invocs, uint64_t *cells, double *secs) { if (invocs != NULL) *invocs = swinvocs; if (cells != NULL) *cells = swcells; if (secs != NULL) *secs = time_counter_get_secs(&sw_tc); } void sw_full_ls(uint32_t *genome, int goff, int glen, uint32_t *read, int rlen, int threshscore, int maxscore, struct sw_full_results *sfr, bool revcmpl, struct anchor * anchors, int anchors_cnt, int local_alignment) { struct sw_full_results scratch; int i, j, k; //llint before = rdtsc(), after; TIME_COUNTER_START(sw_tc); if (!initialised) abort(); swinvocs++; assert(glen > 0 && glen <= dblen); assert(rlen > 0 && rlen <= qrlen); if (sfr == NULL) { sfr = &scratch; memset(sfr, 0, sizeof(*sfr)); } memset(backtrace, 0, (dblen + qrlen) * sizeof(backtrace[0])); dbalign[0] = qralign[0] = '\0'; for (i = 0; i < glen; i++) db[i] = (int8_t)EXTRACT(genome, goff + i); for (i = 0; i < rlen; i++) qr[i] = (int8_t)EXTRACT(read, i); sfr->score = full_sw(glen, rlen, threshscore, maxscore, &i, &j,revcmpl, anchors, anchors_cnt,local_alignment); k = do_backtrace(glen, i, j, sfr); pretty_print(sfr->read_start, sfr->genome_start, k); sfr->gmapped = j - sfr->genome_start + 1; sfr->genome_start += goff; sfr->rmapped = i - sfr->read_start + 1; sfr->dbalign = xstrdup(dbalign); sfr->qralign 
= xstrdup(qralign);

	//swcells += (glen * rlen);
	//after = rdtsc();
	//swticks += MAX(after - before, 0);
	/* stop the per-thread Smith-Waterman timer started on entry */
	TIME_COUNTER_STOP(sw_tc);
}
core_claset.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlaset.c, normal z -> c, Fri Sep 28 17:38:22 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

// for memset function
#include <string.h>

/***************************************************************************//**
 *
 * @ingroup core_laset
 *
 *  Sets the elements of the matrix A on the diagonal
 *  to beta and on the off-diagonals to alpha
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          Specifies which elements of the matrix are to be set
 *          - PlasmaUpper: Upper part of A is set;
 *          - PlasmaLower: Lower part of A is set;
 *          - PlasmaUpperLower: ALL elements of A are set.
 *
 * @param[in] m
 *          The number of rows of the matrix A.  m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix A.  n >= 0.
 *
 * @param[in] alpha
 *          The constant to which the off-diagonal elements are to be set.
 *
 * @param[in] beta
 *          The constant to which the diagonal elements are to be set.
 *
 * @param[in,out] A
 *          On entry, the m-by-n tile A.
 *          On exit, A has been set accordingly.
 *
 * @param[in] lda
 *          The leading dimension of the array A.  lda >= max(1,m).
 *
 ******************************************************************************/
__attribute__((weak))
void plasma_core_claset(plasma_enum_t uplo, int m, int n,
                        plasma_complex32_t alpha, plasma_complex32_t beta,
                        plasma_complex32_t *A, int lda)
{
    // Fast path: zeroing the whole matrix when columns are contiguous
    // (m == lda) can be done with a single memset.
    // NOTE(review): the doc above lists PlasmaUpperLower while the code tests
    // PlasmaGeneral -- presumably aliases in plasma_types.h; confirm.
    if (alpha == 0.0 && beta == 0.0 && uplo == PlasmaGeneral && m == lda) {
        // Use memset to zero continuous memory.
        memset((void*)A, 0, (size_t)m*n*sizeof(plasma_complex32_t));
    }
    else {
        // Use LAPACKE_claset_work to initialize the matrix.
        LAPACKE_claset_work(LAPACK_COL_MAJOR, lapack_const(uplo),
                            m, n, alpha, beta, A, lda);
    }
}

/******************************************************************************/
// OpenMP task wrapper: sets the m-by-n sub-block of the mb-by-nb tile A that
// starts at row i, column j.  The depend clause covers the whole tile so the
// task serializes against any other task touching A.
void plasma_core_omp_claset(plasma_enum_t uplo,
                            int mb, int nb,
                            int i, int j,
                            int m, int n,
                            plasma_complex32_t alpha, plasma_complex32_t beta,
                            plasma_complex32_t *A)
{
    #pragma omp task depend(out:A[0:mb*nb])
    // A+i+j*mb points at element (i,j) in column-major order; the tile's
    // leading dimension is mb.
    plasma_core_claset(uplo, m, n, alpha, beta, A+i+j*mb, mb);
}
3D.c
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <string.h>
// BUG FIX: omp_get_num_threads()/omp_get_max_threads() and the omp pragmas
// were used without this header (implicit declaration).
#include <omp.h>

#define STR_SIZE (256)
#define MAX_PD	(3.0e6)
/* required precision in degrees	*/
#define PRECISION	0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor	*/
#define FACTOR_CHIP	0.5

/* chip parameters	*/
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all	*/
float amb_temp = 80.0;

/* Report a fatal error and terminate. */
void fatal(char *s)
{
    fprintf(stderr, "Error: %s\n", s);
    // BUG FIX: previously this returned, so callers (e.g. readinput after a
    // failed fopen) continued and dereferenced a NULL FILE*.
    exit(1);
}

/* Read grid_rows x grid_cols x layers float values from `file` into vect. */
void readinput(float *vect, int grid_rows, int grid_cols, int layers, char *file)
{
    int i, j, k;
    FILE *fp;
    char str[STR_SIZE];
    float val;

    if ((fp = fopen(file, "r")) == 0)
        fatal("The file was not opened");

    for (i = 0; i <= grid_rows - 1; i++)
        for (j = 0; j <= grid_cols - 1; j++)
            for (k = 0; k <= layers - 1; k++) {
                if (fgets(str, STR_SIZE, fp) == NULL)
                    fatal("Error reading file\n");
                if (feof(fp))
                    fatal("not enough lines in file");
                if ((sscanf(str, "%f", &val) != 1))
                    fatal("invalid file format");
                vect[i * grid_cols + j + k * grid_rows * grid_cols] = val;
            }

    fclose(fp);
}

/* Write the grid to `file`, one "index<TAB>value" line per cell. */
void writeoutput(float *vect, int grid_rows, int grid_cols, int layers, char *file)
{
    int i, j, k, index = 0;
    FILE *fp;
    char str[STR_SIZE];

    if ((fp = fopen(file, "w")) == 0)
        printf("The file was not opened\n");

    for (i = 0; i < grid_rows; i++)
        for (j = 0; j < grid_cols; j++)
            for (k = 0; k < layers; k++) {
                sprintf(str, "%d\t%g\n", index,
                        vect[i * grid_cols + j + k * grid_rows * grid_cols]);
                fputs(str, fp);
                index++;
            }

    fclose(fp);
}

/*
 * Serial reference: explicit finite-difference update of the 3D heat grid
 * for `numiter` steps.  tIn/tOut are ping-ponged via local pointer swaps, so
 * after an odd number of iterations the newest values live in the caller's
 * tOut array, after an even number in tIn (same convention as the OMP
 * version, so the comparison in main stays consistent).
 */
void computeTempCPU(float *pIn, float *tIn, float *tOut,
                    int nx, int ny, int nz, float Cap,
                    float Rx, float Ry, float Rz, float dt, int numiter)
{
    float ce, cw, cn, cs, ct, cb, cc;
    float stepDivCap = dt / Cap;
    ce = cw = stepDivCap / Rx;
    cn = cs = stepDivCap / Ry;
    ct = cb = stepDivCap / Rz;
    cc = 1.0 - (2.0 * ce + 2.0 * cn + 3.0 * ct);

    int c, w, e, n, s, b, t;
    int x, y, z;
    int i = 0;
    do {
        for (z = 0; z < nz; z++)
            for (y = 0; y < ny; y++)
                for (x = 0; x < nx; x++) {
                    c = x + y * nx + z * nx * ny;
                    /* clamp neighbours at the grid boundary to the cell itself */
                    w = (x == 0)      ? c : c - 1;
                    e = (x == nx - 1) ? c : c + 1;
                    n = (y == 0)      ? c : c - nx;
                    s = (y == ny - 1) ? c : c + nx;
                    b = (z == 0)      ? c : c - nx * ny;
                    t = (z == nz - 1) ? c : c + nx * ny;

                    tOut[c] = tIn[c] * cc + tIn[n] * cn + tIn[s] * cs
                            + tIn[e] * ce + tIn[w] * cw + tIn[t] * ct
                            + tIn[b] * cb + (dt / Cap) * pIn[c] + ct * amb_temp;
                }
        float *temp = tIn;
        tIn = tOut;
        tOut = temp;
        i++;
    } while (i < numiter);
}

/* Root-mean-square difference between two arrays of length len. */
float accuracy(float *arr1, float *arr2, int len)
{
    float err = 0.0;
    int i;
#pragma omp parallel for reduction(+:err)
    for (i = 0; i < len; i++) {
        err += (arr1[i] - arr2[i]) * (arr1[i] - arr2[i]);
    }
    return (float)sqrt(err / len);
}

/* OpenMP version of computeTempCPU: the z-planes of each step run in parallel. */
void computeTempOMP(float *pIn, float *tIn, float *tOut,
                    int nx, int ny, int nz, float Cap,
                    float Rx, float Ry, float Rz, float dt, int numiter)
{
    float ce, cw, cn, cs, ct, cb, cc;
    float stepDivCap = dt / Cap;
    ce = cw = stepDivCap / Rx;
    cn = cs = stepDivCap / Ry;
    ct = cb = stepDivCap / Rz;
    cc = 1.0 - (2.0 * ce + 2.0 * cn + 3.0 * ct);

    {
        int count = 0;
        float *tIn_t = tIn;
        float *tOut_t = tOut;

        // BUG FIX: omp_get_num_threads() always returns 1 outside a parallel
        // region; report the team size that the loops below will actually use.
        printf("%d threads running\n", omp_get_max_threads());

        do {
            int z;
#pragma omp parallel for
            for (z = 0; z < nz; z++) {
                int y;
                for (y = 0; y < ny; y++) {
                    int x;
                    for (x = 0; x < nx; x++) {
                        int c, w, e, n, s, b, t;
                        c = x + y * nx + z * nx * ny;
                        w = (x == 0)      ? c : c - 1;
                        e = (x == nx - 1) ? c : c + 1;
                        n = (y == 0)      ? c : c - nx;
                        s = (y == ny - 1) ? c : c + nx;
                        b = (z == 0)      ? c : c - nx * ny;
                        t = (z == nz - 1) ? c : c + nx * ny;
                        tOut_t[c] = cc * tIn_t[c] + cw * tIn_t[w] + ce * tIn_t[e]
                                  + cs * tIn_t[s] + cn * tIn_t[n] + cb * tIn_t[b]
                                  + ct * tIn_t[t] + (dt / Cap) * pIn[c]
                                  + ct * amb_temp;
                    }
                }
            }
            float *t = tIn_t;
            tIn_t = tOut_t;
            tOut_t = t;
            count++;
        } while (count < numiter);
    }
    return;
}

void usage(int argc, char **argv)
{
    fprintf(stderr, "Usage: %s <rows/cols> <layers> <iterations> <powerFile> <tempFile> <outputFile>\n", argv[0]);
    fprintf(stderr, "\t<rows/cols>  - number of rows/cols in the grid (positive integer)\n");
    fprintf(stderr, "\t<layers>  - number of layers in the grid (positive integer)\n");
    fprintf(stderr, "\t<iteration> - number of iterations\n");
    fprintf(stderr, "\t<powerFile>  - name of the file containing the initial power values of each cell\n");
    fprintf(stderr, "\t<tempFile>  - name of the file containing the initial temperature values of each cell\n");
    fprintf(stderr, "\t<outputFile - output file\n");
    exit(1);
}

/*
 * Driver: run the OpenMP solver, time it, run the serial reference on a copy
 * of the input, report the RMS difference, and write the OMP result.
 */
int main(int argc, char **argv)
{
    if (argc != 7) {
        usage(argc, argv);
    }

    char *pfile, *tfile, *ofile; // *testFile;
    int iterations = atoi(argv[3]);

    pfile = argv[4];
    tfile = argv[5];
    ofile = argv[6];
    //testFile = argv[7];
    int numCols = atoi(argv[1]);
    int numRows = atoi(argv[1]);
    int layers = atoi(argv[2]);

    /* calculating parameters*/
    float dx = chip_height / numRows;
    float dy = chip_width / numCols;
    float dz = t_chip / layers;

    float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * dx * dy;
    float Rx = dy / (2.0 * K_SI * t_chip * dx);
    float Ry = dx / (2.0 * K_SI * t_chip * dy);
    float Rz = dz / (K_SI * dx * dy);

    // cout << Rx << " " << Ry << " " << Rz << endl;
    float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
    float dt = PRECISION / max_slope;

    float *powerIn, *tempOut, *tempIn, *tempCopy; // *pCopy;
    // float *d_powerIn, *d_tempIn, *d_tempOut;
    int size = numCols * numRows * layers;

    powerIn = (float *)calloc(size, sizeof(float));
    tempCopy = (float *)malloc(size * sizeof(float));
    tempIn = (float *)calloc(size, sizeof(float));
    tempOut = (float *)calloc(size, sizeof(float));
    //pCopy = (float*)calloc(size,sizeof(float));
    float *answer = (float *)calloc(size, sizeof(float));
    if (!powerIn || !tempCopy || !tempIn || !tempOut || !answer)
        fatal("out of memory");

    // outCopy = (float*)calloc(size, sizeof(float));
    readinput(powerIn, numRows, numCols, layers, pfile);
    readinput(tempIn, numRows, numCols, layers, tfile);

    memcpy(tempCopy, tempIn, size * sizeof(float));

    struct timeval start, stop;
    float time;
    gettimeofday(&start, NULL);
    computeTempOMP(powerIn, tempIn, tempOut, numCols, numRows, layers,
                   Cap, Rx, Ry, Rz, dt, iterations);
    gettimeofday(&stop, NULL);
    time = (stop.tv_usec - start.tv_usec) * 1.0e-6 + stop.tv_sec - start.tv_sec;
    computeTempCPU(powerIn, tempCopy, answer, numCols, numRows, layers,
                   Cap, Rx, Ry, Rz, dt, iterations);

    // NOTE(review): with even `iterations` the newest values end up in the
    // arrays passed as tIn/tempCopy, not tempOut/answer; the two solvers share
    // the convention so the comparison is still apples-to-apples.
    float acc = accuracy(tempOut, answer, numRows * numCols * layers);
    printf("Time: %.3f (s)\n", time);
    printf("Accuracy: %e\n", acc);
    writeoutput(tempOut, numRows, numCols, layers, ofile);

    // BUG FIX: tempCopy and answer were leaked.
    free(tempIn);
    free(tempOut);
    free(powerIn);
    free(tempCopy);
    free(answer);
    return 0;
}
detector.c
#include "darknet.h"

/* 80-entry lookup table mapping darknet's contiguous class indices to the
 * non-contiguous official MS-COCO category ids. */
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};

/* Train a detection network on the image list named by the data-cfg "train"
 * key, optionally across ngpus GPUs, periodically saving weight snapshots
 * into the "backup" directory. */
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
    list *options = read_data_cfg(datacfg);
    char *train_images = option_find_str(options, "train", "data/train.list");
    char *backup_directory = option_find_str(options, "backup", "/backup/");

    srand(time(0));
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    float avg_loss = -1;
    /* BUGFIX: the array elements are network* (nets is network**); the
     * original allocated ngpus * sizeof(network), i.e. the wrong (much
     * larger) element size. */
    network **nets = calloc(ngpus, sizeof *nets);

    srand(time(0));
    int seed = rand();
    int i;
    for(i = 0; i < ngpus; ++i){
        srand(seed);   /* identical seed so every replica starts in sync */
#ifdef GPU
        cuda_set_device(gpus[i]);
#endif
        nets[i] = load_network(cfgfile, weightfile, clear);
        nets[i]->learning_rate *= ngpus;
    }
    srand(time(0));
    network *net = nets[0];

    int imgs = net->batch * net->subdivisions * ngpus;
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    data train, buffer;

    layer l = net->layers[net->n - 1];

    int classes = l.classes;
    float jitter = l.jitter;

    list *plist = get_paths(train_images);
    char **paths = (char **)list_to_array(plist);

    load_args args = get_base_args(net);
    args.coords = l.coords;
    args.paths = paths;
    args.n = imgs;
    args.m = plist->size;
    args.classes = classes;
    args.jitter = jitter;
    args.num_boxes = l.max_boxes;
    args.d = &buffer;
    args.type = DETECTION_DATA;
    args.threads = 64;

    pthread_t load_thread = load_data(args);
    double time;
    int count = 0;
    while(get_current_batch(net) < net->max_batches){
        /* multi-scale training: every 10 batches pick a new random input
         * resolution; lock it to 608 near the end of training */
        if(l.random && count++%10 == 0){
            printf("Resizing\n");
            int dim = (rand() % 10 + 10) * 32;
            if (get_current_batch(net)+200 > net->max_batches) dim = 608;
            printf("%d\n", dim);
            args.w = dim;
            args.h = dim;

            /* discard the batch that was loaded at the old resolution */
            pthread_join(load_thread, 0);
            train = buffer;
            free_data(train);
            load_thread = load_data(args);

            #pragma omp parallel for
            for(i = 0; i < ngpus; ++i){
                resize_network(nets[i], dim, dim);
            }
            net = nets[0];
        }
        time=what_time_is_it_now();
        pthread_join(load_thread, 0);
        train = buffer;
        load_thread = load_data(args);

        printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);

        time=what_time_is_it_now();
        float loss = 0;
#ifdef GPU
        if(ngpus == 1){
            loss = train_network(net, train);
        } else {
            loss = train_networks(nets, ngpus, train, 4);
        }
#else
        loss = train_network(net, train);
#endif
        if (avg_loss < 0) avg_loss = loss;
        avg_loss = avg_loss*.9 + loss*.1;   /* exponential moving average of the loss */

        i = get_current_batch(net);
        printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
        if(i%100==0){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, base);
            save_weights(net, buff);
        }
        if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
        }
        free_data(train);
    }
#ifdef GPU
    if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
}

/* Parse the numeric COCO image id out of a path such as
 * ..._000000397133.jpg (continues on the next chunk line). */
static int get_coco_image_id(char *filename)
{
    char *p = strrchr(filename,
'/'); char *c = strrchr(filename, '_'); if(c) p = c; return atoi(p+1); } static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h) { int i, j; int image_id = get_coco_image_id(image_path); for(i = 0; i < num_boxes; ++i){ float xmin = dets[i].bbox.x - dets[i].bbox.w/2.; float xmax = dets[i].bbox.x + dets[i].bbox.w/2.; float ymin = dets[i].bbox.y - dets[i].bbox.h/2.; float ymax = dets[i].bbox.y + dets[i].bbox.h/2.; if (xmin < 0) xmin = 0; if (ymin < 0) ymin = 0; if (xmax > w) xmax = w; if (ymax > h) ymax = h; float bx = xmin; float by = ymin; float bw = xmax - xmin; float bh = ymax - ymin; for(j = 0; j < classes; ++j){ if (dets[i].prob[j]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[j], bx, by, bw, bh, dets[i].prob[j]); } } } void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h) { int i, j; for(i = 0; i < total; ++i){ float xmin = dets[i].bbox.x - dets[i].bbox.w/2. + 1; float xmax = dets[i].bbox.x + dets[i].bbox.w/2. + 1; float ymin = dets[i].bbox.y - dets[i].bbox.h/2. + 1; float ymax = dets[i].bbox.y + dets[i].bbox.h/2. 
+ 1; if (xmin < 1) xmin = 1; if (ymin < 1) ymin = 1; if (xmax > w) xmax = w; if (ymax > h) ymax = h; for(j = 0; j < classes; ++j){ if (dets[i].prob[j]) fprintf(fps[j], "%s %f %f %f %f %f\n", id, dets[i].prob[j], xmin, ymin, xmax, ymax); } } } void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h) { int i, j; for(i = 0; i < total; ++i){ float xmin = dets[i].bbox.x - dets[i].bbox.w/2.; float xmax = dets[i].bbox.x + dets[i].bbox.w/2.; float ymin = dets[i].bbox.y - dets[i].bbox.h/2.; float ymax = dets[i].bbox.y + dets[i].bbox.h/2.; if (xmin < 0) xmin = 0; if (ymin < 0) ymin = 0; if (xmax > w) xmax = w; if (ymax > h) ymax = h; for(j = 0; j < classes; ++j){ int class = j; if (dets[i].prob[class]) fprintf(fp, "%d %d %f %f %f %f %f\n", id, j+1, dets[i].prob[class], xmin, ymin, xmax, ymax); } } } void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile) { int j; list *options = read_data_cfg(datacfg); char *valid_images = option_find_str(options, "valid", "data/train.list"); char *name_list = option_find_str(options, "names", "data/names.list"); char *prefix = option_find_str(options, "results", "results"); char **names = get_labels(name_list); char *mapf = option_find_str(options, "map", 0); int *map = 0; if (mapf) map = read_map(mapf); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 2); fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay); srand(time(0)); list *plist = get_paths(valid_images); char **paths = (char **)list_to_array(plist); layer l = net->layers[net->n-1]; int classes = l.classes; char buff[1024]; char *type = option_find_str(options, "eval", "voc"); FILE *fp = 0; FILE **fps = 0; int coco = 0; int imagenet = 0; if(0==strcmp(type, "coco")){ if(!outfile) outfile = "coco_results"; snprintf(buff, 1024, "%s/%s.json", prefix, outfile); fp = fopen(buff, "w"); fprintf(fp, "[\n"); coco = 1; } else 
if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        /* VOC-style: one result file per class */
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }

    int m = plist->size;
    int i=0;
    int t;

    float thresh = .005;
    float nms = .45;

    /* pipelined loader: nthreads images are fetched in the background while
       the previous batch is being evaluated */
    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));

    image input = make_image(net->w, net->h, net->c*2);   /* image + flipped copy, channel-stacked */

    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;

    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        /* collect the images loaded in the background */
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        /* immediately start loading the next batch */
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            /* stack the image and its horizontal mirror into one input */
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
            flip_image(val_resized[t]);
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);

            network_predict(net, input.data);
            int w = val[t].w;
            int h = val[t].h;
            int num = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
            if (nms) do_nms_sort(dets, num, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, num, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, num, classes, w, h);
            }
            free_detections(dets, num);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        /* back up over the trailing ",\n" to close the JSON array cleanly */
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}

/* Validate a detector over the "valid" image list, writing results in the
 * format selected by the data-cfg "eval" key (coco / imagenet / voc). */
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);

    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);

    layer l = net->layers[net->n-1];
    int classes = l.classes;

    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        /* VOC-style: one result file per class */
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile,
names[j]); fps[j] = fopen(buff, "w"); } } int m = plist->size; int i=0; int t; float thresh = .005; float nms = .45; int nthreads = 4; image *val = calloc(nthreads, sizeof(image)); image *val_resized = calloc(nthreads, sizeof(image)); image *buf = calloc(nthreads, sizeof(image)); image *buf_resized = calloc(nthreads, sizeof(image)); pthread_t *thr = calloc(nthreads, sizeof(pthread_t)); load_args args = {0}; args.w = net->w; args.h = net->h; //args.type = IMAGE_DATA; args.type = LETTERBOX_DATA; for(t = 0; t < nthreads; ++t){ args.path = paths[i+t]; args.im = &buf[t]; args.resized = &buf_resized[t]; thr[t] = load_data_in_thread(args); } double start = what_time_is_it_now(); for(i = nthreads; i < m+nthreads; i += nthreads){ fprintf(stderr, "%d\n", i); for(t = 0; t < nthreads && i+t-nthreads < m; ++t){ pthread_join(thr[t], 0); val[t] = buf[t]; val_resized[t] = buf_resized[t]; } for(t = 0; t < nthreads && i+t < m; ++t){ args.path = paths[i+t]; args.im = &buf[t]; args.resized = &buf_resized[t]; thr[t] = load_data_in_thread(args); } for(t = 0; t < nthreads && i+t-nthreads < m; ++t){ char *path = paths[i+t-nthreads]; char *id = basecfg(path); float *X = val_resized[t].data; network_predict(net, X); int w = val[t].w; int h = val[t].h; int nboxes = 0; detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes); if (nms) do_nms_sort(dets, nboxes, classes, nms); if (coco){ print_cocos(fp, path, dets, nboxes, classes, w, h); } else if (imagenet){ print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h); } else { print_detector_detections(fps, id, dets, nboxes, classes, w, h); } free_detections(dets, nboxes); free(id); free_image(val[t]); free_image(val_resized[t]); } } for(j = 0; j < classes; ++j){ if(fps) fclose(fps[j]); } if(coco){ fseek(fp, -2, SEEK_CUR); fprintf(fp, "\n]\n"); fclose(fp); } fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start); } void validate_detector_recall(char *cfgfile, char *weightfile) { 
network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay); srand(time(0)); list *plist = get_paths("data/coco_val_5k.list"); char **paths = (char **)list_to_array(plist); layer l = net->layers[net->n-1]; int j, k; int m = plist->size; int i=0; float thresh = .001; float iou_thresh = .5; float nms = .4; int total = 0; int correct = 0; int proposals = 0; float avg_iou = 0; for(i = 0; i < m; ++i){ char *path = paths[i]; image orig = load_image_color(path, 0, 0); image sized = resize_image(orig, net->w, net->h); char *id = basecfg(path); network_predict(net, sized.data); int nboxes = 0; detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes); if (nms) do_nms_obj(dets, nboxes, 1, nms); char labelpath[4096]; find_replace(path, "images", "labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); int num_labels = 0; box_label *truth = read_boxes(labelpath, &num_labels); for(k = 0; k < nboxes; ++k){ if(dets[k].objectness > thresh){ ++proposals; } } for (j = 0; j < num_labels; ++j) { ++total; box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h}; float best_iou = 0; for(k = 0; k < l.w*l.h*l.n; ++k){ float iou = box_iou(dets[k].bbox, t); if(dets[k].objectness > thresh && iou > best_iou){ best_iou = iou; } } avg_iou += best_iou; if(best_iou > iou_thresh){ ++correct; } } fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total); free(id); free_image(orig); free_image(sized); } } void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen) { list *options = read_data_cfg(datacfg); char *name_list = 
option_find_str(options, "names", "data/names.list"); char **names = get_labels(name_list); image **alphabet = load_alphabet(); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); srand(2222222); double time; char buff[256]; char *input = buff; float nms=.45; while(1){ if(filename){ strncpy(input, filename, 256); } else { printf("Enter Image Path: "); fflush(stdout); input = fgets(input, 256, stdin); if(!input) return; strtok(input, "\n"); } image im = load_image_color(input,0,0); image sized = letterbox_image(im, net->w, net->h); //image sized = resize_image(im, net->w, net->h); //image sized2 = resize_max(im, net->w); //image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h); //resize_network(net, sized.w, sized.h); layer l = net->layers[net->n-1]; float *X = sized.data; time=what_time_is_it_now(); network_predict(net, X); printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time); int nboxes = 0; detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes); //printf("%d\n", nboxes); //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms); if (nms) do_nms_sort(dets, nboxes, l.classes, nms); draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes); free_detections(dets, nboxes); if(outfile){ save_image(im, outfile); } else{ save_image(im, "predictions"); #ifdef OPENCV make_window("predictions", 512, 512, 0); show_image(im, "predictions", 0); #endif } free_image(im); free_image(sized); if (filename) break; } } /* void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip) { #ifdef OPENCV char *base = basecfg(cfgfile); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); srand(2222222); CvCapture * cap; int w = 1280; int h = 720; if(filename){ cap = cvCaptureFromFile(filename); }else{ cap = cvCaptureFromCAM(cam_index); } 
if(w){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w); } if(h){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h); } if(!cap) error("Couldn't connect to webcam.\n"); cvNamedWindow(base, CV_WINDOW_NORMAL); cvResizeWindow(base, 512, 512); float fps = 0; int i; float nms = .45; while(1){ image in = get_image_from_stream(cap); //image in_s = resize_image(in, net->w, net->h); image in_s = letterbox_image(in, net->w, net->h); layer l = net->layers[net->n-1]; float *X = in_s.data; network_predict(net, X); int nboxes = 0; detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes); //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms); if (nms) do_nms_sort(dets, nboxes, l.classes, nms); for(i = 0; i < nboxes; ++i){ if(dets[i].prob[class] > thresh){ box b = dets[i].bbox; int left = b.x-b.w/2.; int top = b.y-b.h/2.; censor_image(in, left, top, b.w, b.h); } } show_image(in, base); cvWaitKey(10); free_detections(dets, nboxes); free_image(in_s); free_image(in); float curr = 0; fps = .9*fps + .1*curr; for(i = 0; i < skip; ++i){ image in = get_image_from_stream(cap); free_image(in); } } #endif } void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip) { #ifdef OPENCV char *base = basecfg(cfgfile); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); srand(2222222); CvCapture * cap; int w = 1280; int h = 720; if(filename){ cap = cvCaptureFromFile(filename); }else{ cap = cvCaptureFromCAM(cam_index); } if(w){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w); } if(h){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h); } if(!cap) error("Couldn't connect to webcam.\n"); cvNamedWindow(base, CV_WINDOW_NORMAL); cvResizeWindow(base, 512, 512); float fps = 0; int i; int count = 0; float nms = .45; while(1){ image in = get_image_from_stream(cap); //image in_s = resize_image(in, net->w, net->h); image in_s = letterbox_image(in, 
net->w, net->h); layer l = net->layers[net->n-1]; show_image(in, base); int nboxes = 0; float *X = in_s.data; network_predict(net, X); detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes); //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms); if (nms) do_nms_sort(dets, nboxes, l.classes, nms); for(i = 0; i < nboxes; ++i){ if(dets[i].prob[class] > thresh){ box b = dets[i].bbox; int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h; int dx = b.x*in.w-size/2.; int dy = b.y*in.h-size/2.; image bim = crop_image(in, dx, dy, size, size); char buff[2048]; sprintf(buff, "results/extract/%07d", count); ++count; save_image(bim, buff); free_image(bim); } } free_detections(dets, nboxes); free_image(in_s); free_image(in); float curr = 0; fps = .9*fps + .1*curr; for(i = 0; i < skip; ++i){ image in = get_image_from_stream(cap); free_image(in); } } #endif } */ /* void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets) { network_predict_image(net, im); layer l = net->layers[net->n-1]; int nboxes = num_boxes(net); fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets); if (nms) do_nms_sort(dets, nboxes, l.classes, nms); } */ void run_detector(int argc, char **argv) { char *prefix = find_char_arg(argc, argv, "-prefix", 0); float thresh = find_float_arg(argc, argv, "-thresh", .5); float hier_thresh = find_float_arg(argc, argv, "-hier", .5); int cam_index = find_int_arg(argc, argv, "-c", 0); int frame_skip = find_int_arg(argc, argv, "-s", 0); int avg = find_int_arg(argc, argv, "-avg", 3); if(argc < 4){ fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]); return; } char *gpu_list = find_char_arg(argc, argv, "-gpus", 0); char *outfile = find_char_arg(argc, argv, "-out", 0); int *gpus = 0; int gpu = 0; int ngpus = 0; if(gpu_list){ printf("%s\n", gpu_list); int len = strlen(gpu_list); ngpus = 1; int i; for(i = 0; i < len; ++i){ if (gpu_list[i] == 
',') ++ngpus; } gpus = calloc(ngpus, sizeof(int)); for(i = 0; i < ngpus; ++i){ gpus[i] = atoi(gpu_list); gpu_list = strchr(gpu_list, ',')+1; } } else { gpu = gpu_index; gpus = &gpu; ngpus = 1; } int clear = find_arg(argc, argv, "-clear"); int fullscreen = find_arg(argc, argv, "-fullscreen"); int width = find_int_arg(argc, argv, "-w", 0); int height = find_int_arg(argc, argv, "-h", 0); int fps = find_int_arg(argc, argv, "-fps", 0); //int class = find_int_arg(argc, argv, "-class", 0); char *datacfg = argv[3]; char *cfg = argv[4]; char *weights = (argc > 5) ? argv[5] : 0; char *filename = (argc > 6) ? argv[6]: 0; if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen); else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear); else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile); else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile); else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights); else if(0==strcmp(argv[2], "demo")) { list *options = read_data_cfg(datacfg); int classes = option_find_int(options, "classes", 20); char *name_list = option_find_str(options, "names", "data/names.list"); char **names = get_labels(name_list); demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen); } //else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip); //else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip); }
/* ==================== ten_tusscher_2004_epi_S2_4.c ==================== */
//Original Ten Tusscher #include <assert.h> #include <stdlib.h> #include "ten_tusscher_2004_epi_S2_4.h" GET_CELL_MODEL_DATA(init_cell_model_data) { assert(cell_model); if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } //TODO: this should be called only once for the whole mesh, like in the GPU code SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.5574211764260,0.00129305755715058,0.779441422719268,0.779241742711666,0.000175039240857358,0.484977289081740,0.00294257507368012,0.999998344595344,1.93700269716616e-08,1.89380174481509e-05,0.999773792418493,1.00755963480393,0.999999137126184,3.41466316398601e-05,1.23162815450729,9.71224673801957,139.552422843336}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = i; for (int j = 0; j < num_steps; ++j) { solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu(real dt, real *sv, real stim_current) { assert(sv); real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = 
sv[6];
    real ss    = sv[7];
    real sr    = sv[8];
    real sd    = sv[9];
    real sf    = sv[10];
    real sfca  = sv[11];
    real sg    = sv[12];
    real Cai   = sv[13];
    real CaSR  = sv[14];
    real Nai   = sv[15];
    real Ki    = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    /// real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    /// real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    //#ifdef EPI
    real Gto=0.294;
    //#endif
    // #ifdef ENDO
    // real Gto=0.073;
    //#endif
    //#ifdef MCELL
    // real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Per-variant conductances/rates: this table overrides the defaults
    // above (presumably from a fitting run for this S2_4 variant).
    real parameters []={14.0344988699429,0.000243427554127383,0.000161272832250911,0.000484228011827550,0.275092424538870,0.175484829191378,0.164879494363494,3.77803127027096,0.0197412874581791,1.93055058781161,1099.31582404877,0.000553709594039336,0.144015543772373,0.0199814298252655,0.00826445055600327,9.00070147931675e-06};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    // Nernst/reversal potentials and rectification factors
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // SR calcium with calsequestrin buffering (analytic quadratic solve)
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // cytosolic calcium with buffering (analytic quadratic solve)
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // transient-outward gate parameters differ per cell type (EPI/ENDO/MCELL)
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates
    // Rush-Larsen update: x(t+dt) = x_inf - (x_inf - x) * exp(-dt/tau)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g may only decrease while depolarized (svolt > -37 mV)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
/* ======================== test_utils.h ======================== */
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <stdio.h> #include <stdlib.h> #include <stddef.h> #include <string> #include <sstream> #include <iostream> #include <iomanip> #include <algorithm> #include <limits> #include <utility> #include <cstdint> extern "C" { #include "mmio.h" } #include <cuda.h> #include <cuda_runtime.h> #include <cuda_profiler_api.h> #include <library_types.h> #include <thrust/host_vector.h> #include <thrust/adjacent_difference.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/sequence.h> #include <rmm_utils.h> #include "cugraph.h" #ifndef CUDA_RT_CALL #define CUDA_RT_CALL( call ) \ { \ cudaError_t cudaStatus = call; \ if ( cudaSuccess != cudaStatus ) { \ fprintf(stderr, "ERROR: CUDA RT call \"%s\" in line %d of file %s failed with %s (%d).\n", \ #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus); \ } \ } #endif std::function<void(gdf_column*)> gdf_col_deleter = [](gdf_column* col){ if (col) { col->size = 0; if(col->data){ cudaStream_t stream{nullptr}; ALLOC_FREE_TRY(col->data, stream); } delete col; } }; using gdf_column_ptr = typename std::unique_ptr<gdf_column, decltype(gdf_col_deleter)>; std::function<void(gdf_graph*)> gdf_graph_deleter = [](gdf_graph* G){delete G;}; using gdf_graph_ptr = typename std::unique_ptr<gdf_graph,decltype(gdf_graph_deleter)>; std::string getFileName(const std::string& s) { char 
sep = '/'; #ifdef _WIN32 sep = '\\'; #endif size_t i = s.rfind(sep, s.length()); if (i != std::string::npos) { return(s.substr(i+1, s.length() - i)); } return(""); } template <typename T> void verbose_diff(std::vector<T> & v1, std::vector<T> & v2) { for (unsigned int i = 0; i < v1.size(); ++i) { if (v1[i] != v2[i]) { std::cout << "[" << i <<"] : " << v1[i] << " vs. "<< v2[i]<<std::endl; } } } template <typename T> int eq(std::vector<T> & v1, std::vector<T> & v2) { if (v1 == v2) return 0; else { verbose_diff(v1,v2); return 1; } } template <typename T> void printv(size_t n, T* vec, int offset) { thrust::device_ptr<T> dev_ptr(vec); std::cout.precision(15); std::cout << "sample size = "<< n << ", offset = "<< offset << std::endl; thrust::copy(dev_ptr+offset,dev_ptr+offset+n, std::ostream_iterator<T>(std::cout, " "));//Assume no RMM dependency; TODO: check / test (potential BUG !!!!!) std::cout << std::endl; } template <typename T_ELEM> void ref_csr2csc (int m, int n, int nnz, const T_ELEM *csrVals, const int *csrRowptr, const int *csrColInd, T_ELEM *cscVals, int *cscRowind, int *cscColptr, int base=0){ int i,j, row, col, index; int * counters; T_ELEM val; /* early return */ if ((m <= 0) || (n <= 0) || (nnz <= 0)){ return; } /* build compressed column pointers */ memset(cscColptr, 0, (n+1)*sizeof(cscColptr[0])); cscColptr[0]=base; for (i=0; i<nnz; i++){ cscColptr[1+csrColInd[i]-base]++; } for(i=0; i<n; i++){ cscColptr[i+1]+=cscColptr[i]; } /* expand row indecis and copy them and values into csc arrays according to permutation */ counters = (int *)malloc(n*sizeof(counters[0])); memset(counters, 0, n*sizeof(counters[0])); for (i=0; i<m; i++){ for (j=csrRowptr[i]; j<csrRowptr[i+1]; j++){ row = i+base; col = csrColInd[j-base]; index=cscColptr[col-base]-base+counters[col-base]; counters[col-base]++; cscRowind[index]=row; if(csrVals!=NULL || cscVals!=NULL){ val = csrVals[j-base]; cscVals[index] = val; } } } free(counters); } template <typename T> int transition_matrix_cpu(int 
n, int e, int *csrRowPtrA, int *csrColIndA, T *weight, T* is_leaf) //omp_set_num_threads(4); //#pragma omp parallel { int j,row, row_size; //#pragma omp for for (row=0; row<n; row++) { row_size = csrRowPtrA[row+1] - csrRowPtrA[row]; if (row_size == 0) is_leaf[row]=1.0; else { is_leaf[row]=0.0; for (j=csrRowPtrA[row]; j<csrRowPtrA[row+1]; j++) weight[j] = 1.0/row_size; } } return 0; } template <typename T> void printCsrMatI(int m, int n, int nnz,std::vector<int> & csrRowPtr, std::vector<uint16_t> & csrColInd, std::vector<T> & csrVal) { std::vector<T> v(n); std::stringstream ss; ss.str(std::string()); ss << std::fixed; ss << std::setprecision(2); for (int i = 0; i < m; i++) { std::fill(v.begin(),v.end(),0); for (int j = csrRowPtr[i]; j < csrRowPtr[i+1]; j++) v[csrColInd[j]] = csrVal[j]; std::copy(v.begin(), v.end(), std::ostream_iterator<int>(ss, " ")); ss << "\n"; } ss << "\n"; std::cout<<ss.str(); } /// Read matrix properties from Matrix Market file /** Matrix Market file is assumed to be a sparse matrix in coordinate * format. * * @param f File stream for Matrix Market file. * @param tg Boolean indicating whether to convert matrix to general * format (from symmetric, Hermitian, or skew symmetric format). * @param t (Output) MM_typecode with matrix properties. * @param m (Output) Number of matrix rows. * @param n (Output) Number of matrix columns. * @param nnz (Output) Number of non-zero matrix entries. * @return Zero if properties were read successfully. Otherwise * non-zero. 
*/ template <typename IndexType_> int mm_properties(FILE * f, int tg, MM_typecode * t, IndexType_ * m, IndexType_ * n, IndexType_ * nnz) { // Read matrix properties from file int mint, nint, nnzint; if(fseek(f,0,SEEK_SET)) { fprintf(stderr, "Error: could not set position in file\n"); return -1; } if(mm_read_banner(f,t)) { fprintf(stderr, "Error: could not read Matrix Market file banner\n"); return -1; } if(!mm_is_matrix(*t) || !mm_is_coordinate(*t)) { fprintf(stderr, "Error: file does not contain matrix in coordinate format\n"); return -1; } if(mm_read_mtx_crd_size(f,&mint,&nint,&nnzint)) { fprintf(stderr, "Error: could not read matrix dimensions\n"); return -1; } if(!mm_is_pattern(*t) && !mm_is_real(*t) && !mm_is_integer(*t) && !mm_is_complex(*t)) { fprintf(stderr, "Error: matrix entries are not valid type\n"); return -1; } *m = mint; *n = nint; *nnz = nnzint; // Find total number of non-zero entries if(tg && !mm_is_general(*t)) { // Non-diagonal entries should be counted twice IndexType_ nnzOld = *nnz; *nnz *= 2; // Diagonal entries should not be double-counted int i; int st; for(i=0; i<nnzOld; ++i) { // Read matrix entry IndexType_ row, col; double rval, ival; if (mm_is_pattern(*t)) st = fscanf(f, "%d %d\n", &row, &col); else if (mm_is_real(*t) || mm_is_integer(*t)) st = fscanf(f, "%d %d %lg\n", &row, &col, &rval); else // Complex matrix st = fscanf(f, "%d %d %lg %lg\n", &row, &col, &rval, &ival); if(ferror(f) || (st == EOF)) { fprintf(stderr, "Error: error %d reading Matrix Market file (entry %d)\n", st, i+1); return -1; } // Check if entry is diagonal if(row == col) --(*nnz); } } return 0; } /// Read Matrix Market file and convert to COO format matrix /** Matrix Market file is assumed to be a sparse matrix in coordinate * format. * * @param f File stream for Matrix Market file. * @param tg Boolean indicating whether to convert matrix to general * format (from symmetric, Hermitian, or skew symmetric format). * @param nnz Number of non-zero matrix entries. 
* @param cooRowInd (Output) Row indices for COO matrix. Should have * at least nnz entries. * @param cooColInd (Output) Column indices for COO matrix. Should * have at least nnz entries. * @param cooRVal (Output) Real component of COO matrix * entries. Should have at least nnz entries. Ignored if null * pointer. * @param cooIVal (Output) Imaginary component of COO matrix * entries. Should have at least nnz entries. Ignored if null * pointer. * @return Zero if matrix was read successfully. Otherwise non-zero. */ template <typename IndexType_, typename ValueType_> int mm_to_coo(FILE *f, int tg, IndexType_ nnz, IndexType_ * cooRowInd, IndexType_ * cooColInd, ValueType_ * cooRVal , ValueType_ * cooIVal) { // Read matrix properties from file MM_typecode t; int m, n, nnzOld; if(fseek(f,0,SEEK_SET)) { fprintf(stderr, "Error: could not set position in file\n"); return -1; } if(mm_read_banner(f,&t)) { fprintf(stderr, "Error: could not read Matrix Market file banner\n"); return -1; } if(!mm_is_matrix(t) || !mm_is_coordinate(t)) { fprintf(stderr, "Error: file does not contain matrix in coordinate format\n"); return -1; } if(mm_read_mtx_crd_size(f,&m,&n,&nnzOld)) { fprintf(stderr, "Error: could not read matrix dimensions\n"); return -1; } if(!mm_is_pattern(t) && !mm_is_real(t) && !mm_is_integer(t) && !mm_is_complex(t)) { fprintf(stderr, "Error: matrix entries are not valid type\n"); return -1; } // Add each matrix entry in file to COO format matrix IndexType_ i; // Entry index in Matrix Market file IndexType_ j = 0; // Entry index in COO format matrix for(i=0;i<nnzOld;++i) { // Read entry from file int row, col; double rval, ival; int st; if (mm_is_pattern(t)) { st = fscanf(f, "%d %d\n", &row, &col); rval = 1.0; ival = 0.0; } else if (mm_is_real(t) || mm_is_integer(t)) { st = fscanf(f, "%d %d %lg\n", &row, &col, &rval); ival = 0.0; } else // Complex matrix st = fscanf(f, "%d %d %lg %lg\n", &row, &col, &rval, &ival); if(ferror(f) || (st == EOF)) { fprintf(stderr, "Error: error 
%d reading Matrix Market file (entry %d)\n", st, i+1); return -1; } // Switch to 0-based indexing --row; --col; // Record entry cooRowInd[j] = row; cooColInd[j] = col; if(cooRVal != NULL) cooRVal[j] = rval; if(cooIVal != NULL) cooIVal[j] = ival; ++j; // Add symmetric complement of non-diagonal entries if(tg && !mm_is_general(t) && (row!=col)) { // Modify entry value if matrix is skew symmetric or Hermitian if(mm_is_skew(t)) { rval = -rval; ival = -ival; } else if(mm_is_hermitian(t)) { ival = -ival; } // Record entry cooRowInd[j] = col; cooColInd[j] = row; if(cooRVal != NULL) cooRVal[j] = rval; if(cooIVal != NULL) cooIVal[j] = ival; ++j; } } return 0; } /// Compare two tuples based on the element indexed by i class lesser_tuple { const int i; public: lesser_tuple(int _i) : i(_i) {} template<typename Tuple1, typename Tuple2> __host__ __device__ bool operator()(const Tuple1 t1, const Tuple2 t2) { switch(i) { case 0: return (thrust::get<0>(t1) < thrust::get<0>(t2)); case 1: return (thrust::get<1>(t1) < thrust::get<1>(t2)); default: return (thrust::get<0>(t1) < thrust::get<0>(t2)); } } }; /// Sort entries in COO format matrix /** Sort is stable. * * @param nnz Number of non-zero matrix entries. * @param sort_by_row Boolean indicating whether matrix entries * will be sorted by row index or by column index. * @param cooRowInd Row indices for COO matrix. * @param cooColInd Column indices for COO matrix. * @param cooRVal Real component for COO matrix entries. Ignored if * null pointer. * @param cooIVal Imaginary component COO matrix entries. Ignored if * null pointer. 
*/ template <typename IndexType_, typename ValueType_> void coo_sort(IndexType_ nnz, int sort_by_row, IndexType_ * cooRowInd, IndexType_ * cooColInd, ValueType_ * cooRVal, ValueType_ * cooIVal) { // Determine whether to sort by row or by column int i; if(sort_by_row == 0) i = 1; else i = 0; // Apply stable sort using namespace thrust; if((cooRVal==NULL) && (cooIVal==NULL)) stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz)), lesser_tuple(i)); else if((cooRVal==NULL) && (cooIVal!=NULL)) stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd,cooIVal)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz,cooIVal+nnz)), lesser_tuple(i)); else if((cooRVal!=NULL) && (cooIVal==NULL)) stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd,cooRVal)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz,cooRVal+nnz)), lesser_tuple(i)); else stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd,cooRVal,cooIVal)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz, cooRVal+nnz,cooIVal+nnz)), lesser_tuple(i)); } /// Compress sorted list of indices /** For use in converting COO format matrix to CSR or CSC format. * * @param n Maximum index. * @param nnz Number of non-zero matrix entries. * @param sortedIndices Sorted list of indices (COO format). * @param compressedIndices (Output) Compressed list of indices (CSR * or CSC format). Should have at least n+1 entries. 
*/ template <typename IndexType_> void coo_compress(IndexType_ m, IndexType_ n, IndexType_ nnz, const IndexType_ * __restrict__ sortedIndices, IndexType_ * __restrict__ compressedIndices) { IndexType_ i; // Initialize everything to zero memset(compressedIndices, 0, (m+1)*sizeof(IndexType_)); // Count number of elements per row for(i=0; i<nnz; ++i) ++(compressedIndices[sortedIndices[i]+1]); // Compute cumulative sum to obtain row offsets/pointers for(i=0; i<m; ++i) compressedIndices[i+1] += compressedIndices[i]; } /// Convert COO format matrix to CSR format /** On output, matrix entries in COO format matrix will be sorted * (primarily by row index, secondarily by column index). * * @param m Number of matrix rows. * @param n Number of matrix columns. * @param nnz Number of non-zero matrix entries. * @param cooRowInd Row indices for COO matrix. * @param cooColInd Column indices for COO matrix. * @param cooRVal Real component of COO matrix entries. Ignored if * null pointer. * @param cooIVal Imaginary component of COO matrix entries. Ignored * if null pointer. * @param csrRowPtr Row pointers for CSR matrix. Should have at least * n+1 entries. * @param csrColInd Column indices for CSR matrix (identical to * output of cooColInd). Should have at least nnz entries. Ignored if * null pointer. * @param csrRVal Real component of CSR matrix entries (identical to * output of cooRVal). Should have at least nnz entries. Ignored if * null pointer. * @param csrIVal Imaginary component of CSR matrix entries * (identical to output of cooIVal). Should have at least nnz * entries. Ignored if null pointer. * @return Zero if matrix was converted successfully. Otherwise * non-zero. 
*/ template <typename IndexType_, typename ValueType_> int coo_to_csr(IndexType_ m, IndexType_ n, IndexType_ nnz, IndexType_ * __restrict__ cooRowInd, IndexType_ * __restrict__ cooColInd, ValueType_ * __restrict__ cooRVal, ValueType_ * __restrict__ cooIVal, IndexType_ * __restrict__ csrRowPtr, IndexType_ * __restrict__ csrColInd, ValueType_ * __restrict__ csrRVal, ValueType_ * __restrict__ csrIVal) { // Convert COO to CSR matrix coo_sort(nnz, 0, cooRowInd, cooColInd, cooRVal, cooIVal); coo_sort(nnz, 1, cooRowInd, cooColInd, cooRVal, cooIVal); coo_compress(m, n, nnz, cooRowInd, csrRowPtr); // Copy arrays if(csrColInd!=NULL) memcpy(csrColInd, cooColInd, nnz*sizeof(IndexType_)); if((cooRVal!=NULL) && (csrRVal!=NULL)) memcpy(csrRVal, cooRVal, nnz*sizeof(ValueType_)); if((cooIVal!=NULL) && (csrIVal!=NULL)) memcpy(csrIVal, cooIVal, nnz*sizeof(ValueType_)); return 0; } int read_binary_vector ( FILE* fpin, int n, std::vector<float>& val ) { size_t is_read1; double* t_storage = new double[n]; is_read1 = fread(t_storage, sizeof(double), n, fpin); for (int i = 0; i < n; i++) { if (t_storage[i] == DBL_MAX) val[i] = FLT_MAX; else if (t_storage[i] == -DBL_MAX) val[i] = -FLT_MAX; else val[i] = static_cast<float>(t_storage[i]); } delete[] t_storage; if (is_read1 != (size_t)n) { printf("%s", "I/O fail\n"); return 1; } return 0; } int read_binary_vector ( FILE* fpin, int n, std::vector<double>& val ) { size_t is_read1; is_read1 = fread(&val[0], sizeof(double), n, fpin); if (is_read1 != (size_t)n) { printf("%s", "I/O fail\n"); return 1; } return 0; } // Creates a gdf_column from a std::vector template <typename col_type> gdf_column_ptr create_gdf_column(std::vector<col_type> const & host_vector) { // Create a new instance of a gdf_column with a custom deleter that will free // the associated device memory when it eventually goes out of scope gdf_column_ptr the_column{new gdf_column, gdf_col_deleter}; // Allocate device storage for gdf_column and copy contents from host_vector const 
size_t input_size_bytes = host_vector.size() * sizeof(col_type); cudaStream_t stream{nullptr}; ALLOC_MANAGED_TRY((void**)&(the_column->data), input_size_bytes, stream); cudaMemcpy(the_column->data, host_vector.data(), input_size_bytes, cudaMemcpyHostToDevice); // Deduce the type and set the gdf_dtype accordingly gdf_dtype gdf_col_type; if(std::is_same<col_type,int8_t>::value) gdf_col_type = GDF_INT8; else if(std::is_same<col_type,uint8_t>::value) gdf_col_type = GDF_INT8; else if(std::is_same<col_type,int16_t>::value) gdf_col_type = GDF_INT16; else if(std::is_same<col_type,uint16_t>::value) gdf_col_type = GDF_INT16; else if(std::is_same<col_type,int32_t>::value) gdf_col_type = GDF_INT32; else if(std::is_same<col_type,uint32_t>::value) gdf_col_type = GDF_INT32; else if(std::is_same<col_type,int64_t>::value) gdf_col_type = GDF_INT64; else if(std::is_same<col_type,uint64_t>::value) gdf_col_type = GDF_INT64; else if(std::is_same<col_type,float>::value) gdf_col_type = GDF_FLOAT32; else if(std::is_same<col_type,double>::value) gdf_col_type = GDF_FLOAT64; // Fill the gdf_column members the_column->valid = nullptr; the_column->null_count = 0; the_column->size = host_vector.size(); the_column->dtype = gdf_col_type; gdf_dtype_extra_info extra_info; extra_info.time_unit = TIME_UNIT_NONE; the_column->dtype_info = extra_info; return the_column; } // Creates a gdf_column from a std::vector template <typename col_type> void create_gdf_column(std::vector<col_type> const & host_vector, gdf_column * the_column) { // Allocate device storage for gdf_column and copy contents from host_vector const size_t input_size_bytes = host_vector.size() * sizeof(col_type); cudaStream_t stream{nullptr}; ALLOC_MANAGED_TRY((void**)&(the_column->data), input_size_bytes, stream); cudaMemcpy(the_column->data, host_vector.data(), input_size_bytes, cudaMemcpyHostToDevice); // Deduce the type and set the gdf_dtype accordingly gdf_dtype gdf_col_type; if(std::is_same<col_type,int8_t>::value) gdf_col_type = 
GDF_INT8; else if(std::is_same<col_type,uint8_t>::value) gdf_col_type = GDF_INT8; else if(std::is_same<col_type,int16_t>::value) gdf_col_type = GDF_INT16; else if(std::is_same<col_type,uint16_t>::value) gdf_col_type = GDF_INT16; else if(std::is_same<col_type,int32_t>::value) gdf_col_type = GDF_INT32; else if(std::is_same<col_type,uint32_t>::value) gdf_col_type = GDF_INT32; else if(std::is_same<col_type,int64_t>::value) gdf_col_type = GDF_INT64; else if(std::is_same<col_type,uint64_t>::value) gdf_col_type = GDF_INT64; else if(std::is_same<col_type,float>::value) gdf_col_type = GDF_FLOAT32; else if(std::is_same<col_type,double>::value) gdf_col_type = GDF_FLOAT64; // Fill the gdf_column members the_column->valid = nullptr; the_column->null_count = 0; the_column->size = host_vector.size(); the_column->dtype = gdf_col_type; gdf_dtype_extra_info extra_info; extra_info.time_unit = TIME_UNIT_NONE; the_column->dtype_info = extra_info; } void gdf_col_delete(gdf_column* col) { if (col) { col->size = 0; cudaStream_t stream{nullptr}; if(col->data) ALLOC_FREE_TRY(col->data, stream); #if 1 // If delete col is executed, the memory pointed by col is no longer valid and // can be used in another memory allocation, so executing col->data = nullptr // after delete col is dangerous, also, col = nullptr has no effect here (the // address is passed by value, for col = nullptr should work, the input // parameter should be gdf_column*& col (or alternatively, gdf_column** col and // *col = nullptr also work) col->data = nullptr; delete col; #else delete col; col->data = nullptr; col = nullptr; #endif } }
/* ======================== profile.c ======================== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR OOO FFFFF IIIII L EEEEE % % P P R R O O F I L E % % PPPP RRRR O O FFF I L EEE % % P R R O O F I L E % % P R R OOO F IIIII LLLLL EEEEE % % % % % % MagickCore Image Profile Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/colorspace-private.h" #include "magick/configure.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/hashmap.h" #include "magick/image.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/option-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #if defined(MAGICKCORE_LCMS_DELEGATE) #if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H) #include <wchar.h> #include <lcms/lcms2.h> #else #include <wchar.h> #include "lcms2.h" #endif #endif #if defined(MAGICKCORE_XML_DELEGATE) # if defined(MAGICKCORE_WINDOWS_SUPPORT) # if !defined(__MINGW32__) # include <win32config.h> # endif # endif # include <libxml/parser.h> # include <libxml/tree.h> #endif /* Forward declarations */ static MagickBooleanType SetImageProfileInternal(Image *,const char *,const StringInfo *, const MagickBooleanType); static void WriteTo8BimProfile(Image *,const char*,const StringInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageProfiles() clones one or more image profiles. % % The format of the CloneImageProfiles method is: % % MagickBooleanType CloneImageProfiles(Image *image, % const Image *clone_image) % % A description of each parameter follows: % % o image: the image. % % o clone_image: the clone image. 
% */ MagickExport MagickBooleanType CloneImageProfiles(Image *image, const Image *clone_image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clone_image != (const Image *) NULL); assert(clone_image->signature == MagickCoreSignature); image->color_profile.length=clone_image->color_profile.length; image->color_profile.info=clone_image->color_profile.info; image->iptc_profile.length=clone_image->iptc_profile.length; image->iptc_profile.info=clone_image->iptc_profile.info; if (clone_image->profiles != (void *) NULL) { if (image->profiles != (void *) NULL) DestroyImageProfiles(image); image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles, (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e l e t e I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DeleteImageProfile() deletes a profile from the image by its name. % % The format of the DeleteImageProfile method is: % % MagickBooleanTyupe DeleteImageProfile(Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. % */ MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return(MagickFalse); if (LocaleCompare(name,"icc") == 0) { /* Continue to support deprecated color profile for now. 
*/ image->color_profile.length=0; image->color_profile.info=(unsigned char *) NULL; } if (LocaleCompare(name,"iptc") == 0) { /* Continue to support deprecated IPTC profile for now. */ image->iptc_profile.length=0; image->iptc_profile.info=(unsigned char *) NULL; } WriteTo8BimProfile(image,name,(StringInfo *) NULL); return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageProfiles() releases memory associated with an image profile map. % % The format of the DestroyProfiles method is: % % void DestroyImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImageProfiles(Image *image) { if (image->profiles != (SplayTreeInfo *) NULL) image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageProfile() gets a profile associated with an image by name. % % The format of the GetImageProfile method is: % % const StringInfo *GetImageProfile(const Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. 
% */ MagickExport const StringInfo *GetImageProfile(const Image *image, const char *name) { const StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((StringInfo *) NULL); profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *) image->profiles,name); return(profile); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t N e x t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNextImageProfile() gets the next profile name for an image. % % The format of the GetNextImageProfile method is: % % char *GetNextImageProfile(const Image *image) % % A description of each parameter follows: % % o hash_info: the hash info. % */ MagickExport char *GetNextImageProfile(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((char *) NULL); return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r o f i l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ProfileImage() associates, applies, or removes an ICM, IPTC, or generic % profile with / to / from an image. If the profile is NULL, it is removed % from the image otherwise added or applied. Use a name of '*' and a profile % of NULL to remove all profiles from the image. 
% % ICC and ICM profiles are handled as follows: If the image does not have % an associated color profile, the one you provide is associated with the % image and the image pixels are not transformed. Otherwise, the colorspace % transform defined by the existing and new profile are applied to the image % pixels and the new profile is associated with the image. % % The format of the ProfileImage method is: % % MagickBooleanType ProfileImage(Image *image,const char *name, % const void *datum,const size_t length,const MagickBooleanType clone) % % A description of each parameter follows: % % o image: the image. % % o name: Name of profile to add or remove: ICC, IPTC, or generic profile. % % o datum: the profile data. % % o length: the length of the profile. % % o clone: should be MagickFalse. % */ #if defined(MAGICKCORE_LCMS_DELEGATE) typedef struct _LCMSInfo { ColorspaceType colorspace; cmsUInt32Number type; size_t channels; cmsHPROFILE profile; int intent; double **magick_restrict pixels, scale, translate; } LCMSInfo; #if LCMS_VERSION < 2060 static void* cmsGetContextUserData(cmsContext ContextID) { return(ContextID); } static cmsContext cmsCreateContext(void *magick_unused(Plugin),void *UserData) { magick_unreferenced(Plugin); return((cmsContext) UserData); } static void cmsSetLogErrorHandlerTHR(cmsContext magick_unused(ContextID), cmsLogErrorHandlerFunction Fn) { magick_unreferenced(ContextID); cmsSetLogErrorHandler(Fn); } static void cmsDeleteContext(cmsContext magick_unused(ContextID)) { magick_unreferenced(ContextID); } #endif static double **DestroyPixelThreadSet(double **pixels) { register ssize_t i; if (pixels == (double **) NULL) return((double **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (double *) NULL) pixels[i]=(double *) RelinquishMagickMemory(pixels[i]); pixels=(double **) RelinquishMagickMemory(pixels); return(pixels); } static double **AcquirePixelThreadSet(const size_t columns, const size_t 
channels) { double **pixels; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(double **) AcquireQuantumMemory(number_threads,sizeof(*pixels)); if (pixels == (double **) NULL) return((double **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=(double *) AcquireQuantumMemory(columns,channels* sizeof(**pixels)); if (pixels[i] == (double *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform) { register ssize_t i; assert(transform != (cmsHTRANSFORM *) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (transform[i] != (cmsHTRANSFORM) NULL) cmsDeleteTransform(transform[i]); transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform); return(transform); } static cmsHTRANSFORM *AcquireTransformThreadSet(const LCMSInfo *source_info, const LCMSInfo *target_info,const cmsUInt32Number flags, cmsContext cms_context) { cmsHTRANSFORM *transform; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads, sizeof(*transform)); if (transform == (cmsHTRANSFORM *) NULL) return((cmsHTRANSFORM *) NULL); (void) memset(transform,0,number_threads*sizeof(*transform)); for (i=0; i < (ssize_t) number_threads; i++) { transform[i]=cmsCreateTransformTHR(cms_context,source_info->profile, source_info->type,target_info->profile,target_info->type, target_info->intent,flags); if (transform[i] == (cmsHTRANSFORM) NULL) return(DestroyTransformThreadSet(transform)); } return(transform); } static void LCMSExceptionHandler(cmsContext context,cmsUInt32Number severity, const char *message) { Image *image; (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s", severity,message != (char *) NULL ? 
message : "no message"); image=(Image *) cmsGetContextUserData(context); if (image != (Image *) NULL) (void) ThrowMagickException(&image->exception,GetMagickModule(), ImageWarning,"UnableToTransformColorspace","`%s'",image->filename); } #endif static MagickBooleanType SetsRGBImageProfile(Image *image) { static unsigned char sRGBProfile[] = { 0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00, 0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20, 0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a, 0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67, 0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70, 0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88, 0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c, 0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24, 0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14, 
0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d, 0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57, 0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65, 0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 
0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c, 0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2, 0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d, 0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0, 0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87, 0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19, 0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37, 0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54, 0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72, 0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90, 0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae, 0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb, 0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb, 0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d, 0x01, 0x13, 0x01, 
0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32, 0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59, 0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83, 0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1, 0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1, 0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14, 0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b, 0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84, 0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1, 0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00, 0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43, 0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a, 0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3, 0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20, 0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71, 0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4, 0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c, 0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77, 0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5, 0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37, 0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d, 0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07, 0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74, 0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5, 0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a, 0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2, 0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f, 0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf, 
0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54, 0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc, 0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69, 0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9, 0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e, 0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26, 0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3, 0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64, 0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09, 0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3, 0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61, 0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13, 0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9, 0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84, 0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43, 0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06, 0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce, 0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b, 0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c, 0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41, 0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b, 0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa, 0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd, 0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5, 0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2, 0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3, 0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99, 0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 
0x6a, 0x1e, 0x94, 0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94, 0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98, 0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1, 0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf, 0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2, 0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda, 0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7, 0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18, 0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f, 0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b, 0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b, 0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1, 0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c, 0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c, 0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91, 0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb, 0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a, 0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f, 0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8, 0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37, 0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c, 0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05, 0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74, 0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8, 0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61, 0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0, 0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64, 0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 
0x41, 0x6a, 0x41, 0xac, 0x41, 0xee, 0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d, 0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12, 0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab, 0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b, 0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0, 0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a, 0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a, 0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00, 0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb, 0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c, 0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42, 0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f, 0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0, 0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8, 0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95, 0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78, 0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61, 0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f, 0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43, 0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d, 0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d, 0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43, 0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f, 0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60, 0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78, 0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95, 0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8, 0x74, 0x14, 0x74, 
0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1, 0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11, 0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46, 0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81, 0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2, 0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a, 0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57, 0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab, 0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04, 0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64, 0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca, 0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36, 0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8, 0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20, 0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f, 0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24, 0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf, 0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40, 0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8, 0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76, 0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a, 0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4, 0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75, 0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d, 0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea, 0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae, 0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79, 0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a, 
0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21, 0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff, 0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3, 0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce, 0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf, 0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7, 0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5, 0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba, 0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6, 0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8, 0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1, 0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10, 0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36, 0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63, 0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96, 0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0, 0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11, 0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58, 0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7, 0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb, 0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57, 0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba, 0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (GetImageProfile(image,"icc") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(sRGBProfile)); SetStringInfoDatum(profile,sRGBProfile); status=SetImageProfile(image,"icc",profile); 
profile=DestroyStringInfo(profile);
  return(status);
}

MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
  const void *datum,const size_t length,
  const MagickBooleanType magick_unused(clone))
{
/* Map a quantum sample into the double range LCMS expects for this space. */
#define GetLCMSPixel(source_info,pixel) \
  (source_info.scale*QuantumScale*(pixel)+source_info.translate)
#define ProfileImageTag  "Profile/Image"
/* Map an LCMS double sample back to a clamped quantum. */
#define SetLCMSPixel(target_info,pixel) \
  ClampToQuantum(target_info.scale*QuantumRange*(pixel)+target_info.translate)
/* Release every resource acquired so far, then throw.  Relies on profile,
   cms_context, source_info and target_info being in scope and initialized. */
#define ThrowProfileException(severity,tag,context) \
{ \
  if (profile != (StringInfo *) NULL) \
    profile=DestroyStringInfo(profile); \
  if (cms_context != (cmsContext) NULL) \
    cmsDeleteContext(cms_context); \
  if (source_info.profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(source_info.profile); \
  if (target_info.profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(target_info.profile); \
  ThrowBinaryException(severity,tag,context); \
}

  MagickBooleanType
    status;

  StringInfo
    *profile;

  magick_unreferenced(clone);

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(name != (const char *) NULL);
  if ((datum == (const void *) NULL) || (length == 0))
    {
      char
        *next;

      /*
        Delete image profile(s): a NULL/empty datum means remove every
        profile whose name matches the (possibly wildcard) name option.
      */
      ResetImageProfileIterator(image);
      for (next=GetNextImageProfile(image); next != (const char *) NULL; )
      {
        if (IsOptionMember(next,name) != MagickFalse)
          {
            (void) DeleteImageProfile(image,next);
            /* Deletion invalidates the iterator; restart the scan. */
            ResetImageProfileIterator(image);
          }
        next=GetNextImageProfile(image);
      }
      return(MagickTrue);
    }
  /*
    Add a ICC, IPTC, or generic profile to the image.
  */
  status=MagickTrue;
  profile=AcquireStringInfo((size_t) length);
  SetStringInfoDatum(profile,(unsigned char *) datum);
  if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
    status=SetImageProfile(image,name,profile);
  else
    {
      const StringInfo
        *icc_profile;

      icc_profile=GetImageProfile(image,"icc");
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          const char
            *value;

          /*
            Incoming profile is byte-identical to the current one; if EXIF
            metadata suggests an sRGB source, attach the built-in sRGB
            profile instead.
            NOTE(review): value may be NULL when the property is absent —
            presumably LocaleCompare tolerates NULL; confirm.
          */
          value=GetImageProperty(image,"exif:ColorSpace");
          (void) value;
          if (LocaleCompare(value,"1") != 0)
            (void) SetsRGBImageProfile(image);
          value=GetImageProperty(image,"exif:InteroperabilityIndex");
          if (LocaleCompare(value,"R98.") != 0)
            (void) SetsRGBImageProfile(image);
          icc_profile=GetImageProfile(image,"icc");
        }
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          /* Still identical after the sRGB substitution: nothing to do. */
          profile=DestroyStringInfo(profile);
          return(MagickTrue);
        }
#if !defined(MAGICKCORE_LCMS_DELEGATE)
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (LCMS)",
        image->filename);
#else
      {
        cmsContext
          cms_context;

        LCMSInfo
          source_info,
          target_info;

        /*
          Transform pixel colors as defined by the color profiles.
        */
        cms_context=cmsCreateContext(NULL,image);
        if (cms_context == (cmsContext) NULL)
          ThrowBinaryImageException(ResourceLimitError,
            "ColorspaceColorProfileMismatch",name);
        cmsSetLogErrorHandlerTHR(cms_context,LCMSExceptionHandler);
        source_info.profile=cmsOpenProfileFromMemTHR(cms_context,
          GetStringInfoDatum(profile),(cmsUInt32Number)
          GetStringInfoLength(profile));
        if (source_info.profile == (cmsHPROFILE) NULL)
          {
            cmsDeleteContext(cms_context);
            ThrowBinaryImageException(ResourceLimitError,
              "ColorspaceColorProfileMismatch",name);
          }
        if ((cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass) &&
            (icc_profile == (StringInfo *) NULL))
          /* No existing ICC profile: just attach the new one, no transform. */
          status=SetImageProfile(image,name,profile);
        else
          {
            CacheView
              *image_view;

            cmsColorSpaceSignature
              signature;

            cmsHTRANSFORM
              *magick_restrict transform;

            cmsUInt32Number
              flags;

            ExceptionInfo
              *exception;

            MagickOffsetType
              progress;

            ssize_t
              y;

            exception=(&image->exception);
            target_info.profile=(cmsHPROFILE) NULL;
            if (icc_profile != (StringInfo *) NULL)
              {
                /*
                  Transform from the image's current profile (source) to the
                  newly supplied one (target).
                */
                target_info.profile=source_info.profile;
                source_info.profile=cmsOpenProfileFromMemTHR(cms_context,
                  GetStringInfoDatum(icc_profile),(cmsUInt32Number)
                  GetStringInfoLength(icc_profile));
                if (source_info.profile == (cmsHPROFILE) NULL)
                  ThrowProfileException(ResourceLimitError,
                    "ColorspaceColorProfileMismatch",name);
              }
            /*
              Configure the source endpoint from the profile's color space;
              scale/translate map quantum samples into LCMS double ranges
              (e.g. CMYK percentages, Lab with a/b offset).
            */
            source_info.scale=1.0;
            source_info.translate=0.0;
            source_info.colorspace=sRGBColorspace;
            source_info.channels=3;
            switch (cmsGetColorSpace(source_info.profile))
            {
              case cmsSigCmykData:
              {
                source_info.colorspace=CMYKColorspace;
                source_info.channels=4;
                source_info.type=(cmsUInt32Number) TYPE_CMYK_DBL;
                source_info.scale=100.0;
                break;
              }
              case cmsSigGrayData:
              {
                source_info.colorspace=GRAYColorspace;
                source_info.channels=1;
                source_info.type=(cmsUInt32Number) TYPE_GRAY_DBL;
                break;
              }
              case cmsSigLabData:
              {
                source_info.colorspace=LabColorspace;
                source_info.type=(cmsUInt32Number) TYPE_Lab_DBL;
                source_info.scale=100.0;
                source_info.translate=(-0.5);
                break;
              }
              case cmsSigRgbData:
              {
                source_info.colorspace=sRGBColorspace;
                source_info.type=(cmsUInt32Number) TYPE_RGB_DBL;
                break;
              }
              case cmsSigXYZData:
              {
                source_info.colorspace=XYZColorspace;
                source_info.type=(cmsUInt32Number) TYPE_XYZ_DBL;
                break;
              }
              default:
                ThrowProfileException(ImageError,
                  "ColorspaceColorProfileMismatch",name);
            }
            /*
              Target endpoint: the target profile's color space when one
              exists, otherwise the source profile's PCS.
            */
            signature=cmsGetPCS(source_info.profile);
            if (target_info.profile != (cmsHPROFILE) NULL)
              signature=cmsGetColorSpace(target_info.profile);
            target_info.scale=1.0;
            target_info.translate=0.0;
            target_info.channels=3;
            switch (signature)
            {
              case cmsSigCmykData:
              {
                target_info.colorspace=CMYKColorspace;
                target_info.channels=4;
                target_info.type=(cmsUInt32Number) TYPE_CMYK_DBL;
                target_info.scale=0.01;
                break;
              }
              case cmsSigGrayData:
              {
                target_info.colorspace=GRAYColorspace;
                target_info.channels=1;
                target_info.type=(cmsUInt32Number) TYPE_GRAY_DBL;
                break;
              }
              case cmsSigLabData:
              {
                target_info.colorspace=LabColorspace;
                target_info.type=(cmsUInt32Number) TYPE_Lab_DBL;
                target_info.scale=0.01;
                target_info.translate=0.5;
                break;
              }
              case cmsSigRgbData:
              {
                target_info.colorspace=sRGBColorspace;
                target_info.type=(cmsUInt32Number) TYPE_RGB_DBL;
                break;
              }
              case cmsSigXYZData:
              {
                target_info.colorspace=XYZColorspace;
                target_info.type=(cmsUInt32Number) TYPE_XYZ_DBL;
                break;
              }
              default:
                ThrowProfileException(ImageError,
                  "ColorspaceColorProfileMismatch",name);
            }
            /* Map the image's rendering intent onto the LCMS intent. */
            switch (image->rendering_intent)
            {
              case AbsoluteIntent:
              {
                target_info.intent=INTENT_ABSOLUTE_COLORIMETRIC;
                break;
              }
              case PerceptualIntent:
              {
                target_info.intent=INTENT_PERCEPTUAL;
                break;
              }
              case RelativeIntent:
              {
                target_info.intent=INTENT_RELATIVE_COLORIMETRIC;
                break;
              }
              case SaturationIntent:
              {
                target_info.intent=INTENT_SATURATION;
                break;
              }
              default:
              {
                target_info.intent=INTENT_PERCEPTUAL;
                break;
              }
            }
            flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
            if (image->black_point_compensation != MagickFalse)
              flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
            transform=AcquireTransformThreadSet(&source_info,&target_info,
              flags,cms_context);
            if (transform == (cmsHTRANSFORM *) NULL)
              ThrowProfileException(ImageError,"UnableToCreateColorTransform",
                name);
            /*
              Transform image as dictated by the source & target image profiles.
            */
            source_info.pixels=AcquirePixelThreadSet(image->columns,
              source_info.channels);
            target_info.pixels=AcquirePixelThreadSet(image->columns,
              target_info.channels);
            if ((source_info.pixels == (double **) NULL) ||
                (target_info.pixels == (double **) NULL))
              {
                target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
                source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
                transform=DestroyTransformThreadSet(transform);
                ThrowProfileException(ResourceLimitError,
                  "MemoryAllocationFailed",image->filename);
              }
            if (SetImageStorageClass(image,DirectClass) == MagickFalse)
              {
                target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
                source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
                transform=DestroyTransformThreadSet(transform);
                profile=DestroyStringInfo(profile);
                if (source_info.profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(source_info.profile);
                if (target_info.profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(target_info.profile);
                return(MagickFalse);
              }
            if (target_info.colorspace == CMYKColorspace)
              (void) SetImageColorspace(image,target_info.colorspace);
            progress=0;
            image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel for schedule(static) shared(status) \
              magick_number_threads(image,image,image->rows,1)
#endif
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              const int
                id = GetOpenMPThreadId();

              MagickBooleanType
                sync;

              register IndexPacket
                *magick_restrict indexes;

              register double
                *p;

              register PixelPacket
                *magick_restrict q;

              register ssize_t
                x;

              if (status == MagickFalse)
                continue;
              q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
                exception);
              if (q == (PixelPacket *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              indexes=GetCacheViewAuthenticIndexQueue(image_view);
              /*
                Pack this row's quantum samples into the thread's double
                buffer in the layout the source transform expects.
              */
              p=source_info.pixels[id];
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                *p++=GetLCMSPixel(source_info,GetPixelRed(q));
                if (source_info.channels > 1)
                  {
                    *p++=GetLCMSPixel(source_info,GetPixelGreen(q));
                    *p++=GetLCMSPixel(source_info,GetPixelBlue(q));
                  }
                if (source_info.channels > 3)
                  {
                    /* 4th (black) channel lives in the index queue. */
                    *p=GetLCMSPixel(source_info,0);
                    if (indexes != (IndexPacket *) NULL)
                      *p=GetLCMSPixel(source_info,GetPixelIndex(indexes+x));
                    p++;
                  }
                q++;
              }
              cmsDoTransform(transform[id],source_info.pixels[id],
                target_info.pixels[id],(unsigned int) image->columns);
              /* Unpack the transformed doubles back into the pixel row. */
              p=target_info.pixels[id];
              q-=image->columns;
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                SetPixelRed(q,SetLCMSPixel(target_info,*p));
                /* Fan red out to green/blue for a 1-channel (gray) target;
                   overwritten below when the target has >1 channels. */
                SetPixelGreen(q,GetPixelRed(q));
                SetPixelBlue(q,GetPixelRed(q));
                p++;
                if (target_info.channels > 1)
                  {
                    SetPixelGreen(q,SetLCMSPixel(target_info,*p));
                    p++;
                    SetPixelBlue(q,SetLCMSPixel(target_info,*p));
                    p++;
                  }
                if (target_info.channels > 3)
                  {
                    if (indexes != (IndexPacket *) NULL)
                      SetPixelIndex(indexes+x,SetLCMSPixel(target_info,*p));
                    p++;
                  }
                q++;
              }
              sync=SyncCacheViewAuthenticPixels(image_view,exception);
              if (sync == MagickFalse)
                status=MagickFalse;
              if (image->progress_monitor != (MagickProgressMonitor) NULL)
                {
                  MagickBooleanType
                    proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
                  #pragma omp atomic
#endif
                  progress++;
                  proceed=SetImageProgress(image,ProfileImageTag,progress,
                    image->rows);
                  if (proceed == MagickFalse)
                    status=MagickFalse;
                }
            }
            image_view=DestroyCacheView(image_view);
            (void) SetImageColorspace(image,target_info.colorspace);
            /* Update the image type to match the target color space. */
            switch (signature)
            {
              case cmsSigRgbData:
              {
                image->type=image->matte == MagickFalse ? TrueColorType :
                  TrueColorMatteType;
                break;
              }
              case cmsSigCmykData:
              {
                image->type=image->matte == MagickFalse ? ColorSeparationType :
                  ColorSeparationMatteType;
                break;
              }
              case cmsSigGrayData:
              {
                image->type=image->matte == MagickFalse ? GrayscaleType :
                  GrayscaleMatteType;
                break;
              }
              default:
                break;
            }
            target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
            source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
            transform=DestroyTransformThreadSet(transform);
            if ((status != MagickFalse) &&
                (cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass))
              status=SetImageProfile(image,name,profile);
            if (target_info.profile != (cmsHPROFILE) NULL)
              (void) cmsCloseProfile(target_info.profile);
          }
        (void) cmsCloseProfile(source_info.profile);
        cmsDeleteContext(cms_context);
      }
#endif
    }
  profile=DestroyStringInfo(profile);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m o v e I m a g e P r o f i l e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemoveImageProfile() removes a named profile from the image and returns its
%  value.
%
%  The format of the RemoveImageProfile method is:
%
%      void *RemoveImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  if (LocaleCompare(name,"icc") == 0)
    {
      /*
        Continue to support deprecated color profile for now.
      */
      image->color_profile.length=0;
      image->color_profile.info=(unsigned char *) NULL;
    }
  if (LocaleCompare(name,"iptc") == 0)
    {
      /*
        Continue to support deprecated IPTC profile for now.
*/
      image->iptc_profile.length=0;
      image->iptc_profile.info=(unsigned char *) NULL;
    }
  /*
    Keep any embedded 8BIM wrapper consistent: passing a NULL profile strips
    the matching resource from the "8bim" profile as well.
  */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t P r o f i l e I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImageProfileIterator() resets the image profile iterator.  Use it in
%  conjunction with GetNextImageProfile() to iterate over all the profiles
%  associated with an image.
%
%  The format of the ResetImageProfileIterator method is:
%
%      ResetImageProfileIterator(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* No profiles attached: nothing to iterate over. */
  if (image->profiles == (SplayTreeInfo *) NULL)
    return;
  ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e P r o f i l e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageProfile() adds a named profile to the image.  If a profile with the
%  same name already exists, it is replaced.  This method differs from the
%  ProfileImage() method in that it does not apply CMS color profiles.
%
%  The format of the SetImageProfile method is:
%
%      MagickBooleanType SetImageProfile(Image *image,const char *name,
%        const StringInfo *profile)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name, for example icc, exif, and 8bim (8bim is the
%      Photoshop wrapper for iptc profiles).
%
%    o profile: A StringInfo structure that contains the named profile.
%
*/

/*
  Splay-tree value destructor: releases a StringInfo profile stored in
  image->profiles.
*/
static void *DestroyProfile(void *profile)
{
  return((void *) DestroyStringInfo((StringInfo *) profile));
}

/*
  Read one byte from a big-endian resource stream; returns the advanced
  pointer.  Callers are responsible for bounds checking.
*/
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  *quantum=(*p++);
  return(p);
}

/*
  Read a 32-bit big-endian (MSB-first) value; returns the advanced pointer.
*/
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  unsigned int *quantum)
{
  *quantum=(unsigned int) (*p++) << 24;
  *quantum|=(unsigned int) (*p++) << 16;
  *quantum|=(unsigned int) (*p++) << 8;
  *quantum|=(unsigned int) (*p++);
  return(p);
}

/*
  Read a 16-bit big-endian (MSB-first) value; returns the advanced pointer.
*/
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  *quantum=(unsigned short) (*p++) << 8;
  *quantum|=(unsigned short) (*p++);
  return(p);
}

/*
  Write a 32-bit value MSB-first at p; p must have room for 4 bytes.
*/
static inline void WriteResourceLong(unsigned char *p,
  const unsigned int quantum)
{
  unsigned char
    buffer[4];

  buffer[0]=(unsigned char) (quantum >> 24);
  buffer[1]=(unsigned char) (quantum >> 16);
  buffer[2]=(unsigned char) (quantum >> 8);
  buffer[3]=(unsigned char) quantum;
  (void) memcpy(p,buffer,4);
}

/*
  Rewrite the named profile inside the image's Photoshop "8bim" resource
  block: the matching resource (icc/iptc/xmp) is replaced with the supplied
  profile, or removed entirely when profile is NULL.  The 8bim profile is
  rebuilt into a fresh StringInfo and re-attached to the splay-tree.
*/
static void WriteTo8BimProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  const unsigned char
    *datum,
    *q;

  register const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile_8bim;

  ssize_t
    count;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id,
    profile_id;

  /* Map the profile name to its Photoshop resource id; other names are
     not wrapped in 8BIM and are ignored. */
  if (LocaleCompare(name,"icc") == 0)
    profile_id=0x040f;
  else
    if (LocaleCompare(name,"iptc") == 0)
      profile_id=0x0404;
    else
      if (LocaleCompare(name,"xmp") == 0)
        profile_id=0x0424;
      else
        return;
  profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,"8bim");
  if (profile_8bim == (StringInfo *) NULL)
    return;
  datum=GetStringInfoDatum(profile_8bim);
  length=GetStringInfoLength(profile_8bim);
  /* Walk the resource blocks: "8BIM" + id(2) + Pascal name (padded to even)
     + length(4) + data (padded to even). */
  for (p=datum; p < (datum+length-16); )
  {
    q=p;
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;  /* name is padded to an even byte count (incl. length byte) */
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((count & 0x01) != 0)
      count++;  /* resource data is padded to an even length */
    if ((count < 0) || (p > (datum+length-count)) ||
        (count > (ssize_t) length))
      break;
    if (id != profile_id)
      p+=count;
    else
      {
        size_t
          extent,
          offset;

        ssize_t
          extract_extent;

        StringInfo
          *extract_profile;

        extract_extent=0;
        extent=(datum+length)-(p+count);
        if (profile == (StringInfo *) NULL)
          {
            /* Remove: copy everything before this resource header... */
            offset=(q-datum);
            extract_profile=AcquireStringInfo(offset+extent);
            (void) memcpy(extract_profile->datum,datum,offset);
          }
        else
          {
            /* Replace: keep the header, rewrite the 4-byte length field,
               then splice in the new payload (even-padded). */
            offset=(p-datum);
            extract_extent=profile->length;
            if ((extract_extent & 0x01) != 0)
              extract_extent++;
            extract_profile=AcquireStringInfo(offset+extract_extent+extent);
            (void) memcpy(extract_profile->datum,datum,offset-4);
            WriteResourceLong(extract_profile->datum+offset-4,(unsigned int)
              profile->length);
            (void) memcpy(extract_profile->datum+offset,
              profile->datum,profile->length);
          }
        /* ...and append whatever followed the old resource. */
        (void) memcpy(extract_profile->datum+offset+extract_extent,
          p+count,extent);
        (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
          ConstantString("8bim"),CloneStringInfo(extract_profile));
        extract_profile=DestroyStringInfo(extract_profile);
        break;
      }
  }
}

/*
  Scan a Photoshop 8BIM resource block and register the embedded profiles
  (iptc, icc, exif, xmp) on the image; also extracts the stored resolution
  (resource 0x03ed).  Called when an "8bim" profile is attached.
*/
static void GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block)
{
  const unsigned char
    *datum;

  register const unsigned char
    *p;

  size_t
    length;

  ssize_t
    count;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  for (p=datum; p < (datum+length-16); )
  {
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((p > (datum+length-count)) || (count > (ssize_t) length) ||
        (count < 0))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned int
          resolution;

        unsigned short
          units;

        /*
          Resolution.  Stored as 16.16 fixed point (hence the /65536.0).
        */
        if (count < 10)
          break;
        p=ReadResourceLong(p,&resolution);
        image->x_resolution=((double) resolution)/65536.0;
        p=ReadResourceShort(p,&units)+2;
        p=ReadResourceLong(p,&resolution)+4;
        image->y_resolution=((double) resolution)/65536.0;
        /*
          Values are always stored as pixels per inch.
        */
        if ((ResolutionType) units != PixelsPerCentimeterResolution)
          image->units=PixelsPerInchResolution;
        else
          {
            image->units=PixelsPerCentimeterResolution;
            image->x_resolution/=2.54;
            image->y_resolution/=2.54;
          }
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile.  Recursive flag prevents re-writing back to 8bim.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"icc",profile,MagickTrue);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"exif",profile,MagickTrue);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    if ((count & 0x01) != 0)
      p++;  /* skip pad byte of odd-sized resources */
  }
}

#if defined(MAGICKCORE_XML_DELEGATE)
/*
  Validate an XMP profile by attempting to parse it as XML (libxml2).
  Returns MagickTrue only when the document parses cleanly.
*/
static MagickBooleanType ValidateXMPProfile(const StringInfo *profile)
{
  xmlDocPtr
    document;

  /*
    Parse XML profile.
  */
  document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int)
    GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR |
    XML_PARSE_NOWARNING);
  if (document == (xmlDocPtr) NULL)
    return(MagickFalse);
  xmlFreeDoc(document);
  return(MagickTrue);
}
#else
/* Without the XML delegate every XMP profile is treated as invalid. */
static MagickBooleanType ValidateXMPProfile(const StringInfo *profile)
{
  return(MagickFalse);
}
#endif

/*
  Attach a named profile to the image splay-tree.  When recursive is
  MagickFalse the profile is also mirrored into the 8bim wrapper; the flag
  breaks the mutual recursion with GetProfilesFromResourceBlock().
  Invalid XMP is rejected with a warning but still reports MagickTrue.
*/
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
  const StringInfo *profile,const MagickBooleanType recursive)
{
  char
    key[MaxTextExtent];

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((LocaleCompare(name,"xmp") == 0) &&
      (ValidateXMPProfile(profile) == MagickFalse))
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ImageWarning,"CorruptImageProfile","`%s'",name);
      return(MagickTrue);
    }
  if (image->profiles == (SplayTreeInfo *) NULL)
    image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
      DestroyProfile);
  (void) CopyMagickString(key,name,MaxTextExtent);
  LocaleLower(key);  /* profile names are stored lowercase */
  status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
    ConstantString(key),CloneStringInfo(profile));
  if ((status != MagickFalse) &&
      ((LocaleCompare(name,"icc") == 0) || (LocaleCompare(name,"icm") == 0)))
    {
      const StringInfo
        *icc_profile;

      /*
        Continue to support deprecated color profile member.
      */
      icc_profile=GetImageProfile(image,name);
      if (icc_profile != (const StringInfo *) NULL)
        {
          image->color_profile.length=GetStringInfoLength(icc_profile);
          image->color_profile.info=GetStringInfoDatum(icc_profile);
        }
    }
  if ((status != MagickFalse) &&
      ((LocaleCompare(name,"iptc") == 0) || (LocaleCompare(name,"8bim") == 0)))
    {
      const StringInfo
        *iptc_profile;

      /*
        Continue to support deprecated IPTC profile member.
      */
      iptc_profile=GetImageProfile(image,name);
      if (iptc_profile != (const StringInfo *) NULL)
        {
          image->iptc_profile.length=GetStringInfoLength(iptc_profile);
          image->iptc_profile.info=GetStringInfoDatum(iptc_profile);
        }
    }
  if (status != MagickFalse)
    {
      if (LocaleCompare(name,"8bim") == 0)
        GetProfilesFromResourceBlock(image,profile);
      else
        if (recursive == MagickFalse)
          WriteTo8BimProfile(image,name,profile);
    }
  return(status);
}

/* Public entry point: non-recursive SetImageProfileInternal(). */
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  return(SetImageProfileInternal(image,name,profile,MagickFalse));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c I m a g e P r o f i l e s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageProfiles() synchronizes image properties with the image profiles.
%  Currently we only support updating the EXIF resolution and orientation.
%
%  The format of the SyncImageProfiles method is:
%
%      MagickBooleanType SyncImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/

/*
  Read one byte from *p, advancing the pointer and decrementing *length.
  Returns EOF when no bytes remain.
*/
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
  int
    c;

  if (*length < 1)
    return(EOF);
  c=(int) (*(*p)++);
  (*length)--;
  return(c);
}

/*
  Read a 16-bit value at buffer honoring the requested endianness.  The
  union round-trip reinterprets the unsigned bit pattern as signed without
  invoking implementation-defined conversion.
*/
static inline signed short ReadProfileShort(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned short
    value;

  if (endian == LSBEndian)
    {
      value=(unsigned short) buffer[1] << 8;
      value|=(unsigned short) buffer[0];
      quantum.unsigned_value=value & 0xffff;
      return(quantum.signed_value);
    }
  value=(unsigned short) buffer[0] << 8;
  value|=(unsigned short) buffer[1];
  quantum.unsigned_value=value & 0xffff;
  return(quantum.signed_value);
}

/*
  Read a 32-bit value at buffer honoring the requested endianness; same
  union-based sign reinterpretation as ReadProfileShort().
*/
static inline signed int ReadProfileLong(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned int
    value;

  if (endian == LSBEndian)
    {
      value=(unsigned int) buffer[3] << 24;
      value|=(unsigned int) buffer[2] << 16;
      value|=(unsigned int) buffer[1] << 8;
      value|=(unsigned int) buffer[0];
      quantum.unsigned_value=value & 0xffffffff;
      return(quantum.signed_value);
    }
  value=(unsigned int) buffer[0] << 24;
  value|=(unsigned int) buffer[1] << 16;
  value|=(unsigned int) buffer[2] << 8;
  value|=(unsigned int) buffer[3];
  quantum.unsigned_value=value & 0xffffffff;
  return(quantum.signed_value);
}

/*
  Bounds-checked big-endian 32-bit read that advances *p and shrinks
  *length; returns 0 when fewer than 4 bytes remain (callers cannot
  distinguish this from a genuine zero value).
*/
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
  signed int
    value;

  if (*length < 4)
    return(0);
  value=ReadProfileLong(MSBEndian,*p);
  (*length)-=4;
  *p+=4;
  return(value);
}

/*
  Bounds-checked big-endian 16-bit read; same advance/return-0 contract as
  ReadProfileMSBLong().
*/
static inline signed short ReadProfileMSBShort(unsigned char **p,
  size_t *length)
{
  signed short
    value;

  if (*length < 2)
    return(0);
  value=ReadProfileShort(MSBEndian,*p);
  (*length)-=2;
  *p+=2;
  return(value);
}

/*
  Write a 32-bit value at p in the requested byte order; p must have room
  for 4 bytes.
*/
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  unsigned char
    buffer[4];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      buffer[2]=(unsigned char) (value >> 16);
      buffer[3]=(unsigned char) (value >> 24);
      (void) memcpy(p,buffer,4);
      return;
    }
  buffer[0]=(unsigned char) (value >> 24);
  buffer[1]=(unsigned char) (value >> 16);
  buffer[2]=(unsigned char) (value >> 8);
  buffer[3]=(unsigned char) value;
  (void) memcpy(p,buffer,4);
}

/*
  Write a 16-bit value at p in the requested byte order; p must have room
  for 2 bytes.
*/
static void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  unsigned char
    buffer[2];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      (void) memcpy(p,buffer,2);
      return;
    }
  buffer[0]=(unsigned char) (value >> 8);
  buffer[1]=(unsigned char) value;
  (void) memcpy(p,buffer,2);
}

/*
  Update the resolution fields stored inside an 8BIM profile (resource
  0x03ED) in place so they match the current image properties.  Resolutions
  are written as 16.16 fixed point, always in pixels per inch.
*/
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
  size_t
    length;

  ssize_t
    count;

  unsigned char
    *p;

  unsigned short
    id;

  length=GetStringInfoLength(profile);
  p=GetStringInfoDatum(profile);
  while (length != 0)
  {
    /* Hunt for the "8BIM" signature byte-by-byte. */
    if (ReadProfileByte(&p,&length) != 0x38)
      continue;
    if (ReadProfileByte(&p,&length) != 0x42)
      continue;
    if (ReadProfileByte(&p,&length) != 0x49)
      continue;
    if (ReadProfileByte(&p,&length) != 0x4D)
      continue;
    if (length < 7)
      return(MagickFalse);
    id=ReadProfileMSBShort(&p,&length);
    count=(ssize_t) ReadProfileByte(&p,&length);  /* Pascal-string name length */
    if ((count >= (ssize_t) length) || (count < 0))
      return(MagickFalse);
    p+=count;
    length-=count;
    /* NOTE(review): *p is dereferenced here after the name skip without a
       fresh length check — verify length > 0 is guaranteed by the test
       above before modifying this logic. */
    if ((*p & 0x01) == 0)
      (void) ReadProfileByte(&p,&length);  /* even-pad the name field */
    count=(ssize_t) ReadProfileMSBLong(&p,&length);
    if ((count > (ssize_t) length) || (count < 0))
      return(MagickFalse);
    if ((id == 0x3ED) && (count == 16))
      {
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->x_resolution*2.54*
            65536.0),p);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->x_resolution*
            65536.0),p);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->y_resolution*2.54*
            65536.0),p+8);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->y_resolution*
            65536.0),p+8);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
      }
    p+=count;
    length-=count;
  }
  return(MagickTrue);
}

/*
  Walk the EXIF IFD chain and rewrite the resolution (0x011a/0x011b),
  orientation (0x0112) and resolution-unit (0x0128) entries in place from
  the current image properties.  The exif_resources splay-tree records
  visited entries to break cycles in corrupt files; directory_stack bounds
  recursion into sub-IFDs.
*/
static MagickBooleanType SyncExifProfile(Image *image, StringInfo *profile)
{
#define MaxDirectoryStack  16
#define EXIF_DELIMITER  "\n"
#define EXIF_NUM_FORMATS  12
#define TAG_EXIF_OFFSET  0x8769
#define TAG_INTEROP_OFFSET  0xa005

  typedef struct _DirectoryInfo
  {
    unsigned char
      *directory;

    size_t
      entry;
  } DirectoryInfo;

  DirectoryInfo
    directory_stack[MaxDirectoryStack];

  EndianType
    endian;

  size_t
    entry,
    length,
    number_entries;

  SplayTreeInfo
    *exif_resources;

  ssize_t
    id,
    level,
    offset;

  static int
    format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};

  unsigned char
    *directory,
    *exif;

  /*
    Set EXIF resolution tag.
  */
  length=GetStringInfoLength(profile);
  exif=GetStringInfoDatum(profile);
  if (length < 16)
    return(MagickFalse);
  id=(ssize_t) ReadProfileShort(LSBEndian,exif);
  if ((id != 0x4949) && (id != 0x4D4D))
    {
      /* No TIFF byte-order mark at the start: scan for an "Exif\0\0"
         header and resume from there. */
      while (length != 0)
      {
        if (ReadProfileByte(&exif,&length) != 0x45)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x78)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x69)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x66)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        break;
      }
      if (length < 16)
        return(MagickFalse);
      id=(ssize_t) ReadProfileShort(LSBEndian,exif);
    }
  endian=LSBEndian;
  if (id == 0x4949)
    endian=LSBEndian;  /* "II": Intel, little-endian */
  else
    if (id == 0x4D4D)
      endian=MSBEndian;  /* "MM": Motorola, big-endian */
    else
      return(MagickFalse);
  if (ReadProfileShort(endian,exif+2) != 0x002a)
    return(MagickFalse);
  /*
    This the offset to the first IFD.
  */
  offset=(ssize_t) ReadProfileLong(endian,exif+4);
  if ((offset < 0) || ((size_t) offset >= length))
    return(MagickFalse);
  directory=exif+offset;
  level=0;
  entry=0;
  exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
    (void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
  do
  {
    if (level > 0)
      {
        level--;
        directory=directory_stack[level].directory;
        entry=directory_stack[level].entry;
      }
    if ((directory < exif) || (directory > (exif+length-2)))
      break;
    /*
      Determine how many entries there are in the current IFD.
    */
    number_entries=ReadProfileShort(endian,directory);
    for ( ; entry < number_entries; entry++)
    {
      int
        components;

      register unsigned char
        *p,
        *q;

      size_t
        number_bytes;

      ssize_t
        format,
        tag_value;

      q=(unsigned char *) (directory+2+(12*entry));
      if (q > (exif+length-12))
        break;  /* corrupt EXIF */
      if (GetValueFromSplayTree(exif_resources,q) == q)
        break;  /* already visited: cycle in a corrupt file */
      (void) AddValueToSplayTree(exif_resources,q,q);
      tag_value=(ssize_t) ReadProfileShort(endian,q);
      format=(ssize_t) ReadProfileShort(endian,q+2);
      if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
        break;
      components=(int) ReadProfileLong(endian,q+4);
      if (components < 0)
        break;  /* corrupt EXIF */
      number_bytes=(size_t) components*format_bytes[format];
      if ((ssize_t) number_bytes < components)
        break;  /* prevent overflow */
      if (number_bytes <= 4)
        p=q+8;  /* value stored inline in the entry */
      else
        {
          /*
            The directory entry contains an offset.
          */
          offset=(ssize_t) ReadProfileLong(endian,q+8);
          if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
            continue;
          if (~length < number_bytes)
            continue;  /* prevent overflow */
          p=(unsigned char *) (exif+offset);
        }
      switch (tag_value)
      {
        case 0x011a:
        {
          /* XResolution (rational: numerator, denominator forced to 1). */
          (void) WriteProfileLong(endian,(size_t) (image->x_resolution+0.5),p);
          if (number_bytes == 8)
            (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x011b:
        {
          /* YResolution. */
          (void) WriteProfileLong(endian,(size_t) (image->y_resolution+0.5),p);
          if (number_bytes == 8)
            (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x0112:
        {
          /* Orientation. */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) image->orientation,p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) image->orientation,
            p);
          break;
        }
        case 0x0128:
        {
          /* ResolutionUnit (EXIF enum is ImageMagick units+1). */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) (image->units+1),p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
          break;
        }
        default:
          break;
      }
      if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
        {
          /* Descend into the sub-IFD; also queue the next chained IFD. */
          offset=(ssize_t) ReadProfileLong(endian,p);
          if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
            {
              directory_stack[level].directory=directory;
              entry++;
              directory_stack[level].entry=entry;
              level++;
              directory_stack[level].directory=exif+offset;
              directory_stack[level].entry=0;
              level++;
              if ((directory+2+(12*number_entries)) > (exif+length))
                break;
              offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
                number_entries));
              if ((offset != 0) && ((size_t) offset < length) &&
                  (level < (MaxDirectoryStack-2)))
                {
                  directory_stack[level].directory=exif+offset;
                  directory_stack[level].entry=0;
                  level++;
                }
            }
          break;
        }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}

/*
  Push current image properties into the attached 8BIM and EXIF profiles;
  returns MagickFalse when either profile could not be synchronized.
*/
MagickExport MagickBooleanType SyncImageProfiles(Image *image)
{
  MagickBooleanType
    status;

  StringInfo
    *profile;

  status=MagickTrue;
  profile=(StringInfo *) GetImageProfile(image,"8BIM");
  if (profile != (StringInfo *) NULL)
    if (Sync8BimProfile(image,profile) == MagickFalse)
      status=MagickFalse;
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if (profile != (StringInfo *) NULL)
    if (SyncExifProfile(image,profile) == MagickFalse)
      status=MagickFalse;
  return(status);
}
displacement_lagrangemultiplier_residual_contact_criteria.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H) #define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "utilities/table_stream_utility.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "utilities/color_utilities.h" #include "utilities/constraint_utilities.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@name Kratos Classes ///@{ /** * @class DisplacementLagrangeMultiplierResidualContactCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Convergence criteria for contact problems * This class implements a convergence control based on nodal displacement and * lagrange multiplier values. The error is evaluated separately for each of them, and * relative and absolute tolerances for both must be specified. 
* @author Vicente Mataix Ferrandiz */ template< class TSparseSpace, class TDenseSpace > class DisplacementLagrangeMultiplierResidualContactCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of DisplacementLagrangeMultiplierResidualContactCriteria KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierResidualContactCriteria ); /// Local Flags KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT ); KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT ); KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED ); KRATOS_DEFINE_LOCAL_FLAG( ROTATION_DOF_IS_CONSIDERED ); KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET ); /// The base class definition (and it subclasses) typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; /// The sparse space used typedef TSparseSpace SparseSpaceType; /// The r_table stream definition TODO: Replace by logger typedef TableStreamUtility::Pointer TablePrinterPointerType; /// The index type definition typedef std::size_t IndexType; /// The key type definition typedef std::size_t KeyType; ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor (parameters) * @param DispRatioTolerance Relative tolerance for displacement residual error * @param DispAbsTolerance Absolute tolerance for displacement residual error * @param RotRatioTolerance Relative tolerance for rotation residual error * @param RotAbsTolerance Absolute tolerance for rotation residual error * @param LMRatioTolerance Relative tolerance for lagrange multiplier residual error * @param LMAbsTolerance Absolute tolerance for lagrange multiplier residual error * @param EnsureContact To check if the contact is lost * @param pTable The pointer to the output r_table * @param 
PrintingOutput If the output is going to be printed in a txt file
     */
    explicit DisplacementLagrangeMultiplierResidualContactCriteria(
        const TDataType DispRatioTolerance,
        const TDataType DispAbsTolerance,
        const TDataType RotRatioTolerance,
        const TDataType RotAbsTolerance,
        const TDataType LMRatioTolerance,
        const TDataType LMAbsTolerance,
        const bool EnsureContact = false,
        const bool PrintingOutput = false
        ) : BaseType()
    {
        // Set local flags (kept in the mOptions flag container)
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, EnsureContact);
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, PrintingOutput);
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false);
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);

        // The displacement residual tolerances
        mDispRatioTolerance = DispRatioTolerance;
        mDispAbsTolerance = DispAbsTolerance;

        // The rotation residual tolerances
        mRotRatioTolerance = RotRatioTolerance;
        mRotAbsTolerance = RotAbsTolerance;

        // The normal contact residual (Lagrange multiplier) tolerances
        mLMRatioTolerance = LMRatioTolerance;
        mLMAbsTolerance = LMAbsTolerance;
    }

    /**
     * @brief Default constructor (parameters)
     * @param ThisParameters The configuration parameters
     */
    explicit DisplacementLagrangeMultiplierResidualContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
        : BaseType()
    {
        // Validate and assign defaults
        ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
        this->AssignSettings(ThisParameters);
    }

    //* Copy constructor.
    DisplacementLagrangeMultiplierResidualContactCriteria( DisplacementLagrangeMultiplierResidualContactCriteria const& rOther )
      :BaseType(rOther)
      ,mOptions(rOther.mOptions)
      ,mDispRatioTolerance(rOther.mDispRatioTolerance)
      ,mDispAbsTolerance(rOther.mDispAbsTolerance)
      ,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm)
      ,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm)
      ,mRotRatioTolerance(rOther.mRotRatioTolerance)
      ,mRotAbsTolerance(rOther.mRotAbsTolerance)
      ,mRotInitialResidualNorm(rOther.mRotInitialResidualNorm)
      ,mRotCurrentResidualNorm(rOther.mRotCurrentResidualNorm)
      ,mLMRatioTolerance(rOther.mLMRatioTolerance)
      ,mLMAbsTolerance(rOther.mLMAbsTolerance)
      ,mLMInitialResidualNorm(rOther.mLMInitialResidualNorm)
      ,mLMCurrentResidualNorm(rOther.mLMCurrentResidualNorm)
    {
    }

    /// Destructor.
    ~DisplacementLagrangeMultiplierResidualContactCriteria() override = default;

    ///@}
    ///@name Operators
    ///@{

    /**
     * @brief Compute relative and absolute error.
     * @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     * @return true if convergence is achieved, false otherwise
     */
    bool PostCriteria(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something
            // Initialize (accumulated squared residual norms per DoF family)
            TDataType disp_residual_solution_norm = 0.0, rot_residual_solution_norm = 0.0, lm_residual_solution_norm = 0.0;
            IndexType disp_dof_num(0), rot_dof_num(0), lm_dof_num(0);

            // First iterator
            const auto it_dof_begin = rDofSet.begin();

            // Auxiliar values
            std::size_t dof_id = 0;
            TDataType residual_dof_value = 0.0;

            // The number of active dofs
            const std::size_t number_active_dofs = rb.size();

            // Auxiliar displacement DoF check: when rotation DoFs exist the
            // displacement bucket must match the variable explicitly,
            // otherwise any non-LM DoF is treated as displacement
            const std::function<bool(const VariableData&)> check_without_rot =
            [](const VariableData& rCurrVar) -> bool {return true;};
            const std::function<bool(const VariableData&)> check_with_rot =
            [](const VariableData& rCurrVar) -> bool {return ((rCurrVar == DISPLACEMENT_X) || (rCurrVar == DISPLACEMENT_Y) || (rCurrVar == DISPLACEMENT_Z));};
            const auto* p_check_disp = (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? &check_with_rot : &check_without_rot;

            // Loop over Dofs
            #pragma omp parallel for firstprivate(dof_id, residual_dof_value) reduction(+:disp_residual_solution_norm,rot_residual_solution_norm,lm_residual_solution_norm,disp_dof_num,rot_dof_num,lm_dof_num)
            for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
                auto it_dof = it_dof_begin + i;

                dof_id = it_dof->EquationId();

                // Check dof id is solved
                if (dof_id < number_active_dofs) {
                    if (mActiveDofs[dof_id] == 1) {
                        residual_dof_value = rb[dof_id];

                        // Bucket the residual contribution by variable type
                        const auto& r_curr_var = it_dof->GetVariable();
                        if ((r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (r_curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) {
                            lm_residual_solution_norm += std::pow(residual_dof_value, 2);
                            ++lm_dof_num;
                        } else if ((*p_check_disp)(r_curr_var)) {
                            disp_residual_solution_norm += std::pow(residual_dof_value, 2);
                            ++disp_dof_num;
                        } else { // We will assume is rotation dof
                            KRATOS_DEBUG_ERROR_IF_NOT((r_curr_var == ROTATION_X) || (r_curr_var == ROTATION_Y) || (r_curr_var == ROTATION_Z)) << "Variable must be a ROTATION and it is: " << r_curr_var.Name() << std::endl;
                            rot_residual_solution_norm += std::pow(residual_dof_value, 2);
                            ++rot_dof_num;
                        }
                    }
                }
            }

            mDispCurrentResidualNorm = disp_residual_solution_norm;
            mRotCurrentResidualNorm = rot_residual_solution_norm;
            mLMCurrentResidualNorm = lm_residual_solution_norm;

            TDataType residual_disp_ratio = 1.0;
            TDataType residual_rot_ratio = 1.0;
            TDataType residual_lm_ratio = 1.0;

            // We initialize the solution (first call stores reference norms;
            // zero norms are replaced by 1.0 to avoid division by zero)
            if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET)) {
                mDispInitialResidualNorm = (disp_residual_solution_norm == 0.0) ? 1.0 : disp_residual_solution_norm;
                mLMInitialResidualNorm = (lm_residual_solution_norm == 0.0) ? 1.0 : lm_residual_solution_norm;
                residual_disp_ratio = 1.0;
                residual_lm_ratio = 1.0;
                if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                    mRotInitialResidualNorm = (rot_residual_solution_norm == 0.0) ? 1.0 : rot_residual_solution_norm;
                    residual_rot_ratio = 1.0;
                }
                mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, true);
            }

            // We calculate the ratio of the displacements
            residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm;

            // We calculate the ratio of the rotations
            residual_rot_ratio = mRotCurrentResidualNorm/mRotInitialResidualNorm;

            // We calculate the ratio of the LM
            residual_lm_ratio = mLMCurrentResidualNorm/mLMInitialResidualNorm;

            KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;

            // We calculate the absolute norms (per-DoF averages)
            const TDataType residual_disp_abs = mDispCurrentResidualNorm/disp_dof_num;
            const TDataType residual_rot_abs = mRotCurrentResidualNorm/rot_dof_num;
            const TDataType residual_lm_abs = mLMCurrentResidualNorm/lm_dof_num;

            // The process info of the model part
            ProcessInfo& r_process_info = rModelPart.GetProcessInfo();

            // We print the results // TODO: Replace for the new log
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    std::cout.precision(4);
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& Table = p_table->GetTable();
                    if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                        Table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_rot_ratio << mRotRatioTolerance << residual_rot_abs << mRotAbsTolerance << residual_lm_ratio << mLMRatioTolerance << residual_lm_abs << mLMAbsTolerance;
                    } else {
                        Table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_lm_ratio << mLMRatioTolerance << residual_lm_abs << mLMAbsTolerance;
                    }
                } else {
                    std::cout.precision(4);
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) {
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("RESIDUAL CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
                        if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                            KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tROTATION: RATIO = ") << residual_rot_ratio << BOLDFONT(" EXP.RATIO = ") << mRotRatioTolerance << BOLDFONT(" ABS = ") << residual_rot_abs << BOLDFONT(" EXP.ABS = ") << mRotAbsTolerance << std::endl;
                        }
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tLAGRANGE MUL: RATIO = ") << residual_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << residual_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl;
                    } else {
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "RESIDUAL CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
                        if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                            KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tROTATION: RATIO = " << residual_rot_ratio << " EXP.RATIO = " << mRotRatioTolerance << " ABS = " << residual_rot_abs << " EXP.ABS = " << mRotAbsTolerance << std::endl;
                        }
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tLAGRANGE MUL: RATIO = " << residual_lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << residual_lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl;
                    }
                }
            }

            r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > residual_lm_ratio) ? residual_disp_ratio : residual_lm_ratio;
            r_process_info[RESIDUAL_NORM] = (residual_lm_abs > mLMAbsTolerance) ? residual_lm_abs : mLMAbsTolerance;

            // We check if converged (each family converges on either the
            // relative or the absolute criterion)
            const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance);
            const bool rot_converged = (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? (residual_rot_ratio <= mRotRatioTolerance || residual_rot_abs <= mRotAbsTolerance) : true;
            const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) ? true : (residual_lm_ratio <= mLMRatioTolerance || residual_lm_abs <= mLMAbsTolerance);

            if (disp_converged && rot_converged && lm_converged ) {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                    if (r_process_info.Has(TABLE_UTILITY)) {
                        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                        auto& Table = p_table->GetTable();
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                            Table << BOLDFONT(FGRN("       Achieved"));
                        else
                            Table << "Achieved";
                    } else {
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                            KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
                        else
                            KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is achieved" << std::endl;
                    }
                }
                return true;
            } else {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                    if (r_process_info.Has(TABLE_UTILITY)) {
                        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                        auto& r_table = p_table->GetTable();
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                            r_table << BOLDFONT(FRED("   Not achieved"));
                        else
                            r_table << "Not achieved";
                    } else {
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                            KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
                        else
                            KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is not achieved" << std::endl;
                    }
                }
                return false;
            }
        } else // In this case all the displacements are imposed!
            return true;
    }

    /**
     * @brief This function initialize the convergence criteria
     * @param rModelPart Reference to the ModelPart containing the contact problem.
(unused) */ void Initialize( ModelPart& rModelPart) override { // Initialize BaseType::mConvergenceCriteriaIsInitialized = true; // Check rotation dof mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, ContactUtilities::CheckModelPartHasRotationDoF(rModelPart)); // Initialize header ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table.AddColumn("DP RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { r_table.AddColumn("RT RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); } r_table.AddColumn("LM RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); r_table.AddColumn("CONVERGENCE", 15); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, true); } } /** * @brief This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Initialize flag mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false); // Filling mActiveDofs when MPC exist ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet); } /** * @brief This method provides the defaults parameters to avoid conflicts between the different constructors * @return The default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "displacement_lagrangemultiplier_residual_contact_criteria", "ensure_contact" : false, "print_convergence_criterion" : false, "residual_relative_tolerance" : 1.0e-4, "residual_absolute_tolerance" : 1.0e-9, "rotation_residual_relative_tolerance" : 1.0e-4, "rotation_residual_absolute_tolerance" : 1.0e-9, "contact_residual_relative_tolerance" : 1.0e-4, "contact_residual_absolute_tolerance" : 1.0e-9 })"); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return "displacement_lagrangemultiplier_residual_contact_criteria"; } ///@} ///@name Operations ///@{ ///@} ///@name Acces ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} 
///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method assigns settings to member variables * @param ThisParameters Parameters that are assigned to the member variables */ void AssignSettings(const Parameters ThisParameters) override { BaseType::AssignSettings(ThisParameters); // The displacement residual mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble(); mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble(); // The rotation residual mRotRatioTolerance = ThisParameters["rotation_residual_relative_tolerance"].GetDouble(); mRotAbsTolerance = ThisParameters["rotation_residual_absolute_tolerance"].GetDouble(); // The contact residual mLMRatioTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble(); mLMAbsTolerance = ThisParameters["contact_residual_absolute_tolerance"].GetDouble(); // Set local flags mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false); } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ Flags mOptions; /// Local flags TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement residual TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement residual TDataType mDispInitialResidualNorm; /// The 
reference norm of the displacement residual TDataType mDispCurrentResidualNorm; /// The current norm of the displacement residual TDataType mRotRatioTolerance; /// The ratio threshold for the norm of the rotation residual TDataType mRotAbsTolerance; /// The absolute value threshold for the norm of the rotation residual TDataType mRotInitialResidualNorm; /// The reference norm of the rotation residual TDataType mRotCurrentResidualNorm; /// The current norm of the rotation residual TDataType mLMRatioTolerance; /// The ratio threshold for the norm of the LM residual TDataType mLMAbsTolerance; /// The absolute value threshold for the norm of the LM residual TDataType mLMInitialResidualNorm; /// The reference norm of the LM residual TDataType mLMCurrentResidualNorm; /// The current norm of the LM residual std::vector<int> mActiveDofs; /// This vector contains the dofs that are active ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Unaccessible methods ///@{ ///@} }; // Kratos DisplacementLagrangeMultiplierResidualContactCriteria ///@name Local flags creation ///@{ /// Local Flags template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, 
TDenseSpace>::ROTATION_DOF_IS_CONSIDERED(Kratos::Flags::Create(3)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(4)); } #endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H */
GB_binop__div_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__div_fp32
// A.*B function (eWiseMult):       GB_AemultB__div_fp32
// A*D function (colscale):         GB_AxD__div_fp32
// D*A function (rowscale):         GB_DxB__div_fp32
// C+=B function (dense accum):     GB_Cdense_accumB__div_fp32
// C+=b function (dense accum):     GB_Cdense_accumb__div_fp32
// C+=A+B function (dense ewise3):  GB_Cdense_ewise3_accum__div_fp32
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__div_fp32
// C=scalar+B                       GB_bind1st__div_fp32
// C=scalar+B'                      GB_bind1st_tran__div_fp32
// C=A+scalar                       GB_bind2nd__div_fp32
// C=A'+scalar                      GB_bind2nd_tran__div_fp32

// C type:   float
// A type:   float
// B,b type: float

// BinaryOp: cij = (aij / bij)

// The macros below are consumed by the included *_template.c / *_meta.c files;
// they specialize the generic kernels for the DIV operator on fp32.

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    float bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x / y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_DIV || GxB_NO_FP32 || GxB_NO_DIV_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__div_fp32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns); kept as generated
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__div_fp32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float bij = Bx [p] ;
        Cx [p] = (x / bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__div_fp32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float aij = Ax [p] ;
        Cx [p] = (aij / y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    float aij = Ax [pA] ;       \
    Cx [pC] = (x / aij) ;       \
}

GrB_Info GB_bind1st_tran__div_fp32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    float aij = Ax [pA] ;       \
    Cx [pC] = (aij / y) ;       \
}

GrB_Info GB_bind2nd_tran__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
dpado.202001080943.clean_up_labels.h
//
// Created by Zhen Peng on 1/6/20.
//

#ifndef PADO_DPADO_H
#define PADO_DPADO_H

#include <vector>
//#include <unordered_map>
#include <map>
#include <algorithm>
#include <iostream>
#include <limits.h>
//#include <xmmintrin.h>
#include <immintrin.h>
#include <bitset>
#include <math.h>
#include <fstream>
#include <omp.h>
#include "globals.h"
#include "dglobals.h"
#include "dgraph.h"

namespace PADO {

// Distributed labeling class over a DistGraph: builds per-vertex 2-level labels
// (batches/distances/vertices) plus bit-parallel labels, batch by batch.
template <VertexID BATCH_SIZE = 1024>
class DistBVCPLL {
private:
    // Number of roots used for bit-parallel labels.
    static const VertexID BITPARALLEL_SIZE = 50;
    // Work-size threshold for switching to the parallel code paths
    // (presumably; used by the *_seq/*_para variants — confirm in the .cpp bodies).
    const inti THRESHOLD_PARALLEL = 80;

    // Structure for the type of label
    struct IndexType {
        struct Batch {
            VertexID batch_id;    // Batch ID
            VertexID start_index; // Index to the array distances where the batch starts
            VertexID size;        // Number of distances element in this batch

            Batch() = default;
            Batch(VertexID batch_id_, VertexID start_index_, VertexID size_):
                    batch_id(batch_id_), start_index(start_index_), size(size_)
            { }
        };

        struct DistanceIndexType {
            VertexID start_index; // Index to the array vertices where the same-distance vertices start
            VertexID size;        // Number of the same-distance vertices
            UnweightedDist dist;  // The real distance

            DistanceIndexType() = default;
            DistanceIndexType(VertexID start_index_, VertexID size_, UnweightedDist dist_):
                    start_index(start_index_), size(size_), dist(dist_)
            { }
        };

        // Bit-parallel Labels
        UnweightedDist bp_dist[BITPARALLEL_SIZE];
        uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0}

        std::vector<Batch> batches;              // Batch info
        std::vector<DistanceIndexType> distances; // Distance info
        std::vector<VertexID> vertices;          // Vertices in the label, presented as temporary ID

        // Total memory footprint of this label (fixed BP arrays + dynamic vectors).
        size_t get_size_in_bytes() const
        {
            return sizeof(bp_dist) + sizeof(bp_sets)
                    + batches.size() * sizeof(Batch)
                    + distances.size() * sizeof(DistanceIndexType)
                    + vertices.size() * sizeof(VertexID);
        }

        // Release all dynamic label storage; swap-with-empty also frees capacity.
        void clean_all_indices()
        {
            std::vector<Batch>().swap(batches);
            std::vector<DistanceIndexType>().swap(distances);
            std::vector<VertexID>().swap(vertices);
        }
    };

    // Per-vertex transient state used while processing one batch.
    struct ShortIndex {
        // I use BATCH_SIZE + 1 bit for indicator bit array.
        // The v.indicator[BATCH_SIZE] is set if in current batch v has got any new labels already.
        // In this way, it helps update_label_indices() and can be reset along with other indicator elements.
        // indicator[r] (0 <= r < BATCH_SIZE) is set means root r once selected as candidate already.
        std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE + 1, 0);

        // Use a queue to store candidates
        std::vector<VertexID> candidates_que = std::vector<VertexID>(BATCH_SIZE);
        VertexID end_candidates_que = 0;
        std::vector<uint8_t> is_candidate = std::vector<uint8_t>(BATCH_SIZE, 0);

        // Clear every indicator element (including the "has new labels" slot).
        void indicator_reset()
        {
            std::fill(indicator.begin(), indicator.end(), 0);
        }
    };

    // Type of Bit-Parallel Label
    struct BPLabelType {
        UnweightedDist bp_dist[BITPARALLEL_SIZE] = { 0 };
        uint64_t bp_sets[BITPARALLEL_SIZE][2] = { {0} }; // [0]: S^{-1}, [1]: S^{0}
    };

    // Type of Label Message Unit, for initializing distance table
    struct LabelTableUnit {
        VertexID root_id;
        VertexID label_global_id;
        UnweightedDist dist;

        LabelTableUnit() = default;
        LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
                root_id(r), label_global_id(l), dist(d) {}
    };

    // Type of BitParallel Label Message Unit for initializing bit-parallel labels
    struct MsgBPLabel {
        VertexID r_root_id;
        UnweightedDist bp_dist[BITPARALLEL_SIZE];
        uint64_t bp_sets[BITPARALLEL_SIZE][2];

        MsgBPLabel() = default;
        MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2])
                : r_root_id(r)
        {
            memcpy(bp_dist, dist, sizeof(bp_dist));
            memcpy(bp_sets, sets, sizeof(bp_sets));
        }
    };

    VertexID num_v = 0;       // copied from DistGraph::num_v in the constructor
    VertexID num_masters = 0; // copied from DistGraph::num_masters in the constructor
    int host_id = 0;          // copied from DistGraph::host_id in the constructor
    int num_hosts = 0;        // copied from DistGraph::num_hosts in the constructor
    MPI_Datatype V_ID_Type;   // copied from DistGraph::V_ID_Type in the constructor
    // Labels of the vertices mastered by this host (indexed by local master ID).
    std::vector<IndexType> L;

    // NOTE(review): large blocks of commented-out, superseded prototypes and dead
    // counters were removed from this section for clarity; no live code changed.

    // Push bit-parallel labels from v_global to neighbors using thread-local buffers.
    inline void bit_parallel_push_labels(
            const DistGraph &G,
            VertexID v_global,
            std::vector<VertexID> &tmp_q,
            VertexID &size_tmp_q,
            std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es,
            VertexID &size_tmp_sibling_es,
            std::vector< std::pair<VertexID, VertexID> > &tmp_child_es,
            VertexID &size_tmp_child_es,
            const VertexID &offset_tmp_q,
            std::vector<UnweightedDist> &dists,
            UnweightedDist iter);

    // Build the bit-parallel labels; marks the chosen roots in used_bp_roots.
    inline void bit_parallel_labeling(
            const DistGraph &G,
            std::vector<uint8_t> &used_bp_roots);

    // Process one batch of roots [roots_start, roots_start + roots_size).
    inline void batch_process(
            const DistGraph &G,
            const VertexID b_id,
            const VertexID roots_start,
            const VertexID roots_size,
            const std::vector<uint8_t> &used_bp_roots,
            std::vector<VertexID> &active_queue,
            VertexID &end_active_queue,
            std::vector<VertexID> &got_candidates_queue,
            VertexID &end_got_candidates_queue,
            std::vector<ShortIndex> &short_index,
            std::vector< std::vector<UnweightedDist> > &dist_table,
            std::vector< std::vector<VertexID> > &recved_dist_table,
            std::vector<BPLabelType> &bp_labels_table,
            std::vector<uint8_t> &got_candidates,
            std::vector<uint8_t> &is_active,
            std::vector<VertexID> &once_candidated_queue,
            VertexID &end_once_candidated_queue,
            std::vector<uint8_t> &once_candidated);

    // Per-batch setup of queues/tables; returns a VertexID (see definition).
    inline VertexID initialization(
            const DistGraph &G,
            std::vector<ShortIndex> &short_index,
            std::vector< std::vector<UnweightedDist> > &dist_table,
            std::vector< std::vector<VertexID> > &recved_dist_table,
            std::vector<BPLabelType> &bp_labels_table,
            std::vector<VertexID> &active_queue,
            VertexID &end_active_queue,
            std::vector<VertexID> &once_candidated_queue,
            VertexID &end_once_candidated_queue,
            std::vector<uint8_t> &once_candidated,
            VertexID b_id,
            VertexID roots_start,
            VertexID roots_size,
            const std::vector<uint8_t> &used_bp_roots);

    // Parallel scheduling of label pushing for a slice of the active queue.
    inline void schedule_label_pushing_para(
            const DistGraph &G,
            const VertexID roots_start,
            const std::vector<uint8_t> &used_bp_roots,
            const std::vector<VertexID> &active_queue,
            const VertexID global_start,
            const VertexID global_size,
            const VertexID local_size,
            std::vector<VertexID> &got_candidates_queue,
            VertexID &end_got_candidates_queue,
            std::vector<ShortIndex> &short_index,
            const std::vector<BPLabelType> &bp_labels_table,
            std::vector<uint8_t> &got_candidates,
            std::vector<uint8_t> &is_active,
            std::vector<VertexID> &once_candidated_queue,
            VertexID &end_once_candidated_queue,
            std::vector<uint8_t> &once_candidated,
            const UnweightedDist iter);

    // Sequential label pushing from v_head_global over labels_buffer[start_index, bound_index).
    inline void local_push_labels_seq(
            VertexID v_head_global,
            EdgeID start_index,
            EdgeID bound_index,
            VertexID roots_start,
            const std::vector<VertexID> &labels_buffer,
            const DistGraph &G,
            std::vector<ShortIndex> &short_index,
            std::vector<VertexID> &got_candidates_queue,
            VertexID &end_got_candidates_queue,
            std::vector<uint8_t> &got_candidates,
            std::vector<VertexID> &once_candidated_queue,
            VertexID &end_once_candidated_queue,
            std::vector<uint8_t> &once_candidated,
            const std::vector<BPLabelType> &bp_labels_table,
            const std::vector<uint8_t> &used_bp_roots,
            const UnweightedDist iter);

    // Parallel counterpart of local_push_labels_seq; writes into thread-local queues.
    inline void local_push_labels_para(
            const VertexID v_head_global,
            const EdgeID start_index,
            const EdgeID bound_index,
            const VertexID roots_start,
            const std::vector<VertexID> &labels_buffer,
            const DistGraph &G,
            std::vector<ShortIndex> &short_index,
            std::vector<VertexID> &tmp_got_candidates_queue,
            VertexID &size_tmp_got_candidates_queue,
            const VertexID offset_tmp_queue,
            std::vector<uint8_t> &got_candidates,
            std::vector<VertexID> &tmp_once_candidated_queue,
            VertexID &size_tmp_once_candidated_queue,
            std::vector<uint8_t> &once_candidated,
            const std::vector<BPLabelType> &bp_labels_table,
            const std::vector<uint8_t> &used_bp_roots,
            const UnweightedDist iter);

    // Distance pruning check for a candidate root against vertex v_id.
    inline bool distance_query(
            VertexID cand_root_id,
            VertexID v_id,
            VertexID roots_start,
            const std::vector< std::vector<UnweightedDist> > &dist_table,
            UnweightedDist iter);

    // Record the new label (sequential); queues the pair for sending.
    inline void insert_label_only_seq(
            VertexID cand_root_id,
            VertexID v_id,
            VertexID roots_start,
            VertexID roots_size,
            const DistGraph &G,
            std::vector< std::pair<VertexID, VertexID> > &buffer_send);

    // Record the new label (parallel); writes into a thread-local send buffer.
    inline void insert_label_only_para(
            VertexID cand_root_id,
            VertexID v_id_local,
            VertexID roots_start,
            VertexID roots_size,
            const DistGraph &G,
            std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send,
            EdgeID &size_tmp_buffer_send,
            const EdgeID offset_tmp_buffer_send);

    // Update L[v_id]'s batch/distance indices after inserting inserted_count labels.
    inline void update_label_indices(
            VertexID v_id,
            VertexID inserted_count,
            std::vector<ShortIndex> &short_index,
            VertexID b_id,
            UnweightedDist iter);

    // Reset the per-batch tables before the next batch starts.
    inline void reset_at_end(
            std::vector< std::vector<UnweightedDist> > &dist_table,
            std::vector< std::vector<VertexID> > &recved_dist_table,
            std::vector<BPLabelType> &bp_labels_table);

    // One host broadcasts buffer_send; every host receives into buffer_recv.
    template <typename E_T>
    inline void one_host_bcasts_buffer_to_buffer(
            int root,
            std::vector<E_T> &buffer_send,
            std::vector<E_T> &buffer_recv);

    // Debug/inspection output of the labels for one batch of roots.
    void dump_labels(
            const DistGraph &G,
            const VertexID roots_start,
            const VertexID roots_size);

    // Total size in bytes of all labels mastered by this host.
    size_t get_index_size() const
    {
        size_t bytes = 0;
        for (VertexID v_i = 0; v_i < num_masters; ++v_i) {
            bytes += L[v_i].get_size_in_bytes();
        }
        return bytes;
    }

    // Timing statistics accumulated during construction.
    double message_time = 0;
    double bp_labeling_time = 0;
    double initializing_time = 0;
    double scatter_time = 0;
    double gather_time = 0;
    double clearup_time = 0;

public:
    DistBVCPLL() = default;
    // Builds the full index from the distributed graph.
    explicit DistBVCPLL(
            const DistGraph &G);
}; // class
template <VertexID BATCH_SIZE>
DistBVCPLL<BATCH_SIZE>::
DistBVCPLL(
        const DistGraph &G)
{
    // ---- Copy graph-wide metadata into members ----
    num_v = G.num_v;
    assert(num_v >= BATCH_SIZE);            // need at least one full batch
    num_masters = G.num_masters;
    host_id = G.host_id;
    num_hosts = G.num_hosts;
    V_ID_Type = G.V_ID_Type;                // MPI datatype handle for VertexID
    // The label index only stores entries for this host's master vertices.
    L.resize(num_masters);
    VertexID remainer = num_v % BATCH_SIZE; // size of the final partial batch
    VertexID b_i_bound = num_v / BATCH_SIZE;
    std::vector<uint8_t> used_bp_roots(num_v, 0); // marks vertices consumed as BP roots
    double time_labeling = -WallTimer::get_time_mark();

    // ---- Phase 1: bit-parallel labeling (selects BP roots, fills bp_dist/bp_sets) ----
    bp_labeling_time -= WallTimer::get_time_mark();
    bit_parallel_labeling(
            G,
            used_bp_roots);
    bp_labeling_time += WallTimer::get_time_mark();
    {//test
        if (0 == host_id) {
            printf("host_id: %u bp_labeling_finished.\n", host_id);
        }
    }

    // ---- Shared per-batch working state, allocated once and reused ----
    std::vector<VertexID> active_queue(num_masters); // vertices active in the current iteration
    VertexID end_active_queue = 0;
    std::vector<uint8_t> is_active(num_masters, false); // is_active[v]: v is in active_queue
    std::vector<VertexID> got_candidates_queue(num_masters); // vertices that received candidates
    VertexID end_got_candidates_queue = 0;
    std::vector<uint8_t> got_candidates(num_masters, false); // membership flag for got_candidates_queue
    std::vector<ShortIndex> short_index(num_masters);
    // dist_table[r][v]: tentative distance from batch-root r to every global vertex v
    std::vector< std::vector<UnweightedDist> > dist_table(BATCH_SIZE, std::vector<UnweightedDist>(num_v, MAX_UNWEIGHTED_DIST));
    std::vector<VertexID> once_candidated_queue(num_masters); // vertices ever candidated; used to reset short_index
    VertexID end_once_candidated_queue = 0;
    std::vector<uint8_t> once_candidated(num_masters, false);
    // Distances received from other hosts; used to reset dist_table cheaply.
    std::vector< std::vector<VertexID> > recved_dist_table(BATCH_SIZE);
    std::vector<BPLabelType> bp_labels_table(BATCH_SIZE); // all batch-roots' bit-parallel labels

    // ---- Phase 2: process roots in batches of BATCH_SIZE ----
    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
        if (0 == host_id) {
            printf("b_i: %u\n", b_i);//test
        }
        batch_process(
                G,
                b_i,
                b_i * BATCH_SIZE,
                BATCH_SIZE,
                used_bp_roots,
                active_queue,
                end_active_queue,
                got_candidates_queue,
                end_got_candidates_queue,
                short_index,
                dist_table,
                recved_dist_table,
                bp_labels_table,
                got_candidates,
                is_active,
                once_candidated_queue,
                end_once_candidated_queue,
                once_candidated);
        dump_labels(
                G,
                b_i * BATCH_SIZE,
                BATCH_SIZE);
    }
    // Final partial batch, if num_v is not a multiple of BATCH_SIZE.
    if (remainer != 0) {
        if (0 == host_id) {
            printf("b_i: %u\n", b_i_bound);//test
        }
        batch_process(
                G,
                b_i_bound,
                b_i_bound * BATCH_SIZE,
                remainer,
                used_bp_roots,
                active_queue,
                end_active_queue,
                got_candidates_queue,
                end_got_candidates_queue,
                short_index,
                dist_table,
                recved_dist_table,
                bp_labels_table,
                got_candidates,
                is_active,
                once_candidated_queue,
                end_once_candidated_queue,
                once_candidated);
        dump_labels(
                G,
                b_i_bound * BATCH_SIZE,
                remainer);
    }
    time_labeling += WallTimer::get_time_mark();

    // ---- Statistics / reporting (test only) ----
    setlocale(LC_NUMERIC, "");
    if (0 == host_id) {
        printf("BATCH_SIZE: %u\n", BATCH_SIZE);
        printf("BP_Size: %u\n", BITPARALLEL_SIZE);
    }
    {// Total Number of Labels
        EdgeID local_num_labels = 0;
        for (VertexID v_global = 0; v_global < num_v; ++v_global) {
            if (G.get_master_host_id(v_global) != host_id) {
                continue;
            }
            local_num_labels += L[G.get_local_vertex_id(v_global)].vertices.size();
        }
        EdgeID global_num_labels;
        MPI_Allreduce(&local_num_labels,
                &global_num_labels,
                1,
                MPI_Instance::get_mpi_datatype<EdgeID>(),
                MPI_SUM,
                MPI_COMM_WORLD);
        MPI_Barrier(MPI_COMM_WORLD);
        if (0 == host_id) {
            // NOTE(review): "%lu" assumes EdgeID is a 64-bit unsigned long here;
            // verify against the EdgeID typedef (use PRIu64 if it is uint64_t).
            printf("Global_num_labels: %lu average: %f\n",
                    global_num_labels,
                    1.0 * global_num_labels / num_v);
        }
    }
    // Per-host timing breakdown (printed by every host).
    printf("num_hosts: %u host_id: %u\n"
            "Local_labeling_time: %.2f seconds\n"
            "bp_labeling_time: %.2f %.2f%%\n"
            "initializing_time: %.2f %.2f%%\n"
            "scatter_time: %.2f %.2f%%\n"
            "gather_time: %.2f %.2f%%\n"
            "clearup_time: %.2f %.2f%%\n"
            "message_time: %.2f %.2f%%\n",
            num_hosts, host_id,
            time_labeling,
            bp_labeling_time, 100.0 * bp_labeling_time / time_labeling,
            initializing_time, 100.0 * initializing_time / time_labeling,
            scatter_time, 100.0 * scatter_time / time_labeling,
            gather_time, 100.0 * gather_time / time_labeling,
            clearup_time, 100.0 * clearup_time / time_labeling,
            message_time, 100.0 * message_time / time_labeling);
    // Global time = slowest host (MPI_MAX).
    double global_time_labeling;
    MPI_Allreduce(&time_labeling,
            &global_time_labeling,
            1,
            MPI_DOUBLE,
            MPI_MAX,
            MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);
    if (0 == host_id) {
        printf("Global_labeling_time: %.2f seconds\n", global_time_labeling);
    }
    // End test
}
// (Removed: a fully commented-out legacy single-host bit_parallel_labeling
// implementation that previously followed this constructor.)
// sibling_es[num_sibling_es].first = v; // sibling_es[num_sibling_es].second = tv; // ++num_sibling_es; // } // } else { // d < tmp_d[tv] // if (tmp_d[tv] == MAX_UNWEIGHTED_DIST) { // que[que_h++] = tv; // tmp_d[tv] = td; // } // child_es[num_child_es].first = v; // child_es[num_child_es].second = tv; // ++num_child_es; // } // } // } // // for (VertexID i = 0; i < num_sibling_es; ++i) { // VertexID v = sibling_es[i].first, w = sibling_es[i].second; // tmp_s[v].second |= tmp_s[w].first; // tmp_s[w].second |= tmp_s[v].first; // } // for (VertexID i = 0; i < num_child_es; ++i) { // VertexID v = child_es[i].first, c = child_es[i].second; // tmp_s[c].first |= tmp_s[v].first; // tmp_s[c].second |= tmp_s[v].second; // } // // {// test // printf("iter %u @%u host_id: %u num_sibling_es: %u num_child_es: %u\n", d, __LINE__, host_id, num_sibling_es, num_child_es); //// if (4 == d) { //// exit(EXIT_SUCCESS); //// } // } // // que_t0 = que_t1; // que_t1 = que_h; // } // // for (VertexID v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = tmp_d[v]; // L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1} // L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} // } // } // //} template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: bit_parallel_push_labels( const DistGraph &G, const VertexID v_global, // std::vector<VertexID> &tmp_que, // VertexID &end_tmp_que, // std::vector< std::pair<VertexID, VertexID> > &sibling_es, // VertexID &num_sibling_es, // std::vector< std::pair<VertexID, VertexID> > &child_es, // VertexID &num_child_es, std::vector<VertexID> &tmp_q, VertexID &size_tmp_q, std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es, VertexID &size_tmp_sibling_es, std::vector< std::pair<VertexID, VertexID> > &tmp_child_es, VertexID &size_tmp_child_es, const VertexID &offset_tmp_q, std::vector<UnweightedDist> &dists, const UnweightedDist iter) { EdgeID i_start = 
G.vertices_idx[v_global]; EdgeID i_bound = i_start + G.local_out_degrees[v_global]; // {//test // printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]); // } for (EdgeID i = i_start; i < i_bound; ++i) { VertexID tv_global = G.out_edges[i]; VertexID tv_local = G.get_local_vertex_id(tv_global); UnweightedDist td = iter + 1; if (iter > dists[tv_local]) { ; } else if (iter == dists[tv_local]) { if (v_global < tv_global) { // ??? Why need v < tv !!! Because it's a undirected graph. tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].first = v_global; tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].second = tv_global; ++size_tmp_sibling_es; // sibling_es[num_sibling_es].first = v_global; // sibling_es[num_sibling_es].second = tv_global; // ++num_sibling_es; } } else { // iter < dists[tv] if (dists[tv_local] == MAX_UNWEIGHTED_DIST) { if (CAS(dists.data() + tv_local, MAX_UNWEIGHTED_DIST, td)) { tmp_q[offset_tmp_q + size_tmp_q++] = tv_global; } } // if (dists[tv_local] == MAX_UNWEIGHTED_DIST) { // tmp_que[end_tmp_que++] = tv_global; // dists[tv_local] = td; // } tmp_child_es[offset_tmp_q + size_tmp_child_es].first = v_global; tmp_child_es[offset_tmp_q + size_tmp_child_es].second = tv_global; ++size_tmp_child_es; // child_es[num_child_es].first = v_global; // child_es[num_child_es].second = tv_global; // ++num_child_es; } } } template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: bit_parallel_labeling( const DistGraph &G, // std::vector<IndexType> &L, std::vector<uint8_t> &used_bp_roots) { // Class type of Bit-Parallel label message unit. 
struct MsgUnitBP { VertexID v_global; uint64_t S_n1; uint64_t S_0; MsgUnitBP() = default; // MsgUnitBP(MsgUnitBP&& other) = default; // MsgUnitBP(MsgUnitBP& other) = default; // MsgUnitBP& operator=(const MsgUnitBP& other) = default; // MsgUnitBP& operator=(MsgUnitBP&& other) = default; MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0) : v_global(v), S_n1(sn1), S_0(s0) { } }; // VertexID num_v = G.num_v; // EdgeID num_e = G.num_e; EdgeID local_num_edges = G.num_edges_local; std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} std::vector<VertexID> que(num_masters); // active queue VertexID end_que = 0; std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que VertexID end_tmp_que = 0; std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0) std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1. VertexID r_global = 0; // root r for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { // {// test // if (0 == host_id) { // printf("i_bpsp: %u\n", i_bpspt); // } // } // Select the root r_global if (0 == host_id) { while (r_global < num_v && used_bp_roots[r_global]) { ++r_global; } if (r_global == num_v) { for (VertexID v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST; } continue; } } // Broadcast the r here. 
message_time -= WallTimer::get_time_mark(); MPI_Bcast(&r_global, 1, V_ID_Type, 0, MPI_COMM_WORLD); message_time += WallTimer::get_time_mark(); used_bp_roots[r_global] = 1; //#ifdef DEBUG_MESSAGES_ON // {//test // if (0 == host_id) { // printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt); // } // } //#endif // VertexID que_t0 = 0, que_t1 = 0, que_h = 0; fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST); fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); // Mark the r_global if (G.get_master_host_id(r_global) == host_id) { tmp_d[G.get_local_vertex_id(r_global)] = 0; que[end_que++] = r_global; } // Select the r_global's 64 neighbors { // Get r_global's neighbors into buffer_send, rank from high to low. VertexID local_degree = G.local_out_degrees[r_global]; std::vector<VertexID> buffer_send(local_degree); if (local_degree) { EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1; for (VertexID d_i = 0; d_i < local_degree; ++d_i) { EdgeID e_i = e_i_start - d_i; buffer_send[d_i] = G.out_edges[e_i]; } } // Get selected neighbors (up to 64) std::vector<VertexID> selected_nbrs; if (0 != host_id) { // Every host other than 0 sends neighbors to host 0 message_time -= WallTimer::get_time_mark(); MPI_Instance::send_buffer_2_dst(buffer_send, 0, SENDING_ROOT_NEIGHBORS, SENDING_SIZE_ROOT_NEIGHBORS); // Receive selected neighbors from host 0 MPI_Instance::recv_buffer_from_src(selected_nbrs, 0, SENDING_SELECTED_NEIGHBORS, SENDING_SIZE_SELETED_NEIGHBORS); message_time += WallTimer::get_time_mark(); } else { // Host 0 // Host 0 receives neighbors from others std::vector<VertexID> all_nbrs(buffer_send); std::vector<VertexID > buffer_recv; for (int loc = 0; loc < num_hosts - 1; ++loc) { message_time -= WallTimer::get_time_mark(); MPI_Instance::recv_buffer_from_any(buffer_recv, SENDING_ROOT_NEIGHBORS, SENDING_SIZE_ROOT_NEIGHBORS); message_time += WallTimer::get_time_mark(); if (buffer_recv.empty()) { continue; } buffer_send.resize(buffer_send.size() + 
buffer_recv.size()); std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin()); all_nbrs.resize(buffer_send.size()); all_nbrs.assign(buffer_send.begin(), buffer_send.end()); } assert(all_nbrs.size() == G.get_global_out_degree(r_global)); // Select 64 (or less) neighbors VertexID ns = 0; // number of selected neighbor, default 64 for (VertexID v_global : all_nbrs) { if (used_bp_roots[v_global]) { continue; } used_bp_roots[v_global] = 1; selected_nbrs.push_back(v_global); if (++ns == 64) { break; } } // Send selected neighbors to other hosts message_time -= WallTimer::get_time_mark(); for (int dest = 1; dest < num_hosts; ++dest) { MPI_Instance::send_buffer_2_dst(selected_nbrs, dest, SENDING_SELECTED_NEIGHBORS, SENDING_SIZE_SELETED_NEIGHBORS); } message_time += WallTimer::get_time_mark(); } // {//test // printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size()); // } // Synchronize the used_bp_roots. for (VertexID v_global : selected_nbrs) { used_bp_roots[v_global] = 1; } // Mark selected neighbors for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) { VertexID v_global = selected_nbrs[v_i]; if (host_id != G.get_master_host_id(v_global)) { continue; } tmp_que[end_tmp_que++] = v_global; tmp_d[G.get_local_vertex_id(v_global)] = 1; tmp_s[v_global].first = 1ULL << v_i; } } // Reduce the global number of active vertices VertexID global_num_actives = 1; UnweightedDist d = 0; while (global_num_actives) { //#ifdef DEBUG_MESSAGES_ON // {//test // if (0 == host_id) { // printf("d: %u que_size: %u\n", d, global_num_actives); // } // } //#endif // for (UnweightedDist d = 0; que_t0 < que_h; ++d) { VertexID num_sibling_es = 0, num_child_es = 0; // Send active masters to mirrors { std::vector<MsgUnitBP> buffer_send(end_que); for (VertexID que_i = 0; que_i < end_que; ++que_i) { VertexID v_global = que[que_i]; buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second); } // {// 
test // printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size()); // } for (int root = 0; root < num_hosts; ++root) { std::vector<MsgUnitBP> buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } // For parallel adding to queue VertexID size_buffer_recv = buffer_recv.size(); std::vector<VertexID> offsets_tmp_q(size_buffer_recv); #pragma omp parallel for for (VertexID i_q = 0; i_q < size_buffer_recv; ++i_q) { offsets_tmp_q[i_q] = G.local_out_degrees[buffer_recv[i_q].v_global]; } VertexID num_neighbors = PADO::prefix_sum_for_offsets(offsets_tmp_q); std::vector<VertexID> tmp_q(num_neighbors); std::vector<VertexID> sizes_tmp_q(size_buffer_recv, 0); // For parallel adding to sibling_es std::vector< std::pair<VertexID, VertexID> > tmp_sibling_es(num_neighbors); std::vector<VertexID> sizes_tmp_sibling_es(size_buffer_recv, 0); // For parallel adding to child_es std::vector< std::pair<VertexID, VertexID> > tmp_child_es(num_neighbors); std::vector<VertexID> sizes_tmp_child_es(size_buffer_recv, 0); #pragma omp parallel for // for (const MsgUnitBP &m : buffer_recv) { for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) { const MsgUnitBP &m = buffer_recv[i_m]; VertexID v_global = m.v_global; if (!G.local_out_degrees[v_global]) { continue; } tmp_s[v_global].first = m.S_n1; tmp_s[v_global].second = m.S_0; // Push labels bit_parallel_push_labels( G, v_global, tmp_q, sizes_tmp_q[i_m], tmp_sibling_es, sizes_tmp_sibling_es[i_m], tmp_child_es, sizes_tmp_child_es[i_m], offsets_tmp_q[i_m], // tmp_que, // end_tmp_que, // sibling_es, // num_sibling_es, // child_es, // num_child_es, tmp_d, d); } {// From tmp_sibling_es to sibling_es idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_sibling_es); PADO::collect_into_queue( tmp_sibling_es, offsets_tmp_q, sizes_tmp_sibling_es, total_size_tmp, sibling_es, num_sibling_es); } {// From tmp_child_es to child_es idi total_size_tmp = 
PADO::prefix_sum_for_offsets(sizes_tmp_child_es); PADO::collect_into_queue( tmp_child_es, offsets_tmp_q, sizes_tmp_child_es, total_size_tmp, child_es, num_child_es); } {// From tmp_q to tmp_que idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_q); PADO::collect_into_queue( tmp_q, offsets_tmp_q, sizes_tmp_q, total_size_tmp, tmp_que, end_tmp_que); } // {// test // printf("host_id: %u root: %u done push.\n", host_id, root); // } } } // Update the sets in tmp_s { #pragma omp parallel for for (VertexID i = 0; i < num_sibling_es; ++i) { VertexID v = sibling_es[i].first, w = sibling_es[i].second; __atomic_or_fetch(&tmp_s[v].second, tmp_s[w].first, __ATOMIC_SEQ_CST); __atomic_or_fetch(&tmp_s[w].second, tmp_s[v].first, __ATOMIC_SEQ_CST); // tmp_s[v].second |= tmp_s[w].first; // !!! Need to send back!!! // tmp_s[w].second |= tmp_s[v].first; } // Put into the buffer sending to others std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es); #pragma omp parallel for for (VertexID i = 0; i < num_sibling_es; ++i) { VertexID v = sibling_es[i].first; VertexID w = sibling_es[i].second; buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second); buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second); } // Send the messages for (int root = 0; root < num_hosts; ++root) { std::vector< std::pair<VertexID, uint64_t> > buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } size_t i_m_bound = buffer_recv.size(); #pragma omp parallel for for (size_t i_m = 0; i_m < i_m_bound; ++i_m) { const auto &m = buffer_recv[i_m]; __atomic_or_fetch(&tmp_s[m.first].second, m.second, __ATOMIC_SEQ_CST); } // for (const std::pair<VertexID, uint64_t> &m : buffer_recv) { // tmp_s[m.first].second |= m.second; // } } #pragma omp parallel for for (VertexID i = 0; i < num_child_es; ++i) { VertexID v = child_es[i].first, c = child_es[i].second; __atomic_or_fetch(&tmp_s[c].first, tmp_s[v].first, __ATOMIC_SEQ_CST); 
__atomic_or_fetch(&tmp_s[c].second, tmp_s[v].second, __ATOMIC_SEQ_CST); // tmp_s[c].first |= tmp_s[v].first; // tmp_s[c].second |= tmp_s[v].second; } } //#ifdef DEBUG_MESSAGES_ON // {// test // VertexID global_num_sibling_es; // VertexID global_num_child_es; // MPI_Allreduce(&num_sibling_es, // &global_num_sibling_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // MPI_Allreduce(&num_child_es, // &global_num_child_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // if (0 == host_id) { // printf("iter: %u num_sibling_es: %u num_child_es: %u\n", d, global_num_sibling_es, global_num_child_es); // } // //// printf("iter %u @%u host_id: %u num_sibling_es: %u num_child_es: %u\n", d, __LINE__, host_id, num_sibling_es, num_child_es); //// if (0 == d) { //// exit(EXIT_SUCCESS); //// } // } //#endif // Swap que and tmp_que tmp_que.swap(que); end_que = end_tmp_que; end_tmp_que = 0; MPI_Allreduce(&end_que, &global_num_actives, 1, V_ID_Type, MPI_SUM, MPI_COMM_WORLD); // } ++d; } #pragma omp parallel for for (VertexID v_local = 0; v_local < num_masters; ++v_local) { VertexID v_global = G.get_global_vertex_id(v_local); L[v_local].bp_dist[i_bpspt] = tmp_d[v_local]; L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1} L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1} } } } //template <VertexID BATCH_SIZE> //inline void DistBVCPLL<BATCH_SIZE>:: //bit_parallel_push_labels( // const DistGraph &G, // const VertexID v_global, // std::vector<VertexID> &tmp_que, // VertexID &end_tmp_que, // std::vector< std::pair<VertexID, VertexID> > &sibling_es, // VertexID &num_sibling_es, // std::vector< std::pair<VertexID, VertexID> > &child_es, // VertexID &num_child_es, // std::vector<UnweightedDist> &dists, // const UnweightedDist iter) //{ // EdgeID i_start = G.vertices_idx[v_global]; // EdgeID i_bound = i_start + G.local_out_degrees[v_global]; //// {//test //// 
printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]); //// } // for (EdgeID i = i_start; i < i_bound; ++i) { // VertexID tv_global = G.out_edges[i]; // VertexID tv_local = G.get_local_vertex_id(tv_global); // UnweightedDist td = iter + 1; // // if (iter > dists[tv_local]) { // ; // } else if (iter == dists[tv_local]) { // if (v_global < tv_global) { // ??? Why need v < tv !!! Because it's a undirected graph. // sibling_es[num_sibling_es].first = v_global; // sibling_es[num_sibling_es].second = tv_global; // ++num_sibling_es; // } // } else { // iter < dists[tv] // if (dists[tv_local] == MAX_UNWEIGHTED_DIST) { // tmp_que[end_tmp_que++] = tv_global; // dists[tv_local] = td; // } // child_es[num_child_es].first = v_global; // child_es[num_child_es].second = tv_global; // ++num_child_es; //// { //// printf("host_id: %u num_child_es: %u v_global: %u tv_global: %u\n", host_id, num_child_es, v_global, tv_global);//test //// } // } // } // //} // //template <VertexID BATCH_SIZE> //inline void DistBVCPLL<BATCH_SIZE>:: //bit_parallel_labeling( // const DistGraph &G, //// std::vector<IndexType> &L, // std::vector<uint8_t> &used_bp_roots) //{ // // Class type of Bit-Parallel label message unit. 
// struct MsgUnitBP { // VertexID v_global; // uint64_t S_n1; // uint64_t S_0; // // MsgUnitBP() = default; //// MsgUnitBP(MsgUnitBP&& other) = default; //// MsgUnitBP(MsgUnitBP& other) = default; //// MsgUnitBP& operator=(const MsgUnitBP& other) = default; //// MsgUnitBP& operator=(MsgUnitBP&& other) = default; // MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0) // : v_global(v), S_n1(sn1), S_0(s0) { } // }; //// VertexID num_v = G.num_v; //// EdgeID num_e = G.num_e; // EdgeID local_num_edges = G.num_edges_local; // // std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v // std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} // std::vector<VertexID> que(num_masters); // active queue // VertexID end_que = 0; // std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que // VertexID end_tmp_que = 0; // std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0) // std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1. // //// std::vector<UnweightedDist> tmp_d(num_v); // distances from the root to every v //// std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} //// std::vector<VertexID> que(num_v); // active queue //// std::vector<std::pair<VertexID, VertexID> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0) //// std::vector<std::pair<VertexID, VertexID> > child_es(num_e); // child and father, their distances to the root have difference of 1. 
// // VertexID r_global = 0; // root r // for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { // // Select the root r_global // if (0 == host_id) { // while (r_global < num_v && used_bp_roots[r_global]) { // ++r_global; // } // if (r_global == num_v) { // for (VertexID v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST; // } // continue; // } // } // // Broadcast the r here. // message_time -= WallTimer::get_time_mark(); // MPI_Bcast(&r_global, // 1, // V_ID_Type, // 0, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // used_bp_roots[r_global] = 1; //#ifdef DEBUG_MESSAGES_ON // {//test // if (0 == host_id) { // printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt); // } // } //#endif // //// VertexID que_t0 = 0, que_t1 = 0, que_h = 0; // fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST); // fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); // // // Mark the r_global // if (G.get_master_host_id(r_global) == host_id) { // tmp_d[G.get_local_vertex_id(r_global)] = 0; // que[end_que++] = r_global; // } // // Select the r_global's 64 neighbors // { // // Get r_global's neighbors into buffer_send, rank from low to high. 
// VertexID local_degree = G.local_out_degrees[r_global]; // std::vector<VertexID> buffer_send(local_degree); // if (local_degree) { // EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1; // for (VertexID d_i = 0; d_i < local_degree; ++d_i) { // EdgeID e_i = e_i_start - d_i; // buffer_send[d_i] = G.out_edges[e_i]; // } // } // // // Get selected neighbors (up to 64) // std::vector<VertexID> selected_nbrs; // if (0 != host_id) { // // Every host other than 0 sends neighbors to host 0 // message_time -= WallTimer::get_time_mark(); // MPI_Instance::send_buffer_2_dst(buffer_send, // 0, // SENDING_ROOT_NEIGHBORS, // SENDING_SIZE_ROOT_NEIGHBORS); // // Receive selected neighbors from host 0 // MPI_Instance::recv_buffer_from_src(selected_nbrs, // 0, // SENDING_SELECTED_NEIGHBORS, // SENDING_SIZE_SELETED_NEIGHBORS); // message_time += WallTimer::get_time_mark(); // } else { // // Host 0 // // Host 0 receives neighbors from others // std::vector<VertexID> all_nbrs(buffer_send); // std::vector<VertexID > buffer_recv; // for (int loc = 0; loc < num_hosts - 1; ++loc) { // message_time -= WallTimer::get_time_mark(); // MPI_Instance::recv_buffer_from_any(buffer_recv, // SENDING_ROOT_NEIGHBORS, // SENDING_SIZE_ROOT_NEIGHBORS); //// MPI_Instance::receive_dynamic_buffer_from_any(buffer_recv, //// num_hosts, //// SENDING_ROOT_NEIGHBORS); // message_time += WallTimer::get_time_mark(); // if (buffer_recv.empty()) { // continue; // } // // buffer_send.resize(buffer_send.size() + buffer_recv.size()); // std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin()); // all_nbrs.resize(buffer_send.size()); // all_nbrs.assign(buffer_send.begin(), buffer_send.end()); // } // assert(all_nbrs.size() == G.get_global_out_degree(r_global)); // // Select 64 (or less) neighbors // VertexID ns = 0; // number of selected neighbor, default 64 // for (VertexID v_global : all_nbrs) { // if (used_bp_roots[v_global]) { // continue; // } // 
used_bp_roots[v_global] = 1; // selected_nbrs.push_back(v_global); // if (++ns == 64) { // break; // } // } // // Send selected neighbors to other hosts // message_time -= WallTimer::get_time_mark(); // for (int dest = 1; dest < num_hosts; ++dest) { // MPI_Instance::send_buffer_2_dst(selected_nbrs, // dest, // SENDING_SELECTED_NEIGHBORS, // SENDING_SIZE_SELETED_NEIGHBORS); // } // message_time += WallTimer::get_time_mark(); // } //// {//test //// printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size()); //// } // // // Synchronize the used_bp_roots. // for (VertexID v_global : selected_nbrs) { // used_bp_roots[v_global] = 1; // } // // // Mark selected neighbors // for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) { // VertexID v_global = selected_nbrs[v_i]; // if (host_id != G.get_master_host_id(v_global)) { // continue; // } // tmp_que[end_tmp_que++] = v_global; // tmp_d[G.get_local_vertex_id(v_global)] = 1; // tmp_s[v_global].first = 1ULL << v_i; // } // } // // // Reduce the global number of active vertices // VertexID global_num_actives = 1; // UnweightedDist d = 0; // while (global_num_actives) { //// for (UnweightedDist d = 0; que_t0 < que_h; ++d) { // VertexID num_sibling_es = 0, num_child_es = 0; // // // // Send active masters to mirrors // { // std::vector<MsgUnitBP> buffer_send(end_que); // for (VertexID que_i = 0; que_i < end_que; ++que_i) { // VertexID v_global = que[que_i]; // buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second); // } //// {// test //// printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size()); //// } // // for (int root = 0; root < num_hosts; ++root) { // std::vector<MsgUnitBP> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const MsgUnitBP &m : buffer_recv) { // VertexID v_global = m.v_global; // if (!G.local_out_degrees[v_global]) { // continue; // } 
// tmp_s[v_global].first = m.S_n1; // tmp_s[v_global].second = m.S_0; // // Push labels // bit_parallel_push_labels(G, // v_global, // tmp_que, // end_tmp_que, // sibling_es, // num_sibling_es, // child_es, // num_child_es, // tmp_d, // d); // } //// {// test //// printf("host_id: %u root: %u done push.\n", host_id, root); //// } // } // } // // // Update the sets in tmp_s // { // // for (VertexID i = 0; i < num_sibling_es; ++i) { // VertexID v = sibling_es[i].first, w = sibling_es[i].second; // tmp_s[v].second |= tmp_s[w].first; // !!! Need to send back!!! // tmp_s[w].second |= tmp_s[v].first; // // } // // Put into the buffer sending to others // std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es); //// std::vector< std::vector<MPI_Request> > requests_list(num_hosts - 1); // for (VertexID i = 0; i < num_sibling_es; ++i) { // VertexID v = sibling_es[i].first; // VertexID w = sibling_es[i].second; //// buffer_send.emplace_back(v, tmp_s[v].second); //// buffer_send.emplace_back(w, tmp_s[w].second); // buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second); // buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second); // } // // Send the messages // for (int root = 0; root < num_hosts; ++root) { // std::vector< std::pair<VertexID, uint64_t> > buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const std::pair<VertexID, uint64_t> &m : buffer_recv) { // tmp_s[m.first].second |= m.second; // } // } // for (VertexID i = 0; i < num_child_es; ++i) { // VertexID v = child_es[i].first, c = child_es[i].second; // tmp_s[c].first |= tmp_s[v].first; // tmp_s[c].second |= tmp_s[v].second; // } // } ////#ifdef DEBUG_MESSAGES_ON // {// test // VertexID global_num_sibling_es; // VertexID global_num_child_es; // MPI_Allreduce(&num_sibling_es, // &global_num_sibling_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // MPI_Allreduce(&num_child_es, // 
&global_num_child_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // if (0 == host_id) { // printf("iter: %u num_sibling_es: %u num_child_es: %u\n", d, global_num_sibling_es, global_num_child_es); // } // } ////#endif // // // Swap que and tmp_que // tmp_que.swap(que); // end_que = end_tmp_que; // end_tmp_que = 0; // MPI_Allreduce(&end_que, // &global_num_actives, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // //// } // ++d; // } // // for (VertexID v_local = 0; v_local < num_masters; ++v_local) { // VertexID v_global = G.get_global_vertex_id(v_local); // L[v_local].bp_dist[i_bpspt] = tmp_d[v_local]; // L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1} // L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1} // } // } //} //// Function bit parallel checking: //// return false if shortest distance exits in bp labels, return true if bp labels cannot cover the distance //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline bool DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::bit_parallel_checking( // VertexID v_id, // VertexID w_id, // const std::vector<IndexType> &L, // UnweightedDist iter) //{ // // Bit Parallel Checking: if label_real_id to v_tail has shorter distance already // const IndexType &Lv = L[v_id]; // const IndexType &Lw = L[w_id]; // // _mm_prefetch(&Lv.bp_dist[0], _MM_HINT_T0); // _mm_prefetch(&Lv.bp_sets[0][0], _MM_HINT_T0); // _mm_prefetch(&Lw.bp_dist[0], _MM_HINT_T0); // _mm_prefetch(&Lw.bp_sets[0][0], _MM_HINT_T0); // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = Lv.bp_dist[i] + Lw.bp_dist[i]; // Use type VertexID in case of addition of two INF. // if (td - 2 <= iter) { // td += // (Lv.bp_sets[i][0] & Lw.bp_sets[i][0]) ? -2 : // ((Lv.bp_sets[i][0] & Lw.bp_sets[i][1]) | // (Lv.bp_sets[i][1] & Lw.bp_sets[i][0])) // ? 
//                        -1 : 0;
//            if (td <= iter) {
////                ++bp_hit_count;
//                return false;
//            }
//        }
//    }
//    return true;
//}

// Function for initializing at the begin of a batch.
//
// For the batch b_id covering roots [roots_start, roots_start + roots_size):
//  1. Reset the short_index indicators dirtied by the previous batch (tracked
//     via once_candidated_queue / once_candidated), then mark each local
//     root's own bit and its "got labels" bit (index BATCH_SIZE).
//  2. Append each local root's self-label (distance 0) to its real index L.
//  3. Each host in turn broadcasts its local roots' existing labels; every
//     host fills dist_table from what it receives and records received labels
//     in recved_dist_table so they can be cheaply reset after the batch.
//  4. Broadcast the roots' bit-parallel labels to fill bp_labels_table.
//  5. Enqueue all local roots into active_queue and MPI_Allreduce the global
//     number of active vertices, which is the return value.
//
// Loops switch to OpenMP parallel versions once the element count reaches
// THRESHOLD_PARALLEL; each sequential branch does the same work as its
// parallel counterpart.
template <VertexID BATCH_SIZE>
inline VertexID DistBVCPLL<BATCH_SIZE>::
initialization(
        const DistGraph &G,
        std::vector<ShortIndex> &short_index,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        std::vector<VertexID> &active_queue,
        VertexID &end_active_queue,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        VertexID b_id,
        VertexID roots_start,
        VertexID roots_size,
//        std::vector<VertexID> &roots_master_local,
        const std::vector<uint8_t> &used_bp_roots)
{
    // Get the roots_master_local, containing all roots mastered by this host
    // (as local IDs); roots already used as bit-parallel roots are skipped.
    std::vector<VertexID> roots_master_local;
    VertexID roots_bound = roots_start + roots_size;
    for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
        if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) {
            roots_master_local.push_back(G.get_local_vertex_id(r_global));
        }
    }
    VertexID size_roots_master_local = roots_master_local.size();

    // Short_index: clear indicators touched in the previous batch, then mark
    // every local root as already having its own label.
    {
        if (end_once_candidated_queue >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
                VertexID v_local = once_candidated_queue[v_i];
                short_index[v_local].indicator_reset();
                once_candidated[v_local] = 0;
            }
        } else {
            for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
                VertexID v_local = once_candidated_queue[v_i];
                short_index[v_local].indicator_reset();
                once_candidated[v_local] = 0;
            }
        }
        end_once_candidated_queue = 0;
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
                short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
            }
        } else {
            for (VertexID r_local : roots_master_local) {
                short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
                short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
            }
        }
    }

    // Real Index: append each local root's self-label (distance 0) to its
    // index L[r_local], under the current batch ID.
    {
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                IndexType &Lr = L[r_local];
                Lr.batches.emplace_back(
                        b_id, // Batch ID
                        Lr.distances.size(), // start_index
                        1); // size
                Lr.distances.emplace_back(
                        Lr.vertices.size(), // start_index
                        1, // size
                        0); // dist
                Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
            }
        } else {
            for (VertexID r_local : roots_master_local) {
                IndexType &Lr = L[r_local];
                Lr.batches.emplace_back(
                        b_id, // Batch ID
                        Lr.distances.size(), // start_index
                        1); // size
                Lr.distances.emplace_back(
                        Lr.vertices.size(), // start_index
                        1, // size
                        0); // dist
                Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
            }
        }
    }

    // Dist Table: unpack every local root's compressed label structure into
    // flat LabelTableUnit records {root_id, label_global_id, dist}, broadcast
    // them between hosts, and fill dist_table from what is received.
    {
        std::vector<LabelTableUnit> buffer_send; // buffer for sending
        // Dist_matrix
        {
            // (Old method unpacked the IndexType structure before sending;
            // this does the same, with a parallel path for many roots.)
            if (size_roots_master_local >= THRESHOLD_PARALLEL) {
                // Offsets for adding labels to buffer_send in parallel:
                // per-root label counts, turned into slice start offsets by
                // the prefix sum, so each root writes a disjoint slice.
                std::vector<VertexID> offsets_beffer_send(size_roots_master_local);
#pragma omp parallel for
                for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                    VertexID r_local = roots_master_local[i_r];
                    offsets_beffer_send[i_r] = L[r_local].vertices.size();
                }
                EdgeID size_labels = PADO::prefix_sum_for_offsets(offsets_beffer_send);
                buffer_send.resize(size_labels);
#pragma omp parallel for
                for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                    VertexID r_local = roots_master_local[i_r];
                    VertexID top_location = 0; // write cursor within this root's slice
                    IndexType &Lr = L[r_local];
                    VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
                    VertexID b_i_bound = Lr.batches.size();
                    _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
                    // Traverse batches array
                    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
                        VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
                        VertexID dist_start_index = Lr.batches[b_i].start_index;
                        VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
                        // Traverse distances array
                        for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
                            VertexID v_start_index = Lr.distances[dist_i].start_index;
                            VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
                            UnweightedDist dist = Lr.distances[dist_i].dist;
                            // Traverse vertices array, writing each label
                            // record into this root's slice of buffer_send.
                            for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                                buffer_send[offsets_beffer_send[i_r] + top_location++] =
                                        LabelTableUnit(r_root_id, Lr.vertices[v_i] + id_offset, dist);
                            }
                        }
                    }
                }
            } else {
                for (VertexID r_local : roots_master_local) {
                    // The distance table.
                    IndexType &Lr = L[r_local];
                    VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
                    VertexID b_i_bound = Lr.batches.size();
                    _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
                    // Traverse batches array
                    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
                        VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
                        VertexID dist_start_index = Lr.batches[b_i].start_index;
                        VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
                        // Traverse distances array
                        for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
                            VertexID v_start_index = Lr.distances[dist_i].start_index;
                            VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
                            UnweightedDist dist = Lr.distances[dist_i].dist;
                            // Traverse vertices array
                            for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
//                                dist_table[r_root_id][Lr.vertices[v_i] + id_offset] = dist; // distance table
                                buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset,
                                        dist); // buffer for sending
                            }
                        }
                    }
                }
            }
        }
        // Broadcast local roots labels: every host takes one turn as sender.
        for (int root = 0; root < num_hosts; ++root) {
            std::vector<LabelTableUnit> buffer_recv;
            one_host_bcasts_buffer_to_buffer(root,
                    buffer_send,
                    buffer_recv);
            if (buffer_recv.empty()) {
                continue;
            }
            EdgeID size_buffer_recv = buffer_recv.size();
            if (size_buffer_recv >= THRESHOLD_PARALLEL) {
                std::vector<VertexID> sizes_recved_root_labels(roots_size, 0);
#pragma omp parallel for
                for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                    const LabelTableUnit &l = buffer_recv[i_l];
                    VertexID root_id = l.root_id;
                    VertexID label_global_id = l.label_global_id;
                    UnweightedDist dist = l.dist;
                    dist_table[root_id][label_global_id] = dist;
                    // Atomically count root_id's received labels, for later
                    // adding to recved_dist_table.
                    __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST);
//                    recved_dist_table[root_id].push_back(label_global_id);
                }
                // Record the received labels in recved_dist_table, for later
                // reset: first size each root's list from the counts above...
#pragma omp parallel for
                for (VertexID root_id = 0; root_id < roots_size; ++root_id) {
                    VertexID &size = sizes_recved_root_labels[root_id];
                    if (size) {
                        recved_dist_table[root_id].resize(size);
                        // Reset to 0; presumably reused by TS_enqueue below
                        // as the per-root write cursor — TODO confirm.
                        size = 0;
                    }
                }
                // ...then fill the lists thread-safely in parallel.
#pragma omp parallel for
                for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                    const LabelTableUnit &l = buffer_recv[i_l];
                    VertexID root_id = l.root_id;
                    VertexID label_global_id = l.label_global_id;
                    PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], label_global_id);
                }
            } else {
                for (const LabelTableUnit &l : buffer_recv) {
                    VertexID root_id = l.root_id;
                    VertexID label_global_id = l.label_global_id;
                    UnweightedDist dist = l.dist;
                    dist_table[root_id][label_global_id] = dist;
                    // Record the received label in recved_dist_table, for later reset
                    recved_dist_table[root_id].push_back(label_global_id);
                }
            }
        }
    }

    // Build the Bit-Parallel Labels Table: broadcast every local root's
    // bit-parallel label (bp_dist, bp_sets, packed into MsgBPLabel records)
    // so all hosts fill bp_labels_table for this batch's roots.
    {
        std::vector<MsgBPLabel> buffer_send;
        std::vector<VertexID> roots_queue; // this host's roots, as global IDs
        for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
            if (G.get_master_host_id(r_global) != host_id) {
                continue;
            }
            roots_queue.push_back(r_global);
        }
        VertexID size_roots_queue = roots_queue.size();
        if (size_roots_queue >= THRESHOLD_PARALLEL) {
            buffer_send.resize(size_roots_queue);
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_queue; ++i_r) {
                VertexID r_global = roots_queue[i_r];
                VertexID r_local = G.get_local_vertex_id(r_global);
                VertexID r_root = r_global - roots_start;
                // Prepare for sending
                buffer_send[i_r] = MsgBPLabel(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
            }
        } else {
            for (VertexID r_global : roots_queue) {
                VertexID r_local = G.get_local_vertex_id(r_global);
                VertexID r_root = r_global - roots_start; // Local roots
                // Prepare for sending
                buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
            }
        }

        for (int root = 0; root < num_hosts; ++root) {
            std::vector<MsgBPLabel> buffer_recv;
            one_host_bcasts_buffer_to_buffer(root,
                    buffer_send,
                    buffer_recv);
            if (buffer_recv.empty()) {
                continue;
            }
            VertexID size_buffer_recv = buffer_recv.size();
            if (size_buffer_recv >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
                for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) {
                    const MsgBPLabel &m = buffer_recv[i_m];
                    VertexID r_root = m.r_root_id;
                    memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
                    memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
                }
            } else {
                for (const MsgBPLabel &m : buffer_recv) {
                    VertexID r_root = m.r_root_id;
                    memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
                    memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
                }
            }
        }
    }

    // Active_queue: all local roots start out active; sum the per-host queue
    // sizes for the global activity count returned to the caller.
    VertexID global_num_actives = 0; // global number of active vertices.
    {
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                active_queue[i_r] = r_local;
            }
            end_active_queue = size_roots_master_local;
        } else {
            for (VertexID r_local : roots_master_local) {
                active_queue[end_active_queue++] = r_local;
            }
        }
        // Get the global number of active vertices; the Allreduce time is
        // charged to message_time.
        message_time -= WallTimer::get_time_mark();
        MPI_Allreduce(&end_active_queue,
                &global_num_actives,
                1,
                V_ID_Type,
                MPI_SUM,
                MPI_COMM_WORLD);
        message_time += WallTimer::get_time_mark();
    }

    return global_num_actives;
}

// Sequential Version
//// Function for initializing at the begin of a batch
//// For a batch, initialize the temporary labels and real labels of roots;
//// traverse roots' labels to initialize distance buffer;
//// unset flag arrays is_active and got_labels
//template <VertexID BATCH_SIZE>
//inline VertexID DistBVCPLL<BATCH_SIZE>::
//initialization(
//        const DistGraph &G,
//        std::vector<ShortIndex> &short_index,
//        std::vector< std::vector<UnweightedDist> > &dist_table,
//        std::vector< std::vector<VertexID> > &recved_dist_table,
//        std::vector<BPLabelType> &bp_labels_table,
//        std::vector<VertexID> &active_queue,
//        VertexID &end_active_queue,
//        std::vector<VertexID> &once_candidated_queue,
//        VertexID &end_once_candidated_queue,
//        std::vector<uint8_t> &once_candidated,
//        VertexID b_id,
//        VertexID roots_start,
//        VertexID roots_size,
////        std::vector<VertexID> &roots_master_local,
//        const std::vector<uint8_t> &used_bp_roots)
//{
//    // Get the roots_master_local, containing all local roots.
// std::vector<VertexID> roots_master_local; // VertexID roots_bound = roots_start + roots_size; // for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { // if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) { // roots_master_local.push_back(G.get_local_vertex_id(r_global)); // } // } // // Short_index // { // for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) { // VertexID v_local = once_candidated_queue[v_i]; // short_index[v_local].indicator_reset(); // once_candidated[v_local] = 0; // } // end_once_candidated_queue = 0; // for (VertexID r_local : roots_master_local) { // short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself // short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels //// short_index[r_local].indicator.set(G.get_global_vertex_id(r_local) - roots_start); // v itself //// short_index[r_local].indicator.set(BATCH_SIZE); // v got labels // } // } //// // // Real Index // { // for (VertexID r_local : roots_master_local) { // IndexType &Lr = L[r_local]; // Lr.batches.emplace_back( // b_id, // Batch ID // Lr.distances.size(), // start_index // 1); // size // Lr.distances.emplace_back( // Lr.vertices.size(), // start_index // 1, // size // 0); // dist // Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start); // } // } // // // Dist Table // { //// struct LabelTableUnit { //// VertexID root_id; //// VertexID label_global_id; //// UnweightedDist dist; //// //// LabelTableUnit() = default; //// //// LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) : //// root_id(r), label_global_id(l), dist(d) {} //// }; // std::vector<LabelTableUnit> buffer_send; // buffer for sending // // Dist_matrix // { // // Deprecated Old method: unpack the IndexType structure before sending. // for (VertexID r_local : roots_master_local) { // // The distance table. 
// IndexType &Lr = L[r_local]; // VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start; // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // // Traverse batches array // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse distances array // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID v_start_index = Lr.distances[dist_i].start_index; // VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; // UnweightedDist dist = Lr.distances[dist_i].dist; // // Traverse vertices array // for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // // Write into the dist_table //// dist_table[r_root_id][Lr.vertices[v_i] + id_offset] = dist; // distance table // buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset, // dist); // buffer for sending // } // } // } // } // } // // Broadcast local roots labels // for (int root = 0; root < num_hosts; ++root) { // std::vector<LabelTableUnit> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const LabelTableUnit &l : buffer_recv) { // VertexID root_id = l.root_id; // VertexID label_global_id = l.label_global_id; // UnweightedDist dist = l.dist; // dist_table[root_id][label_global_id] = dist; // // Record the received label in recved_dist_table, for later reset // recved_dist_table[root_id].push_back(label_global_id); // } // } // } // // // Build the Bit-Parallel Labels Table // { //// struct MsgBPLabel { //// VertexID r_root_id; //// UnweightedDist bp_dist[BITPARALLEL_SIZE]; //// uint64_t bp_sets[BITPARALLEL_SIZE][2]; 
//// //// MsgBPLabel() = default; //// MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2]) //// : r_root_id(r) //// { //// memcpy(bp_dist, dist, sizeof(bp_dist)); //// memcpy(bp_sets, sets, sizeof(bp_sets)); //// } //// }; //// std::vector<MPI_Request> requests_send(num_hosts - 1); // std::vector<MsgBPLabel> buffer_send; // for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { // if (G.get_master_host_id(r_global) != host_id) { // continue; // } // VertexID r_local = G.get_local_vertex_id(r_global); // VertexID r_root = r_global - roots_start; // // Local roots //// memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); //// memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); // // Prepare for sending // buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets); // } // // for (int root = 0; root < num_hosts; ++root) { // std::vector<MsgBPLabel> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const MsgBPLabel &m : buffer_recv) { // VertexID r_root = m.r_root_id; // memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); // memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); // } // } // } // // // TODO: parallel enqueue // // Active_queue // VertexID global_num_actives = 0; // global number of active vertices. 
// { // for (VertexID r_local : roots_master_local) { // active_queue[end_active_queue++] = r_local; // } // // Get the global number of active vertices; // message_time -= WallTimer::get_time_mark(); // MPI_Allreduce(&end_active_queue, // &global_num_actives, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // } // // return global_num_actives; //} //// Function: push v_head_global's newly added labels to its all neighbors. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //push_single_label( // VertexID v_head_global, // VertexID label_root_id, // VertexID roots_start, // const DistGraph &G, // std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<bool> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<bool> &once_candidated, // const std::vector<BPLabelType> &bp_labels_table, // const std::vector<uint8_t> &used_bp_roots, // UnweightedDist iter) //{ // const BPLabelType &L_label = bp_labels_table[label_root_id]; // VertexID label_global_id = label_root_id + roots_start; // EdgeID e_i_start = G.vertices_idx[v_head_global]; // EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global]; // for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) { // VertexID v_tail_global = G.out_edges[e_i]; // if (used_bp_roots[v_tail_global]) { // continue; // } // if (v_tail_global < roots_start) { // all remaining v_tail_global has higher rank than any roots, then no roots can push new labels to it. 
// return; // } // // VertexID v_tail_local = G.get_local_vertex_id(v_tail_global); // const IndexType &L_tail = L[v_tail_local]; // if (v_tail_global <= label_global_id) { // // remaining v_tail_global has higher rank than the label // return; // } // ShortIndex &SI_v_tail = short_index[v_tail_local]; // if (SI_v_tail.indicator[label_root_id]) { // // The label is already selected before // continue; // } // // Record label_root_id as once selected by v_tail_global // SI_v_tail.indicator.set(label_root_id); // // Add into once_candidated_queue // // if (!once_candidated[v_tail_local]) { // // If v_tail_global is not in the once_candidated_queue yet, add it in // once_candidated[v_tail_local] = true; // once_candidated_queue[end_once_candidated_queue++] = v_tail_local; // } // // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already // // ++total_check_count; //// const IndexType &L_label = L[label_global_id]; //// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); //// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); //// bp_checking_ins_count.measure_start(); // bool no_need_add = false; // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i]; // if (td - 2 <= iter) { // td += // (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : // ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | // (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) // ? 
-1 : 0;
//                if (td <= iter) {
//                    no_need_add = true;
////                    ++bp_hit_count;
//                    break;
//                }
//            }
//        }
//        if (no_need_add) {
////            bp_checking_ins_count.measure_stop();
//            continue;
//        }
////        bp_checking_ins_count.measure_stop();
//        if (SI_v_tail.is_candidate[label_root_id]) {
//            continue;
//        }
//        SI_v_tail.is_candidate[label_root_id] = true;
//        SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
//
//        if (!got_candidates[v_tail_local]) {
//            // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
//            got_candidates[v_tail_local] = true;
//            got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
//        }
//    }
//// {// Just for the complain from the compiler
////     assert(iter >= iter);
//// }
//}

// Function: parallel (OpenMP) scheduler for one label-pushing round.
// Phase 1 (if local_size != 0): pack the labels that this host's active masters
//   (active_queue[global_start, global_start + min(global_size, local_size)))
//   inserted in the previous iteration into flat send buffers. A prefix sum over
//   per-vertex label counts gives each thread a private write offset, so the
//   parallel fill needs no locks.
// Phase 2: every host in turn broadcasts its (indices, labels) buffers; each
//   receiver pushes the received labels to the local out-neighbors of each owner
//   vertex via local_push_labels_para(), writing into per-sender temporary
//   queue slices that are finally collected into got_candidates_queue and
//   once_candidated_queue.
// NOTE(review): is_active is reset in both parallel loops for the same
// vertices; the second reset is redundant but harmless — confirm intent.
template<VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
schedule_label_pushing_para(
        const DistGraph &G,
        const VertexID roots_start,
        const std::vector<uint8_t> &used_bp_roots,
        const std::vector<VertexID> &active_queue,
        const VertexID global_start,
        const VertexID global_size,
        const VertexID local_size,
//        const VertexID start_active_queue,
//        const VertexID size_active_queue,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<ShortIndex> &short_index,
        const std::vector<BPLabelType> &bp_labels_table,
        std::vector<uint8_t> &got_candidates,
        std::vector<uint8_t> &is_active,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        const UnweightedDist iter)
{
    std::vector<std::pair<VertexID, VertexID> > buffer_send_indices;
        //.first: Vertex ID
        //.second: size of labels
    std::vector<VertexID> buffer_send_labels;
    if (local_size) {
        // This host owns part of the requested active range.
        const VertexID start_active_queue = global_start;
        const VertexID size_active_queue = global_size <= local_size ?
                                           global_size : local_size;
        const VertexID bound_active_queue = start_active_queue + size_active_queue;
        buffer_send_indices.resize(size_active_queue);
        // Prepare offset for inserting
        std::vector<VertexID> offsets_buffer_locs(size_active_queue);
#pragma omp parallel for
        for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
            VertexID v_head_local = active_queue[i_q];
            is_active[v_head_local] = 0; // reset is_active
            const IndexType &Lv = L[v_head_local];
            // Count of labels inserted into v_head during the last iteration
            // (the last element of Lv.distances describes the newest run).
            offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size;
        }
        // Convert per-vertex counts into exclusive-prefix-sum offsets; the
        // return value is the total number of labels to send.
        EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
//        {// test
//            if (0 == host_id) {
//                double memtotal = 0;
//                double memfree = 0;
//                double bytes_buffer_send_labels = size_buffer_send_labels * sizeof(VertexID);
//                PADO::Utils::system_memory(memtotal, memfree);
//                printf("bytes_buffer_send_labels: %fGB memtotal: %fGB memfree: %fGB\n",
//                        bytes_buffer_send_labels / (1 << 30), memtotal / 1024, memfree / 1024);
//            }
//        }
        buffer_send_labels.resize(size_buffer_send_labels);
//        {// test
//            if (0 == host_id) {
//                printf("buffer_send_labels created.\n");
//            }
//        }

        // Build buffer_send_labels by parallel inserting
#pragma omp parallel for
        for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
            VertexID tmp_i_q = i_q - start_active_queue;
            VertexID v_head_local = active_queue[i_q];
            is_active[v_head_local] = 0; // reset is_active
            VertexID v_head_global = G.get_global_vertex_id(v_head_local);
            const IndexType &Lv = L[v_head_local];
            // Prepare the buffer_send_indices
            buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
            // These 2 index are used for traversing v_head's last inserted labels
            VertexID l_i_start = Lv.distances.rbegin()->start_index;
            VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
            // Thread-private write cursor from the prefix sum above.
            VertexID top_labels = offsets_buffer_locs[tmp_i_q];
            for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
                VertexID label_root_id = Lv.vertices[l_i];
                buffer_send_labels[top_labels++] = label_root_id;
//                buffer_send_labels.push_back(label_root_id);
            }
        }
    }
    // NOTE(review): a large commented-out pre-refactor duplicate of the
    // buffer-building code above was removed here for readability.

    for (int root = 0; root < num_hosts; ++root) {
        // Get the indices
        std::vector<std::pair<VertexID, VertexID> > indices_buffer;
        one_host_bcasts_buffer_to_buffer(root,
                buffer_send_indices,
                indices_buffer);
        if (indices_buffer.empty()) {
            continue;
        }
        // Get the labels
        std::vector<VertexID> labels_buffer;
        one_host_bcasts_buffer_to_buffer(root,
                buffer_send_labels,
                labels_buffer);
        VertexID size_indices_buffer = indices_buffer.size();
        // Prepare the offsets for reading indices_buffer
        std::vector<EdgeID> starts_locs_index(size_indices_buffer);
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
            starts_locs_index[i_i] = e.second;
        }
        EdgeID total_recved_labels = PADO::prefix_sum_for_offsets(starts_locs_index);
        // Prepare the offsets for inserting v_tails into queue:
        // each sender vertex gets a slice sized by its local out-degree.
        std::vector<VertexID> offsets_tmp_queue(size_indices_buffer);
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
            offsets_tmp_queue[i_i] = G.local_out_degrees[e.first];
        }
        EdgeID num_ngbrs = PADO::prefix_sum_for_offsets(offsets_tmp_queue);
        std::vector<VertexID> tmp_got_candidates_queue(num_ngbrs);
        std::vector<VertexID> sizes_tmp_got_candidates_queue(size_indices_buffer, 0);
        std::vector<VertexID> tmp_once_candidated_queue(num_ngbrs);
        std::vector<VertexID> sizes_tmp_once_candidated_queue(size_indices_buffer, 0);
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            VertexID v_head_global = indices_buffer[i_i].first;
            // [start_index, bound_index) is this sender's span in labels_buffer.
            EdgeID start_index = starts_locs_index[i_i];
            EdgeID bound_index = i_i != size_indices_buffer - 1 ?
                                 starts_locs_index[i_i + 1] :
                                 total_recved_labels;
            if (G.local_out_degrees[v_head_global]) {
                local_push_labels_para(
                        v_head_global,
                        start_index,
                        bound_index,
                        roots_start,
                        labels_buffer,
                        G,
                        short_index,
//                        std::vector<VertexID> &got_candidates_queue,
//                        VertexID &end_got_candidates_queue,
                        tmp_got_candidates_queue,
                        sizes_tmp_got_candidates_queue[i_i],
                        offsets_tmp_queue[i_i],
                        got_candidates,
//                        std::vector<VertexID> &once_candidated_queue,
//                        VertexID &end_once_candidated_queue,
                        tmp_once_candidated_queue,
                        sizes_tmp_once_candidated_queue[i_i],
                        once_candidated,
                        bp_labels_table,
                        used_bp_roots,
                        iter);
            }
        }
        {// Collect elements from tmp_got_candidates_queue to got_candidates_queue
            VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_got_candidates_queue);
            PADO::collect_into_queue(
                    tmp_got_candidates_queue,
                    offsets_tmp_queue, // the locations for reading tmp_got_candidate_queue
                    sizes_tmp_got_candidates_queue, // the locations for writing got_candidate_queue
                    total_new,
                    got_candidates_queue,
                    end_got_candidates_queue);
        }
        {// Collect elements from tmp_once_candidated_queue to once_candidated_queue
            VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_once_candidated_queue);
            PADO::collect_into_queue(
                    tmp_once_candidated_queue,
                    offsets_tmp_queue, // the locations for reading tmp_once_candidats_queue
                    sizes_tmp_once_candidated_queue, // the locations for writing once_candidated_queue
                    total_new,
                    once_candidated_queue,
                    end_once_candidated_queue);
        }
    }
}

// Function: pushes v_head's labels to v_head's every (master) neighbor
template <VertexID
BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
local_push_labels_para(
        const VertexID v_head_global,
        const EdgeID start_index,
        const EdgeID bound_index,
        const VertexID roots_start,
        const std::vector<VertexID> &labels_buffer,
        const DistGraph &G,
        std::vector<ShortIndex> &short_index,
//        std::vector<VertexID> &got_candidates_queue,
//        VertexID &end_got_candidates_queue,
        std::vector<VertexID> &tmp_got_candidates_queue,
        VertexID &size_tmp_got_candidates_queue,
        const VertexID offset_tmp_queue,
        std::vector<uint8_t> &got_candidates,
//        std::vector<VertexID> &once_candidated_queue,
//        VertexID &end_once_candidated_queue,
        std::vector<VertexID> &tmp_once_candidated_queue,
        VertexID &size_tmp_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        const std::vector<BPLabelType> &bp_labels_table,
        const std::vector<uint8_t> &used_bp_roots,
        const UnweightedDist iter)
{
    // Parallel-context variant: pushes v_head_global's newly received labels
    // (labels_buffer[start_index, bound_index)) to every local out-neighbor of
    // v_head. It runs concurrently from an OpenMP loop, so the shared per-vertex
    // flags (indicator / once_candidated / is_candidate / got_candidates) are
    // claimed with CAS, and newly discovered vertices are written into the
    // caller-provided per-sender slices tmp_*_queue[offset_tmp_queue + ...]
    // rather than the shared global queues.
    // Traverse v_head's every neighbor v_tail
    EdgeID e_i_start = G.vertices_idx[v_head_global];
    EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
    for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
        VertexID v_tail_global = G.out_edges[e_i];
        if (used_bp_roots[v_tail_global]) {
            continue;
        }
        if (v_tail_global < roots_start) {
            // v_tail_global has higher rank than any roots, then no roots can
            // push new labels to it. Neighbors are rank-ordered, so stop here.
            return;
        }
        VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
        const IndexType &L_tail = L[v_tail_local];
        ShortIndex &SI_v_tail = short_index[v_tail_local];
        // Traverse v_head's last inserted labels
        for (VertexID l_i = start_index; l_i < bound_index; ++l_i) {
            VertexID label_root_id = labels_buffer[l_i];
            VertexID label_global_id = label_root_id + roots_start;
            if (v_tail_global <= label_global_id) {
                // v_tail_global has higher rank than the label
                continue;
            }
//            if (SI_v_tail.indicator[label_root_id]) {
//                // The label is already selected before
//                continue;
//            }
//            // Record label_root_id as once selected by v_tail_global
//            SI_v_tail.indicator[label_root_id] = 1;
            {// Deal with race condition
                // Atomically claim the indicator bit; only one thread wins.
                if (!PADO::CAS(SI_v_tail.indicator.data() + label_root_id,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    // The label is already selected before
                    continue;
                }
            }
            // Add into once_candidated_queue
            if (!once_candidated[v_tail_local]) {
                // If v_tail_global is not in the once_candidated_queue yet, add it in
                if (PADO::CAS(once_candidated.data() + v_tail_local,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    tmp_once_candidated_queue[offset_tmp_queue + size_tmp_once_candidated_queue++] = v_tail_local;
                }
//                once_candidated[v_tail_local] = 1;
//                once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
            }
            // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
//            const IndexType &L_label = L[label_global_id];
//            _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
//            _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
            const BPLabelType &L_label = bp_labels_table[label_root_id];
            bool no_need_add = false;
            for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
                VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
                if (td - 2 <= iter) {
                    // Shared set-bit overlap shortens the path by 2 (same set)
                    // or by 1 (adjacent sets).
                    td +=
                        (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
                        ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
                         (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
                        ? -1 : 0;
                    if (td <= iter) {
                        no_need_add = true;
                        break;
                    }
                }
            }
            if (no_need_add) {
                continue;
            }
//            if (SI_v_tail.is_candidate[label_root_id]) {
//                continue;
//            }
//            SI_v_tail.is_candidate[label_root_id] = 1;
//            SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
            if (!SI_v_tail.is_candidate[label_root_id]) {
                if (CAS(SI_v_tail.is_candidate.data() + label_root_id,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    // Thread-safe enqueue into v_tail's candidate list.
                    PADO::TS_enqueue(SI_v_tail.candidates_que, SI_v_tail.end_candidates_que, label_root_id);
                }
            }
            // Add into got_candidates queue
//            if (!got_candidates[v_tail_local]) {
//                // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
//                got_candidates[v_tail_local] = 1;
//                got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
//            }
            if (!got_candidates[v_tail_local]) {
                if (CAS(got_candidates.data() + v_tail_local,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    tmp_got_candidates_queue[offset_tmp_queue + size_tmp_got_candidates_queue++] = v_tail_local;
                }
            }
        }
    }
//    {
//        assert(iter >= iter);
//    }
}

// Function: pushes v_head's labels to v_head's every (master) neighbor.
// Sequential variant of local_push_labels_para(): writes flags and queues
// directly, with no CAS, so it must NOT be called from concurrent threads.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
local_push_labels_seq(
        VertexID v_head_global,
        EdgeID start_index,
        EdgeID bound_index,
        VertexID roots_start,
        const std::vector<VertexID> &labels_buffer,
        const DistGraph &G,
        std::vector<ShortIndex> &short_index,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<uint8_t> &got_candidates,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        const std::vector<BPLabelType> &bp_labels_table,
        const std::vector<uint8_t> &used_bp_roots,
        const UnweightedDist iter)
{
    // Traverse v_head's every neighbor v_tail
    EdgeID e_i_start = G.vertices_idx[v_head_global];
    EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
    for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
        VertexID v_tail_global = G.out_edges[e_i];
        if (used_bp_roots[v_tail_global]) {
            continue;
        }
        if (v_tail_global < roots_start) {
            // v_tail_global has higher rank than any roots, then no roots can
            // push new labels to it. Neighbors are rank-ordered, so stop here.
            return;
        }
        // Traverse v_head's last inserted labels
        for (VertexID l_i = start_index; l_i < bound_index; ++l_i) {
            VertexID label_root_id = labels_buffer[l_i];
            VertexID label_global_id = label_root_id + roots_start;
            if (v_tail_global <= label_global_id) {
                // v_tail_global has higher rank than the label
                continue;
            }
            VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
            const IndexType &L_tail = L[v_tail_local];
            ShortIndex &SI_v_tail = short_index[v_tail_local];
            if (SI_v_tail.indicator[label_root_id]) {
                // The label is already selected before
                continue;
            }
            // Record label_root_id as once selected by v_tail_global
            SI_v_tail.indicator[label_root_id] = 1;
//            SI_v_tail.indicator.set(label_root_id);
            // Add into once_candidated_queue
            if (!once_candidated[v_tail_local]) {
                // If v_tail_global is not in the once_candidated_queue yet, add it in
                once_candidated[v_tail_local] = 1;
                once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
            }
            // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
//            const IndexType &L_label = L[label_global_id];
//            _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
//            _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
            const BPLabelType &L_label = bp_labels_table[label_root_id];
            bool no_need_add = false;
            for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
                VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
                if (td - 2 <= iter) {
                    // Shared set-bit overlap shortens the path by 2 (same set)
                    // or by 1 (adjacent sets).
                    td +=
                        (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
                        ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
                         (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
                        ? -1 : 0;
                    if (td <= iter) {
                        no_need_add = true;
                        break;
                    }
                }
            }
            if (no_need_add) {
                continue;
            }
            if (SI_v_tail.is_candidate[label_root_id]) {
                continue;
            }
            SI_v_tail.is_candidate[label_root_id] = 1;
            SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;

            if (!got_candidates[v_tail_local]) {
                // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
                got_candidates[v_tail_local] = 1;
                got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
            }
        }
    }
//    {
//        assert(iter >= iter);
//    }
}

// NOTE(review): two large commented-out legacy functions were removed here for
// readability: local_push_labels() (superseded by the _seq/_para variants
// above) and the DEPRECATED sync_masters_2_mirrors() (the original author
// noted it delivered wrong messages when called as a function, although the
// same code worked when inlined into the caller).

// Function for distance query;
// traverse vertex v_id's labels;
// return false if shorter distance exists already, return true if the cand_root_id can be added into v_id's label.
// Function: prune check for candidate cand_root_id against vertex v_id_local.
// Walks all of v_id's existing labels batch by batch; for each label hub v with
// a lower rank than cand_real_id, tests whether
//   dist(v_id, v) + dist_table[cand_root_id][v] <= iter,
// i.e. whether an equally short or shorter path is already covered.
// Returns false if so (candidate pruned), true if the candidate may be added.
template <VertexID BATCH_SIZE>
inline bool DistBVCPLL<BATCH_SIZE>::
distance_query(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
//        const std::vector<IndexType> &L,
        const std::vector< std::vector<UnweightedDist> > &dist_table,
        UnweightedDist iter)
{
    VertexID cand_real_id = cand_root_id + roots_start;
    const IndexType &Lv = L[v_id_local];
    // Traverse v_id's all existing labels
    VertexID b_i_bound = Lv.batches.size();
    _mm_prefetch(&Lv.batches[0], _MM_HINT_T0);
    _mm_prefetch(&Lv.distances[0], _MM_HINT_T0);
    _mm_prefetch(&Lv.vertices[0], _MM_HINT_T0);
    //_mm_prefetch(&dist_table[cand_root_id][0], _MM_HINT_T0);
    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
        // Labels store batch-relative IDs; id_offset converts back to global.
        VertexID id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE;
        VertexID dist_start_index = Lv.batches[b_i].start_index;
        VertexID dist_bound_index = dist_start_index + Lv.batches[b_i].size;
        // Traverse dist_table
        for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
            UnweightedDist dist = Lv.distances[dist_i].dist;
            if (dist >= iter) {
                // In a batch, the labels' distances are increasingly ordered.
                // If the half path distance is already greater than their
                // targeted distance, jump to next batch.
                break;
            }
            VertexID v_start_index = Lv.distances[dist_i].start_index;
            VertexID v_bound_index = v_start_index + Lv.distances[dist_i].size;
//            _mm_prefetch(&dist_table[cand_root_id][0], _MM_HINT_T0);
            _mm_prefetch(reinterpret_cast<const char *>(dist_table[cand_root_id].data()), _MM_HINT_T0);
            for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                VertexID v = Lv.vertices[v_i] + id_offset; // v is a label hub of v_id
                if (v >= cand_real_id) {
                    // Vertex cand_real_id cannot have labels whose ranks are lower than it,
                    // in which case dist_table[cand_root_id][v] does not exist.
                    continue;
                }
                VertexID d_tmp = dist + dist_table[cand_root_id][v];
                if (d_tmp <= iter) {
                    // An existing hub already covers this pair at distance <= iter.
                    return false;
                }
            }
        }
    }
    return true;
}

//// Sequential version
// Function inserts candidate cand_root_id into vertex v_id's labels;
// update the distance buffer dist_table;
// but it only update the v_id's labels' vertices array;
// If v_id is itself a root of the current batch, the (root, label) pair is
// recorded in buffer_send so other hosts can update their dist_table later.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
insert_label_only_seq(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
        VertexID roots_size,
        const DistGraph &G,
//        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::pair<VertexID, VertexID> > &buffer_send)
//        UnweightedDist iter)
{
    L[v_id_local].vertices.push_back(cand_root_id);
    // Update the distance buffer if v_id is a root
    VertexID v_id_global = G.get_global_vertex_id(v_id_local);
    VertexID v_root_id = v_id_global - roots_start;
    if (v_id_global >= roots_start && v_root_id < roots_size) {
        VertexID cand_real_id = cand_root_id + roots_start;
//        dist_table[v_root_id][cand_real_id] = iter;
        // Put the update into the buffer_send for later sending
        buffer_send.emplace_back(v_root_id, cand_real_id);
    }
}

//// Parallel Version
// Function inserts candidate cand_root_id into vertex v_id's labels;
// update the distance buffer dist_table;
// but it only update the v_id's labels' vertices array;
// Parallel-safe variant: instead of a shared buffer_send, the pair is written
// into the caller-provided slice tmp_buffer_send[offset_tmp_buffer_send + ...].
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
insert_label_only_para(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
        VertexID roots_size,
        const DistGraph &G,
//        std::vector< std::pair<VertexID, VertexID> > &buffer_send)
        std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send,
        EdgeID &size_tmp_buffer_send,
        const EdgeID offset_tmp_buffer_send)
{
    L[v_id_local].vertices.push_back(cand_root_id);
    // Update the distance buffer if v_id is a root
    VertexID v_id_global = G.get_global_vertex_id(v_id_local);
    VertexID v_root_id = v_id_global - roots_start;
    if (v_id_global >= roots_start && v_root_id < roots_size) {
        VertexID
        cand_real_id = cand_root_id + roots_start;
        // Put the update into the buffer_send for later sending
//        buffer_send.emplace_back(v_root_id, cand_real_id);
        tmp_buffer_send[offset_tmp_buffer_send + size_tmp_buffer_send++] = std::make_pair(v_root_id, cand_real_id);
    }
}

// Function updates those index arrays in v_id's label only if v_id has been
// inserted new labels: extends (or creates) the current batch entry and
// appends a distance element describing the inserted_count labels just pushed
// onto Lv.vertices, all at distance iter.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
update_label_indices(
        VertexID v_id_local,
        VertexID inserted_count,
//        std::vector<IndexType> &L,
        std::vector<ShortIndex> &short_index,
        VertexID b_id,
        UnweightedDist iter)
{
    IndexType &Lv = L[v_id_local];
    // indicator[BATCH_SIZE] is true, means v got some labels already in this batch
    if (short_index[v_id_local].indicator[BATCH_SIZE]) {
        // Increase the batches' last element's size because a new distance element need to be added
        ++(Lv.batches.rbegin() -> size);
    } else {
        short_index[v_id_local].indicator[BATCH_SIZE] = 1;
//        short_index[v_id_local].indicator.set(BATCH_SIZE);
        // Insert a new Batch with batch_id, start_index, and size because a new distance element need to be added
        Lv.batches.emplace_back(
                b_id, // batch id
                Lv.distances.size(), // start index
                1); // size
    }
    // Insert a new distance element with start_index, size, and dist
    Lv.distances.emplace_back(
            Lv.vertices.size() - inserted_count, // start index
            inserted_count, // size
            iter); // distance
}

// Function to reset dist_table the distance buffer to INF
// Traverse every root's labels to reset its distance buffer elements to INF.
// In this way to reduce the cost of initialization of the next batch.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
reset_at_end(
//        const DistGraph &G,
//        VertexID roots_start,
//        const std::vector<VertexID> &roots_master_local,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table)
{
    // NOTE(review): a commented-out older version that reset dist_table by
    // re-traversing local masters' label structures was removed here; the
    // active code below resets only the entries recorded during this batch.
    // Reset dist_table according to received masters' labels from other hosts
    for (VertexID r_root_id = 0; r_root_id < BATCH_SIZE; ++r_root_id) {
        for (VertexID cand_real_id : recved_dist_table[r_root_id]) {
            dist_table[r_root_id][cand_real_id] = MAX_UNWEIGHTED_DIST;
        }
        recved_dist_table[r_root_id].clear();
    }
    // Reset bit-parallel labels table
    for (VertexID r_root_id = 0; r_root_id < BATCH_SIZE; ++r_root_id) {
        memset(bp_labels_table[r_root_id].bp_dist, 0, sizeof(bp_labels_table[r_root_id].bp_dist));
        memset(bp_labels_table[r_root_id].bp_sets, 0, sizeof(bp_labels_table[r_root_id].bp_sets));
    }
}

template
<VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
batch_process(
        const DistGraph &G,
        const VertexID b_id,
        const VertexID roots_start, // start id of roots
        const VertexID roots_size, // how many roots in the batch
        const std::vector<uint8_t> &used_bp_roots,
        std::vector<VertexID> &active_queue,
        VertexID &end_active_queue,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<ShortIndex> &short_index,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        std::vector<uint8_t> &got_candidates,
//        std::vector<bool> &got_candidates,
        std::vector<uint8_t> &is_active,
//        std::vector<bool> &is_active,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated)
//        std::vector<bool> &once_candidated)
{
    // Processes one batch of roots [roots_start, roots_start + roots_size):
    // initialize, then iterate (scatter labels -> gather candidates -> sync
    // dist_table across hosts -> allreduce active count) until no host has
    // active vertices, and finally reset the shared tables for the next batch.

    // At the beginning of a batch, initialize the labels L and distance buffer dist_table;
    initializing_time -= WallTimer::get_time_mark();
    VertexID global_num_actives = initialization(G,
            short_index,
            dist_table,
            recved_dist_table,
            bp_labels_table,
            active_queue,
            end_active_queue,
            once_candidated_queue,
            end_once_candidated_queue,
            once_candidated,
            b_id,
            roots_start,
            roots_size,
//            roots_master_local,
            used_bp_roots);
    initializing_time += WallTimer::get_time_mark();
    UnweightedDist iter = 0; // The iterator, also the distance for current iteration
    while (global_num_actives) {
        ++iter;
//#ifdef DEBUG_MESSAGES_ON
        // NOTE(review): this per-iteration memory/progress log is always on —
        // the DEBUG_MESSAGES_ON guard above is commented out. Consider
        // re-enabling the guard for production runs.
        {//test
//            if (0 == host_id) {
            double memtotal = 0;
            double memfree = 0;
            PADO::Utils::system_memory(memtotal, memfree);
            printf("iter: %u "
                   "host_id: %d "
                   "global_num_actives: %u "
                   "L.size(): %.2fGB "
                   "memtotal: %.2fGB "
                   "memfree: %.2fGB\n",
                   iter,
                   host_id,
                   global_num_actives,
                   get_index_size() * 1.0 / (1 << 30),
                   memtotal / 1024,
                   memfree / 1024);
//            }
        }
//#endif
        // Traverse active vertices to push their labels as candidates
        // Send masters' newly added labels to other hosts
        {
            scatter_time -= WallTimer::get_time_mark();
            // Divide the pushing into many-time runs.
            // The chunking is driven by global_num_actives (the global maximum,
            // see the MPI_MAX allreduce below), so every host executes the same
            // number of schedule_label_pushing_para() rounds; hosts whose local
            // queue is exhausted pass local_size == 0.
            const VertexID chunk_size = 1 << 20;
            VertexID remainder = global_num_actives % chunk_size;
            VertexID bound_global_i = global_num_actives - remainder;
//            VertexID remainder = end_active_queue % chunk_size;
//            VertexID bound_active_queue = end_active_queue - remainder;
            VertexID local_size;
            for (VertexID global_i = 0; global_i < bound_global_i; global_i += chunk_size) {
                if (global_i < end_active_queue) {
                    local_size = end_active_queue - global_i;
                } else {
                    local_size = 0;
                }
                schedule_label_pushing_para(
                        G,
                        roots_start,
                        used_bp_roots,
                        active_queue,
                        global_i,
                        chunk_size,
                        local_size,
                        got_candidates_queue,
                        end_got_candidates_queue,
                        short_index,
                        bp_labels_table,
                        got_candidates,
                        is_active,
                        once_candidated_queue,
                        end_once_candidated_queue,
                        once_candidated,
                        iter);
            }
            if (remainder) {
                // Last, partially-filled chunk.
                if (bound_global_i < end_active_queue) {
                    local_size = end_active_queue - bound_global_i;
                } else {
                    local_size = 0;
                }
                schedule_label_pushing_para(
                        G,
                        roots_start,
                        used_bp_roots,
                        active_queue,
                        bound_global_i,
                        remainder,
                        local_size,
                        got_candidates_queue,
                        end_got_candidates_queue,
                        short_index,
                        bp_labels_table,
                        got_candidates,
                        is_active,
                        once_candidated_queue,
                        end_once_candidated_queue,
                        once_candidated,
                        iter);
            }
            // (A commented-out single-call schedule_label_pushing_para()
            //  variant was kept here previously; removed for readability.)
            end_active_queue = 0;
            scatter_time += WallTimer::get_time_mark();
        }
        //// For Backup
        // (A large commented-out earlier implementation of the scatter phase
        //  was kept here: it packed masters' newly added labels into
        //  buffer_send_indices/buffer_send_labels, broadcast them with
        //  one_host_bcasts_buffer_to_buffer(), and pushed them via
        //  local_push_labels_para()/local_push_labels_seq() with prefix-sum
        //  offsets and collect_into_queue() collection. Removed for
        //  readability — see version control history for the full text.)

        // Traverse vertices in the got_candidates_queue to insert labels
        {
            gather_time -= WallTimer::get_time_mark();
            std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table
            // pair.first: root id
            // pair.second: label (global) id of the root
//            if (true) {
            if (end_got_candidates_queue >= THRESHOLD_PARALLEL) {
                // Prepare for parallel active_queue
                // Don't need offsets_tmp_active_queue here, because the index i_queue is the offset already.
                // Actually we still need offsets_tmp_active_queue, because collect_into_queue() needs it.
                std::vector<VertexID> offsets_tmp_active_queue(end_got_candidates_queue);
#pragma omp parallel for
                for (VertexID i_q = 0; i_q < end_got_candidates_queue; ++i_q) {
                    offsets_tmp_active_queue[i_q] = i_q;
                }
                std::vector<VertexID> tmp_active_queue(end_got_candidates_queue);
                std::vector<VertexID> sizes_tmp_active_queue(end_got_candidates_queue, 0); // Size will only be 0 or 1, but it will become offsets eventually.
                // Prepare for parallel buffer_send: per-vertex upper bound on
                // the number of (root, label) updates it may emit.
                std::vector<EdgeID> offsets_tmp_buffer_send(end_got_candidates_queue);
#pragma omp parallel for
                for (VertexID i_q = 0; i_q < end_got_candidates_queue; ++i_q) {
                    VertexID v_id_local = got_candidates_queue[i_q];
                    VertexID v_global_id = G.get_global_vertex_id(v_id_local);
                    if (v_global_id >= roots_start && v_global_id < roots_start + roots_size) {
                        // If v_global_id is root, its new labels should be put into buffer_send
                        offsets_tmp_buffer_send[i_q] = short_index[v_id_local].end_candidates_que;
                    } else {
                        offsets_tmp_buffer_send[i_q] = 0;
                    }
                }
                EdgeID total_send_labels = PADO::prefix_sum_for_offsets(offsets_tmp_buffer_send);
                // (commented-out memory-usage debug print removed)
                std::vector< std::pair<VertexID, VertexID> > tmp_buffer_send(total_send_labels);
                // (commented-out debug print removed)
                std::vector<EdgeID> sizes_tmp_buffer_send(end_got_candidates_queue, 0);
#pragma omp parallel for
                for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) {
                    VertexID v_id_local = got_candidates_queue[i_queue];
                    VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
                    got_candidates[v_id_local] = 0; // reset got_candidates
                    // Traverse v_id's all candidates
                    VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
                    for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
                        VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
                        short_index[v_id_local].is_candidate[cand_root_id] = 0;
                        // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
                        if (distance_query(
                                cand_root_id,
                                v_id_local,
                                roots_start,
//                                L,
                                dist_table,
                                iter)) {
                            if (!is_active[v_id_local]) {
                                is_active[v_id_local] = 1;
//                                active_queue[end_active_queue++] = v_id_local;
                                // Per-vertex slot i_queue: no contention across threads.
                                tmp_active_queue[i_queue + sizes_tmp_active_queue[i_queue]++] = v_id_local;
                            }
                            ++inserted_count;
                            // The candidate cand_root_id needs to be added into v_id's label
                            insert_label_only_para(
                                    cand_root_id,
                                    v_id_local,
                                    roots_start,
                                    roots_size,
                                    G,
                                    tmp_buffer_send,
                                    sizes_tmp_buffer_send[i_queue],
                                    offsets_tmp_buffer_send[i_queue]);
//                                    buffer_send);
                        }
                    }
                    short_index[v_id_local].end_candidates_que = 0;
                    if (0 != inserted_count) {
                        // Update other arrays in L[v_id] if new labels were inserted in this iteration
                        update_label_indices(
                                v_id_local,
                                inserted_count,
//                                L,
                                short_index,
                                b_id,
                                iter);
                    }
                }
                {// Collect elements from tmp_active_queue to active_queue
                    VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_active_queue);
                    PADO::collect_into_queue(
                            tmp_active_queue,
                            offsets_tmp_active_queue,
                            sizes_tmp_active_queue,
                            total_new,
                            active_queue,
                            end_active_queue);
                }
                {// Collect elements from tmp_buffer_send to buffer_send
                    EdgeID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_buffer_send);
                    // (commented-out memory-usage debug prints removed)
                    buffer_send.resize(total_new);
                    EdgeID zero_size = 0;
                    PADO::collect_into_queue(
                            tmp_buffer_send,
                            offsets_tmp_buffer_send,
                            sizes_tmp_buffer_send,
                            total_new,
                            buffer_send,
                            zero_size);
                    // (commented-out iter==6 buffer_send sanity-check dump removed)
                }
            } else {
                // Sequential version of the candidate-insertion phase (small queue).
                for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) {
                    VertexID v_id_local = got_candidates_queue[i_queue];
                    VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
                    got_candidates[v_id_local] = 0; // reset got_candidates
                    // Traverse v_id's all candidates
                    VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
                    for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
                        VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
                        short_index[v_id_local].is_candidate[cand_root_id] = 0;
                        // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
                        if (distance_query(
                                cand_root_id,
                                v_id_local,
                                roots_start,
//                                L,
                                dist_table,
                                iter)) {
                            if (!is_active[v_id_local]) {
                                is_active[v_id_local] = 1;
                                active_queue[end_active_queue++] = v_id_local;
                            }
                            ++inserted_count;
                            // The candidate cand_root_id needs to be added into v_id's label
                            insert_label_only_seq(
                                    cand_root_id,
                                    v_id_local,
                                    roots_start,
                                    roots_size,
                                    G,
//                                    dist_table,
                                    buffer_send);
//                                    iter);
                        }
                    }
                    short_index[v_id_local].end_candidates_que = 0;
                    if (0 != inserted_count) {
                        // Update other arrays in L[v_id] if new labels were inserted in this iteration
                        update_label_indices(
                                v_id_local,
                                inserted_count,
//                                L,
                                short_index,
                                b_id,
                                iter);
                    }
                }
            }
            // (commented-out buffer_send size debug print removed)
            end_got_candidates_queue = 0; // Set the got_candidates_queue empty
            // (commented-out iter==6 buffer_send sanity-check dump removed)
            // Sync the dist_table
            for (int root = 0; root < num_hosts; ++root) {
                std::vector<std::pair<VertexID, VertexID>> buffer_recv;
                // (commented-out buffer_send.size()==66 debug trap removed)
                one_host_bcasts_buffer_to_buffer(root,
                        buffer_send,
                        buffer_recv);
                if (buffer_recv.empty()) {
                    continue;
                }
                EdgeID size_buffer_recv = buffer_recv.size();
                // NOTE(review): the block below is a live debugging trap — on
                // the specific condition (iter == 6 && size_buffer_recv == 66)
                // it may call exit(1) and kill the whole run. It should be
                // removed or guarded by a debug macro before production use.
                {//test
//                    if (6 == (VertexID) iter && size_buffer_recv == 66) {
                    if (iter == 6 && size_buffer_recv == 66) {
                        for (VertexID i_b = 0; i_b < size_buffer_recv; ++i_b) {
                            const auto &e = buffer_recv[i_b];
                            VertexID root_id = e.first;
                            VertexID cand_real_id = e.second;
                            if (root_id > 1024) {
                                printf("size_buffer_recv: %lu "
                                       "buffer_recv[%u]: "
                                       "root_id: %u "
                                       "cand_real_id: %u\n",
                                       size_buffer_recv,
                                       i_b,
                                       root_id,
                                       cand_real_id);
                                exit(1);
                            }
                        }
                    }
                }
                if (size_buffer_recv >= THRESHOLD_PARALLEL) {
                    // Get label number for every root
                    std::vector<VertexID> sizes_recved_root_labels(roots_size, 0);
#pragma omp parallel for
                    for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                        const std::pair<VertexID, VertexID> &e = buffer_recv[i_l];
                        VertexID root_id = e.first;
                        // Atomic count: multiple threads may hit the same root.
                        __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST);
                    }
                    // Resize the recved_dist_table for every root
#pragma omp parallel for
                    for (VertexID root_id = 0; root_id < roots_size; ++root_id) {
                        VertexID old_size = recved_dist_table[root_id].size();
                        VertexID tmp_size = sizes_recved_root_labels[root_id];
                        if (tmp_size) {
                            recved_dist_table[root_id].resize(old_size + tmp_size);
                            sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size
                        }
                        // If tmp_size == 0, root_id has no received labels.
//                        sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size
                    }
                    // Recorde received labels in recved_dist_table
#pragma omp parallel for
                    for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                        const std::pair<VertexID, VertexID> &e = buffer_recv[i_l];
                        VertexID root_id = e.first;
                        VertexID cand_real_id = e.second;
                        dist_table[root_id][cand_real_id] = iter;
                        // Thread-safe append (recved_dist_table was presized above).
                        PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], cand_real_id);
                    }
                } else {
                    for (const std::pair<VertexID, VertexID> &e : buffer_recv) {
                        VertexID root_id = e.first;
                        VertexID cand_real_id = e.second;
                        dist_table[root_id][cand_real_id] = iter;
                        // Record the received element, for future reset
                        recved_dist_table[root_id].push_back(cand_real_id);
                    }
                }
            }
            // Sync the global_num_actives
            // NOTE(review): MPI_MAX (the commented MPI_SUM was the earlier
            // variant) — presumably the global MAX is used so the chunked
            // scatter loop above runs the same number of rounds on every host;
            // confirm this matches the collective-call pattern inside
            // schedule_label_pushing_para().
            MPI_Allreduce(&end_active_queue,
                    &global_num_actives,
                    1,
                    V_ID_Type,
                    MPI_MAX,
//                    MPI_SUM,
                    MPI_COMM_WORLD);
            gather_time += WallTimer::get_time_mark();
        }
        // (commented-out per-iteration progress print removed)
    }

    // Reset the dist_table
    clearup_time -= WallTimer::get_time_mark();
    reset_at_end(
//            G,
//            roots_start,
//            roots_master_local,
            dist_table,
            recved_dist_table,
            bp_labels_table);
    clearup_time += WallTimer::get_time_mark();
    // (commented-out completion print removed)
}

template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::dump_labels(
        const DistGraph &G,
        const VertexID roots_start,
        const VertexID roots_size)
{
    // For every root of this batch that is mastered by this host, process its
    // local label index (the body continues below this chunk).
    // NOTE(review): the name says "dump" but the visible body only calls
    // clean_all_indices() — confirm the name matches the intended behavior.
    const VertexID roots_bound = roots_start + roots_size;
    for (VertexID v_global = roots_start; v_global < roots_bound; ++v_global) {
        if (G.get_master_host_id(v_global) != host_id) {
            continue;
        }
        VertexID v_local = G.get_local_vertex_id(v_global);
        L[v_local].clean_all_indices();
        // NOTE(review): despite the enclosing function's name (dump_labels),
        // this clears the root's per-batch index structures — verify intent.
    }
}

//// Sequential Version
// (A large region of commented-out code followed here: a sequential backup of
//  batch_process(), and three commented-out drafts of
//  every_host_bcasts_buffer_and_proc() using MPI_Bcast / send-receive ring
//  schemes with SENDING_BUFFER_SEND / SENDING_SIZE_BUFFER_SEND tags. It has
//  been condensed for readability — see version control history for the full
//  text. Remaining lines of that commented-out draft continue below.)
// if (host_id < dst) { // // Send // message_time -= WallTimer::get_time_mark(); // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // // Receive // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } else { // Otherwise, if host_id is lower than dst, first receive, then send // // Receive // message_time -= WallTimer::get_time_mark(); // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // // Send // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } // } // } //} //// DEPRECATED version Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // const uint32_t UNIT_BUFFER_SIZE = 16U << 20U; // // Every host h_i broadcast to others // for (int h_i = 0; h_i < num_hosts; ++h_i) { // uint64_t size_buffer_send = buffer_send.size(); // // Sync the size_buffer_send. 
//        message_time -= WallTimer::get_time_mark();
//        MPI_Bcast(&size_buffer_send,
//                1,
//                MPI_UINT64_T,
//                h_i,
//                MPI_COMM_WORLD);
//        message_time += WallTimer::get_time_mark();
////        {// test
////            printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send);
////        }
//        if (!size_buffer_send) {
//            continue;
//        }
//        uint32_t num_unit_buffers = (size_buffer_send + UNIT_BUFFER_SIZE - 1) / UNIT_BUFFER_SIZE;
//
//        // Broadcast the buffer_send
//        for (uint32_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
//            // Prepare the unit buffer
//            message_time -= WallTimer::get_time_mark();
//            size_t offset = b_i * UNIT_BUFFER_SIZE;
//            size_t size_unit_buffer = b_i == num_unit_buffers - 1
//                    ? size_buffer_send - offset
//                    : UNIT_BUFFER_SIZE;
//            std::vector<E_T> unit_buffer(size_unit_buffer);
//            // Copy the messages from buffer_send to unit buffer.
//            if (host_id == h_i) {
//                unit_buffer.assign(buffer_send.begin() + offset, buffer_send.begin() + offset + size_unit_buffer);
//            }
//            // Broadcast the unit buffer
//            MPI_Bcast(unit_buffer.data(),
//                    MPI_Instance::get_sending_size(unit_buffer),
//                    MPI_CHAR,
//                    h_i,
//                    MPI_COMM_WORLD);
//            message_time += WallTimer::get_time_mark();
//            // Process every element of unit_buffer
//            for (const E_T &e : unit_buffer) {
//                fun(e);
//            }
//        }
//    }
//}

// Function: host `root` broadcasts its sending buffer into every host's receiving buffer.
// This is a collective call: ALL hosts must enter it with the same `root`.
// On the root, `buffer_send` is the payload; on every host (root included) `buffer_recv`
// ends up holding a copy of the root's buffer (resized to the broadcast element count,
// possibly 0). `message_time` is updated with the wall time spent inside MPI calls.
// E_T must be trivially copyable, since the payload is broadcast as raw MPI_CHAR bytes.
template <VertexID BATCH_SIZE>
template <typename E_T>
inline void DistBVCPLL<BATCH_SIZE>::
one_host_bcasts_buffer_to_buffer(
        int root,
        std::vector<E_T> &buffer_send,
        std::vector<E_T> &buffer_recv)
{
    const size_t ETypeSize = sizeof(E_T);
    // Only the root's value matters; the broadcast below overwrites it on the others.
    uint64_t size_buffer_send = buffer_send.size();
    // Sync the size_buffer_send.
    message_time -= WallTimer::get_time_mark();
    MPI_Bcast(&size_buffer_send,
            1,
            MPI_UINT64_T,
            root,
            MPI_COMM_WORLD);
    message_time += WallTimer::get_time_mark();
    // Every host (root included) receives into buffer_recv; empty broadcast is a no-op.
    buffer_recv.resize(size_buffer_send);
    if (!size_buffer_send) {
        return;
    }
    // Broadcast the buffer_send
    message_time -= WallTimer::get_time_mark();
    if (host_id == root) {
        // The root broadcasts out of buffer_recv so all ranks pass the same pointer role.
        buffer_recv.assign(buffer_send.begin(), buffer_send.end());
    }
    uint64_t bytes_buffer_send = size_buffer_send * ETypeSize;
    if (bytes_buffer_send <= static_cast<size_t>(INT_MAX)) {
        // Only need 1 broadcast
        // (MPI_Bcast takes an `int` count, hence the INT_MAX split below.)
        MPI_Bcast(buffer_recv.data(),
                bytes_buffer_send,
                MPI_CHAR,
                root,
                MPI_COMM_WORLD);
    } else {
        // Payload exceeds the `int` count limit: split into near-equal unit buffers
        // of whole elements and broadcast them back-to-back.
        const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1;
        const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1;
        // NOTE(review): with the ceiling division above, size_unit_buffer * ETypeSize can
        // exceed INT_MAX by up to ETypeSize-1 bytes in rare edge cases — TODO confirm the
        // chunk byte count always fits in `int` for the E_T sizes used here.
        size_t offset = 0;  // element offset of the current chunk
        for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
            size_t size_unit_buffer = b_i == num_unit_buffers - 1
                    ? size_buffer_send - offset  // last chunk takes the remainder
                    : unit_buffer_size;
            MPI_Bcast(buffer_recv.data() + offset,
                    size_unit_buffer * ETypeSize,
                    MPI_CHAR,
                    root,
                    MPI_COMM_WORLD);
            offset += unit_buffer_size;
        }
    }
    message_time += WallTimer::get_time_mark();
}

//// DEPRECATED Function: Host root broadcasts its sending buffer to a receiving buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//one_host_bcasts_buffer_to_buffer(
//        int root,
//        std::vector<E_T> &buffer_send,
//        std::vector<E_T> &buffer_recv)
//{
//    const uint32_t UNIT_BUFFER_SIZE = 16U << 20U;
//    uint64_t size_buffer_send = buffer_send.size();
//    // Sync the size_buffer_send.
// message_time -= WallTimer::get_time_mark(); // MPI_Bcast(&size_buffer_send, // 1, // MPI_UINT64_T, // root, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // buffer_recv.resize(size_buffer_send); // if (!size_buffer_send) { // return; // } // uint32_t num_unit_buffers = (size_buffer_send + UNIT_BUFFER_SIZE - 1) / UNIT_BUFFER_SIZE; // // // Broadcast the buffer_send // message_time -= WallTimer::get_time_mark(); // for (uint32_t b_i = 0; b_i < num_unit_buffers; ++b_i) { // // Prepare the unit buffer // size_t offset = b_i * UNIT_BUFFER_SIZE; // size_t size_unit_buffer = b_i == num_unit_buffers - 1 // ? size_buffer_send - offset // : UNIT_BUFFER_SIZE; // std::vector<E_T> unit_buffer(size_unit_buffer); // // Copy the messages from buffer_send to unit buffer. // if (host_id == root) { // unit_buffer.assign(buffer_send.begin() + offset, buffer_send.begin() + offset + size_unit_buffer); // } // // Broadcast the unit buffer // MPI_Bcast(unit_buffer.data(), // MPI_Instance::get_sending_size(unit_buffer), // MPI_CHAR, // root, // MPI_COMM_WORLD); // // Copy unit buffer to buffer_recv // std::copy(unit_buffer.begin(), unit_buffer.end(), buffer_recv.begin() + offset); // } // message_time += WallTimer::get_time_mark(); //} //// Function: Distance query of a pair of vertices, used for distrubuted version. 
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline UnweightedDist DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //dist_distance_query_pair( // VertexID a_input, // VertexID b_input, // const DistGraph &G) //{ // struct TmpMsgBPLabel { // UnweightedDist bp_dist[BITPARALLEL_SIZE]; // uint64_t bp_sets[BITPARALLEL_SIZE][2]; // // TmpMsgBPLabel() = default; // TmpMsgBPLabel(const UnweightedDist dist[], const uint64_t sets[][2]) // { // memcpy(bp_dist, dist, sizeof(bp_dist)); // memcpy(bp_sets, sets, sizeof(bp_sets)); // } // }; // // VertexID a_global = G.rank[a_input]; // VertexID b_global = G.rank[b_input]; // int a_host_id = G.get_master_host_id(a_global); // int b_host_id = G.get_master_host_id(b_global); // UnweightedDist min_d = MAX_UNWEIGHTED_DIST; // // // Both local // if (a_host_id == host_id && b_host_id == host_id) { // VertexID a_local = G.get_local_vertex_id(a_global); // VertexID b_local = G.get_local_vertex_id(b_global); // // Check Bit-Parallel Labels first // { // const IndexType &La = L[a_local]; // const IndexType &Lb = L[b_local]; // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = La.bp_dist[i] + Lb.bp_dist[i]; // if (td - 2 <= min_d) { // td += // (La.bp_sets[i][0] & Lb.bp_sets[i][0]) ? -2 : // ((La.bp_sets[i][0] & Lb.bp_sets[i][1]) | // (La.bp_sets[i][1] & Lb.bp_sets[i][0])) // ? 
-1 : 0; // if (td < min_d) { // min_d = td; // } // } // } // } // // std::map<VertexID, UnweightedDist> markers; // // Traverse a's labels // { // const IndexType &Lr = L[a_local]; // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // // Traverse batches array // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse distances array // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID v_start_index = Lr.distances[dist_i].start_index; // VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; // UnweightedDist dist = Lr.distances[dist_i].dist; // // Traverse vertices array // for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // VertexID label_id = Lr.vertices[v_i] + id_offset; // markers[label_id] = dist; // } // } // } // } // // Traverse b's labels // { // const IndexType &Lr = L[b_local]; // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // // Traverse batches array // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse distances array // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID v_start_index = Lr.distances[dist_i].start_index; // VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; // UnweightedDist dist = Lr.distances[dist_i].dist; // // Traverse vertices array 
// for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // VertexID label_id = Lr.vertices[v_i] + id_offset; // const auto &tmp_l = markers.find(label_id); // if (tmp_l == markers.end()) { // continue; // } // int d = tmp_l->second + dist; // if (d < min_d) { // min_d = d; // } // } // } // } // } // } else { // // Host b_host_id sends to host a_host_id, then host a_host_id do the query // if (host_id == b_host_id) { // VertexID b_local = G.get_local_vertex_id(b_global); // const IndexType &Lr = L[b_local]; // // Bit-Parallel Labels // { // TmpMsgBPLabel msg_send(Lr.bp_dist, Lr.bp_sets); // MPI_Send(&msg_send, // sizeof(msg_send), // MPI_CHAR, // a_host_id, // SENDING_QUERY_BP_LABELS, // MPI_COMM_WORLD); // } // // Normal Labels // { // std::vector<std::pair<VertexID, UnweightedDist> > buffer_send; // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // // Traverse batches array // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse distances array // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID v_start_index = Lr.distances[dist_i].start_index; // VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; // UnweightedDist dist = Lr.distances[dist_i].dist; // // Traverse vertices array // for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // VertexID label_id = Lr.vertices[v_i] + id_offset; // buffer_send.emplace_back(label_id, dist); // } // } // } // // MPI_Instance::send_buffer_2_dst(buffer_send, // a_host_id, // SENDING_QUERY_LABELS, // SENDING_SIZE_QUERY_LABELS); //// ///////////////////////////////////////////////// //// // //// 
std::vector<MPI_Request> requests_list; //// MPI_Instance::send_buffer_2_dest(buffer_send, //// requests_list, //// a_host_id, //// SENDING_QUERY_LABELS, //// SENDING_SIZE_QUERY_LABELS); //// MPI_Waitall(requests_list.size(), //// requests_list.data(), //// MPI_STATUSES_IGNORE); //// // //// ///////////////////////////////////////////////// // } // } else if (host_id == a_host_id) { // VertexID a_local = G.get_local_vertex_id(a_global); // const IndexType &Lr = L[a_local]; // // Receive BP labels // { // TmpMsgBPLabel msg_recv; // MPI_Recv(&msg_recv, // sizeof(msg_recv), // MPI_CHAR, // b_host_id, // SENDING_QUERY_BP_LABELS, // MPI_COMM_WORLD, // MPI_STATUS_IGNORE); // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = Lr.bp_dist[i] + msg_recv.bp_dist[i]; // if (td - 2 <= min_d) { // td += // (Lr.bp_sets[i][0] & msg_recv.bp_sets[i][0]) ? -2 : // ((Lr.bp_sets[i][0] & msg_recv.bp_sets[i][1]) | // (Lr.bp_sets[i][1] & msg_recv.bp_sets[i][0])) // ? -1 : 0; // if (td < min_d) { // min_d = td; // } // } // } // } // std::map<VertexID, UnweightedDist> markers; // // Traverse a's labels // { // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // // Traverse batches array // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse distances array // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID v_start_index = Lr.distances[dist_i].start_index; // VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; // UnweightedDist dist = Lr.distances[dist_i].dist; // // Traverse vertices array // for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // VertexID label_id = 
Lr.vertices[v_i] + id_offset; // markers[label_id] = dist; // } // } // } // } // // Receive b's labels // { // std::vector<std::pair<VertexID, UnweightedDist> > buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // b_host_id, // SENDING_QUERY_LABELS, // SENDING_SIZE_QUERY_LABELS); //// MPI_Instance::recv_buffer_from_source(buffer_recv, //// b_host_id, //// SENDING_QUERY_LABELS, //// SENDING_SIZE_QUERY_LABELS); // // for (const auto &l : buffer_recv) { // VertexID label_id = l.first; // const auto &tmp_l = markers.find(label_id); // if (tmp_l == markers.end()) { // continue; // } // int d = tmp_l->second + l.second; // if (d < min_d) { // min_d = d; // } // } // } // } // } // MPI_Allreduce(MPI_IN_PLACE, // &min_d, // 1, // MPI_Instance::get_mpi_datatype<UnweightedDist>(), // MPI_MIN, // MPI_COMM_WORLD); // return min_d; //} } #endif //PADO_DPADO_H
GB_unop__tgamma_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__tgamma_fp32_fp32)
// op(A') function:  GB (_unop_tran__tgamma_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = tgammaf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: single-precision true gamma function, tgammaf(x)
#define GB_OP(z, x) \
    z = tgammaf (x) ;

// casting (identity cast: A and C are both float)
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    float aij = Ax [pA] ;           \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ;                 \
    Cx [pC] = tgammaf (z) ;         \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TGAMMA || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies tgammaf entry-wise to the anz entries of Ax, writing into Cx, using
// nthreads OpenMP threads.  Ab (the bitmap) selects which entries are present
// when A is bitmap; Ab == NULL means all anz entries are valid.
GrB_Info GB (_unop_apply__tgamma_fp32_fp32)
(
    float *Cx,       // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = tgammaf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries absent from the bitmap
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = tgammaf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared across all unary ops; the #include below
// expands it using the GB_* macros defined at the top of this file.
GrB_Info GB (_unop_tran__tgamma_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
shallow_water_utilities.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Miguel Maso Sotomayor
//

#ifndef KRATOS_SHALLOW_WATER_UTILITIES_H_INCLUDED
#define KRATOS_SHALLOW_WATER_UTILITIES_H_INCLUDED

// System includes

// External includes

// Project includes
#include "includes/model_part.h"
#include "utilities/parallel_utilities.h"

namespace Kratos
{
///@addtogroup ShallowWaterApplication
///@{

///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name  Enum's
///@{

///@}
///@name  Functions
///@{

///@}
///@name Kratos Classes
///@{

/**
 * @ingroup ShallowWaterApplication
 * @class ShallowWaterUtilities
 * @brief This class is a wrapper of useful utilities for shallow water computations
 */
class KRATOS_API(SHALLOW_WATER_APPLICATION) ShallowWaterUtilities
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of ShallowWaterUtilities
    KRATOS_CLASS_POINTER_DEFINITION(ShallowWaterUtilities);

    typedef Node<3> NodeType;

    typedef Geometry<NodeType> GeometryType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Default constructor.

    /// Destructor.

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /// @brief Compute the free surface elevation over the model part nodes.
    void ComputeFreeSurfaceElevation(ModelPart& rModelPart);

    /// @brief Compute the water height from the free surface elevation.
    void ComputeHeightFromFreeSurface(ModelPart& rModelPart);

    /// @brief Compute the velocity field; optionally perform a projection step.
    void ComputeVelocity(ModelPart& rModelPart, bool PerformProjection = false);

    /// @brief Compute a smoothed velocity field over the model part.
    void ComputeSmoothVelocity(ModelPart& rModelPart);

    /// @brief Compute the momentum field over the model part.
    void ComputeMomentum(ModelPart& rModelPart);

    /// @brief Compute the energy over the model part.
    void ComputeEnergy(ModelPart& rModelPart);

    /// @brief Compute the nodal accelerations over the model part.
    void ComputeAccelerations(ModelPart& rModelPart);

    /// @brief Regularized inverse of the water height (Epsilon avoids division by ~0).
    double InverseHeight(const double Height, const double Epsilon);

    /// @brief Wet fraction for the given height and regularization parameter.
    double WetFraction(double Height, double Epsilon);

    /// @brief Copy rOriginVariable into rDestinationVariable with flipped sign.
    //  NOTE(review): exact sign/copy semantics live in the .cpp — confirm there.
    void FlipScalarVariable(Variable<double>& rOriginVariable, Variable<double>& rDestinationVariable, ModelPart& rModelPart);

    /// @brief Flag the solid boundary entities given the sea water level.
    void IdentifySolidBoundary(ModelPart& rModelPart, double SeaWaterLevel, Flags SolidBoundaryFlag);

    /// @brief Flag the wet part of the domain, with an optional dry-layer thickness.
    void IdentifyWetDomain(ModelPart& rModelPart, Flags WetFlag, double Thickness = 0.0);

    /// @brief Reset the values on the dry part of the domain.
    void ResetDryDomain(ModelPart& rModelPart, double Thickness = 0.0);

    /// @brief Copy the state of OriginFlag into DestinationFlag for every entity
    ///        of the container (nodes, elements or conditions), in parallel.
    template<class TContainerType>
    void CopyFlag(Flags OriginFlag, Flags DestinationFlag, TContainerType& rContainer)
    {
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(rContainer.size()); ++i)
        {
            auto it = rContainer.begin() + i;
            // Set(flag, bool): DestinationFlag mirrors the entity's OriginFlag state.
            it->Set(DestinationFlag, it->Is(OriginFlag));
        }
    }

    /// @brief Normalize the given nodal vector variable.
    void NormalizeVector(ModelPart& rModelPart, Variable<array_1d<double,3>>& rVariable);

    /// @brief Copy the current value of rVariable into the previous time step
    ///        (buffer position 1) for every node, in parallel.
    template<class TVarType>
    void CopyVariableToPreviousTimeStep(ModelPart& rModelPart, const TVarType& rVariable)
    {
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(rModelPart.NumberOfNodes()); ++i)
        {
            auto const it_node = rModelPart.NodesBegin() + i;
            // Buffer index 1 is the previous step; 0 (default) is the current one.
            it_node->FastGetSolutionStepValue(rVariable,1) = it_node->FastGetSolutionStepValue(rVariable);
        }
    }

    /// @brief Clamp the nodal variable from below with MinValue.
    void SetMinimumValue(ModelPart& rModelPart, const Variable<double>& rVariable, double MinValue);

    /*
     * @brief Set the z-coordinate of the mesh to zero
     */
    void SetMeshZCoordinateToZero(ModelPart& rModelPart);

    /*
     * @brief Set the z0-coordinate of the mesh to zero
     */
    void SetMeshZ0CoordinateToZero(ModelPart& rModelPart);

    /*
     * @brief Move the z-coordinate of the mesh according to a variable
     */
    void SetMeshZCoordinate(ModelPart& rModelPart, const Variable<double>& rVariable);

    /*
     * @brief Compute the L-2 norm for the given double variable
     * @details Element-wise parallel sum of (nodal value)^2, weighted by the
     * element area and averaged over its number of nodes, then sqrt'ed.
     * THistorical selects historical vs non-historical nodal data via GetValue<>.
     */
    template<bool THistorical>
    double ComputeL2Norm(ModelPart& rModelPart, const Variable<double>& rVariable)
    {
        double l2_norm = block_for_each<SumReduction<double>>(rModelPart.Elements(), [&](Element& rElem){
            double partial_l2_norm = 0.0;
            for (auto& r_node : rElem.GetGeometry()) {
                partial_l2_norm += std::pow(GetValue<THistorical>(r_node, rVariable), 2);
            }
            partial_l2_norm *= rElem.GetGeometry().Area();
            partial_l2_norm /= rElem.GetGeometry().size();
            return partial_l2_norm;
        });
        return std::sqrt(l2_norm);
    }

    /*
     * @brief Compute the L-2 norm for the given double variable inside an axis-aligned bounding box
     * @details Same as ComputeL2Norm, but only elements intersecting the box
     * [rLow, rHigh] contribute; others add zero.
     */
    template<bool THistorical>
    double ComputeL2NormAABB(
        ModelPart& rModelPart,
        const Variable<double>& rVariable,
        Point& rLow,
        Point& rHigh)
    {
        double l2_norm = block_for_each<SumReduction<double>>(rModelPart.Elements(), [&](Element& rElem){
            double partial_l2_norm = 0.0;
            if (rElem.GetGeometry().HasIntersection(rLow, rHigh)) {
                for (auto& r_node : rElem.GetGeometry()) {
                    partial_l2_norm += std::pow(GetValue<THistorical>(r_node, rVariable), 2);
                }
                partial_l2_norm *= rElem.GetGeometry().Area();
                partial_l2_norm /= rElem.GetGeometry().size();
            }
            return partial_l2_norm;
        });
        return std::sqrt(l2_norm);
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    ///@}
    ///@name Friends
    ///@{

    ///@}

private:
    ///@name Operations
    ///@{

    /// @brief Assemble the consistent mass matrix of the given geometry.
    void CalculateMassMatrix(Matrix& rMassMatrix, const GeometryType& rGeometry);

    /// @brief Read a nodal value from the historical (THistorical=true) or
    ///        non-historical database; specializations defined in the .cpp.
    template<bool THistorical>
    double GetValue(NodeType& rNode, const Variable<double>& rVariable);

    ///@}

}; // Class ShallowWaterUtilities

///@}

///@name Type Definitions
///@{

///@}
///@name Input and output
///@{

///@}

///@} addtogroup block

}  // namespace Kratos.

#endif // KRATOS_SHALLOW_WATER_UTILITIES_H_INCLUDED  defined
conv_kernel_int8_arm.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qwang@openailab.com
 */

#include "conv_kernel_int8_arm.h"

#include "api/c_api.h"
#include "utility/sys_port.h"

#include <math.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#ifdef __aarch64__
/* Hand-written aarch64 (Cortex-A72) assembly micro-kernels: 4x16 and 4x4
 * int8 GEMM tiles with per-channel requantization (multi/shift) and
 * activation clamping. Declarations only; bodies live in .S files. */
void i8gemm_4x16_a72_int8(int* biases, int8_t* input, int8_t* kernel, long kernel_size, int8_t* output, int* multi,
                          long output_xy, int* shift, int activation_min, int activation_max);
void i8gemm_4x4_a72_int8(int* biases, int8_t* input, int8_t* kernel, long kernel_size, int8_t* output, int* multi,
                         long output_xy, int* shift, int activation_min, int activation_max);
void im2col_int8_1x1(int8_t* input, long input_xy, int8_t* col, long col_cnt, long input_chan);
void im2col_int8_3x3(int8_t* input, long input_x, long input_y, long input_chan, int8_t* col, long stride);

// col_start and col_end need to be 16 aligned
// kernel_start need to be 4 aligned
/* Drives the 4x16 assembly tile over the [kernel_start, kernel_end) output
 * channels and [col_start, col_end) output pixels. Full 4-column tiles are
 * written directly into `output`; a ragged tail of 1-3 columns is computed
 * into the local `result` buffer and scattered element-by-element.
 * `cpu_affinity` is accepted but not used in this body. */
static void i8gemm4x16(int8_t* col, int8_t* kernel, bool bias_term, int* biases, int8_t* output, int* multi,
                       int kernel_size, int output_xy, int col_start, int col_end, int kernel_start, int kernel_end,
                       int activation_min, int activation_max, int* q_shift, int num_thread, int cpu_affinity)
{
    int col_end3 = col_end & 3;                        /* ragged tail: 0-3 leftover columns */
    int kernel_size_aligned2 = (kernel_size + 1) & -2; /* depth padded up to a multiple of 2 */
#pragma omp parallel for num_threads(num_thread)
    for (int kernel_num = (kernel_start & -16); kernel_num < (kernel_end & -16); kernel_num += 16)
    {
        int* cur_biases = NULL;
        if (bias_term)
        {
            cur_biases = biases + kernel_num;
        }
        int result[64] = {0};       /* 16 channels x 4 columns scratch tile for the tail */
        int8_t* output_line[4];
        int* pmulti = multi + kernel_num;     /* per-channel requant multiplier */
        int* pq_shift = q_shift + kernel_num; /* per-channel requant shift */
        int8_t* cur_kernel = kernel + kernel_num * kernel_size_aligned2;
        int8_t* output_result = output + kernel_num * output_xy;
        /* full 4-column tiles: asm kernel stores straight into output */
        for (int col_line = (col_start & -4); col_line < (col_end & -4); col_line += 4)
        {
            int8_t* cur_col = col + col_line * kernel_size_aligned2;
            i8gemm_4x16_a72_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, output_result + col_line,
                                 pmulti, output_xy, pq_shift, activation_min, activation_max);
        }
        if (col_end3)
        {
            /* tail tile: output_xy==0 makes the asm kernel store into `result`
             * instead of output; then scatter only the valid columns. */
            int col_line = col_end & -4;
            int8_t* cur_col = col + col_line * kernel_size_aligned2;
            i8gemm_4x16_a72_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, (int8_t*)result, pmulti, 0,
                                 pq_shift, activation_min, activation_max);
            for (int i = 0; i < 4; i++)
            {
                for (int j = 0; j < 4; j++)
                {
                    output_line[j] = output + (kernel_num + i * 4 + j) * output_xy + col_line;
                }
                /* The permuted result[] indices below follow the register layout
                 * the assembly kernel stores — presumably a rotated 4x4 lane
                 * arrangement; TODO confirm against the .S source. */
                *(output_line[0] + 0) = result[i * 16 + 0];
                *(output_line[1] + 0) = result[i * 16 + 5];
                *(output_line[2] + 0) = result[i * 16 + 10];
                *(output_line[3] + 0) = result[i * 16 + 15];
                if ((col_end3) >= 2)
                {
                    *(output_line[0] + 1) = result[i * 16 + 4];
                    *(output_line[1] + 1) = result[i * 16 + 1];
                    *(output_line[2] + 1) = result[i * 16 + 14];
                    *(output_line[3] + 1) = result[i * 16 + 11];
                }
                if ((col_end3) == 3)
                {
                    *(output_line[0] + 2) = result[i * 16 + 8];
                    *(output_line[1] + 2) = result[i * 16 + 13];
                    *(output_line[2] + 2) = result[i * 16 + 2];
                    *(output_line[3] + 2) = result[i * 16 + 7];
                }
            }
        }
    }
    return;
}

// col_start and kernel_start need to be 4 aligned
/* Same driver as i8gemm4x16 but for the 4x4 assembly tile; additionally
 * handles a ragged tail of 1-3 output channels (kernel_end3) outside the
 * parallel loop, including the corner tail where both columns and channels
 * are ragged. `cpu_affinity` is accepted but not used in this body. */
static void i8gemm4x4(int8_t* col, int8_t* kernel, bool bias_term, int* biases, int8_t* output, int* multi,
                      int kernel_size, int output_xy, int col_start, int col_end, int kernel_start, int kernel_end,
                      int activation_min, int activation_max, int* q_shift, int num_thread, int cpu_affinity)
{
    int col_end3 = col_end & 3;       /* leftover output columns (0-3) */
    int kernel_end3 = kernel_end & 3; /* leftover output channels (0-3) */
    int kernel_size_aligned2 = (kernel_size + 1) & -2;
#pragma omp parallel for num_threads(num_thread)
    for (int kernel_num = kernel_start & -4; kernel_num < (kernel_end & -4); kernel_num += 4)
    {
        int* cur_biases = NULL;
        if (bias_term)
        {
            cur_biases = biases + kernel_num;
        }
        int result[16] = {0};   /* 4 channels x 4 columns scratch tile for the tail */
        int8_t* output_line[4];
        int* pmulti = multi + kernel_num;
        int* pq_shift = q_shift + kernel_num;
        int8_t* cur_kernel = kernel + kernel_num * kernel_size_aligned2;
        int8_t* output_result = output + kernel_num * output_xy;
        /* full 4-column tiles stored directly into output */
        for (int col_line = (col_start & -4); col_line < (col_end & -4); col_line += 4)
        {
            int8_t* cur_col = col + col_line * kernel_size_aligned2;
            i8gemm_4x4_a72_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, output_result + col_line,
                                pmulti, output_xy, pq_shift, activation_min, activation_max);
        }
        if (col_end3)
        {
            /* column tail: compute into result (output_xy==0), then scatter the
             * 1-3 valid columns using the asm kernel's permuted lane layout
             * (presumed register rotation — TODO confirm against the .S source). */
            int col_line = col_end & -4;
            int8_t* cur_col = col + col_line * kernel_size_aligned2;
            i8gemm_4x4_a72_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, (int8_t*)result, pmulti, 0,
                                pq_shift, activation_min, activation_max);
            for (int j = 0; j < 4; j++)
            {
                output_line[j] = output + (kernel_num + j) * output_xy + col_line;
            }
            *(output_line[0] + 0) = result[0];
            *(output_line[1] + 0) = result[5];
            *(output_line[2] + 0) = result[10];
            *(output_line[3] + 0) = result[15];
            if (col_end3 >= 2)
            {
                *(output_line[0] + 1) = result[4];
                *(output_line[1] + 1) = result[1];
                *(output_line[2] + 1) = result[14];
                *(output_line[3] + 1) = result[11];
            }
            if (col_end3 == 3)
            {
                *(output_line[0] + 2) = result[8];
                *(output_line[1] + 2) = result[13];
                *(output_line[2] + 2) = result[2];
                *(output_line[3] + 2) = result[7];
            }
        }
    }
    if (kernel_end3)
    {
        /* channel tail (1-3 leftover output channels), handled serially:
         * always compute into result and scatter only the valid rows. */
        int kernel_num = kernel_end & -4;
        int* cur_biases = NULL;
        if (bias_term)
        {
            cur_biases = biases + kernel_num;
        }
        int result[16] = {0};
        int8_t* output_line[4];
        int* pmulti = multi + kernel_num;
        int* pq_shift = q_shift + kernel_num;
        int8_t* cur_kernel = kernel + kernel_num * kernel_size_aligned2;
        for (int col_line = (col_start & -4); col_line < (col_end & -4); col_line += 4)
        {
            int8_t* cur_col = col + col_line * kernel_size_aligned2;
            i8gemm_4x4_a72_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, (int8_t*)result, pmulti, 0,
                                pq_shift, activation_min, activation_max);
            for (int j = 0; j < 4; j++)
            {
                output_line[j] = output + (kernel_num + j) * output_xy + col_line;
            }
            *(output_line[0] + 0) = result[0];
            *(output_line[0] + 1) = result[4];
            *(output_line[0] + 2) = result[8];
            *(output_line[0] + 3) = result[12];
            if (kernel_end3 >= 2)
            {
                *(output_line[1] + 0) = result[5];
                *(output_line[1] + 1) = result[1];
                *(output_line[1] + 2) = result[13];
                *(output_line[1] + 3) = result[9];
            }
            if (kernel_end3 == 3)
            {
                *(output_line[2] + 0) = result[10];
                *(output_line[2] + 1) = result[14];
                *(output_line[2] + 2) = result[2];
                *(output_line[2] + 3) = result[6];
            }
        }
        if (col_end3)
        {
            /* corner tail: both channels and columns ragged */
            int col_line = col_end & -4;
            int8_t* cur_col = col + col_line * kernel_size_aligned2;
            i8gemm_4x4_a72_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, (int8_t*)result, pmulti, 0,
                                pq_shift, activation_min, activation_max);
            for (int j = 0; j < 4; j++)
            {
                output_line[j] = output + (kernel_num + j) * output_xy + col_line;
            }
            *(output_line[0] + 0) = result[0];
            if (col_end3 >= 2) *(output_line[0] + 1) = result[4];
            if (col_end3 == 3) *(output_line[0] + 2) = result[8];
            if (kernel_end3 >= 2)
            {
                *(output_line[1] + 0) = result[5];
                if (col_end3 >= 2) *(output_line[1] + 1) = result[1];
                if (col_end3 == 3) *(output_line[1] + 2) = result[13];
            }
            if (kernel_end3 == 3)
            {
                *(output_line[2] + 0) = result[10];
                if (col_end3 >= 2) *(output_line[2] + 1) = result[14];
                if (col_end3 == 3) *(output_line[2] + 2) = result[2];
            }
        }
    }
    return;
}
#else
void i8gemm_4x4_a17_int8(int* biases, int8_t* input, int8_t* kernel, int kernel_size, int8_t* output, int*
multi, int output_xy, int* shift, int activation_min, int activation_max); void i8gemm_4x8_a17_int8(int* biases, int8_t* input, int8_t* kernel, int kernel_size, int8_t* output, int* multi, int output_xy, int* shift, int activation_min, int activation_max); // col_start and col_end need to be 8 aligned kernel_start need to be 4 aligned static void i8gemm4x8(int8_t* col, int8_t* kernel, bool bias_term, int* biases, int8_t* output, int* multi, int kernel_size, int output_xy, int col_start, int col_end, int kernel_start, int kernel_end, int activation_min, int activation_max, int* q_shift, int num_thread, int cpu_affinity) { int col_end3 = col_end & 3; int kernel_size_aligned2 = (kernel_size + 1) & -2; #pragma omp parallel for num_threads(num_thread) for (int kernel_num = (kernel_start & -8); kernel_num < (kernel_end & -8); kernel_num += 8) { int* cur_biases = NULL; if (bias_term) { cur_biases = biases + kernel_num; } int result[32] = {0}; int8_t* output_line[4]; int* pmulti = multi + kernel_num; int* pq_shift = q_shift + kernel_num; int8_t* cur_kernel = kernel + kernel_num * kernel_size_aligned2; int8_t* output_result = output + kernel_num * output_xy; for (int col_line = (col_start & -4); col_line < (col_end & -4); col_line += 4) { int8_t* cur_col = col + col_line * kernel_size_aligned2; i8gemm_4x8_a17_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, output_result + col_line, pmulti, output_xy, pq_shift, activation_min, activation_max); } if (col_end3) { int col_line = col_end & -4; int8_t* cur_col = col + col_line * kernel_size_aligned2; i8gemm_4x8_a17_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, (int8_t*)result, pmulti, 0, pq_shift, activation_min, activation_max); for (int i = 0; i < 2; i++) { for (int j = 0; j < 4; j++) { output_line[j] = output + (kernel_num + i * 4 + j) * output_xy + col_line; } *(output_line[0] + 0) = result[i * 16 + 0]; *(output_line[1] + 0) = result[i * 16 + 5]; *(output_line[2] + 0) = result[i * 16 + 10]; 
*(output_line[3] + 0) = result[i * 16 + 15]; if (col_end3 >= 2) { *(output_line[0] + 1) = result[i * 16 + 4]; *(output_line[1] + 1) = result[i * 16 + 1]; *(output_line[2] + 1) = result[i * 16 + 14]; *(output_line[3] + 1) = result[i * 16 + 11]; } if (col_end3 == 3) { *(output_line[0] + 2) = result[i * 16 + 8]; *(output_line[1] + 2) = result[i * 16 + 13]; *(output_line[2] + 2) = result[i * 16 + 2]; *(output_line[3] + 2) = result[i * 16 + 7]; } } } } return; } // col_start and kernel_start need to be 4 aligned static void i8gemm4x4(int8_t* col, int8_t* kernel, bool bias_term, int* biases, int8_t* output, int* multi, int kernel_size, int output_xy, int col_start, int col_end, int kernel_start, int kernel_end, int activation_min, int activation_max, int* q_shift, int num_thread, int cpu_affinity) { int col_end3 = col_end & 3; int kernel_end3 = kernel_end & 3; int kernel_size_aligned2 = (kernel_size + 1) & -2; #pragma omp parallel for num_threads(num_thread) for (int kernel_num = (kernel_start & -4); kernel_num < (kernel_end & -4); kernel_num += 4) { int* cur_biases = NULL; if (bias_term) { cur_biases = biases + kernel_num; } int result[16] = {0}; int8_t* output_line[4]; int* pmulti = multi + kernel_num; int* pq_shift = q_shift + kernel_num; int8_t* cur_kernel = kernel + kernel_num * kernel_size_aligned2; int8_t* output_result = output + kernel_num * output_xy; for (int col_line = (col_start & -4); col_line < (col_end & -4); col_line += 4) { int8_t* cur_col = col + col_line * kernel_size_aligned2; i8gemm_4x4_a17_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, output_result + col_line, pmulti, output_xy, pq_shift, activation_min, activation_max); } if (col_end3) { int col_line = col_end & -4; int8_t* cur_col = col + col_line * kernel_size_aligned2; i8gemm_4x4_a17_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, (int8_t*)result, pmulti, 0, pq_shift, activation_min, activation_max); for (int j = 0; j < 4; j++) { output_line[j] = output + (kernel_num + 
j) * output_xy + col_line; } *(output_line[0] + 0) = result[0]; *(output_line[1] + 0) = result[5]; *(output_line[2] + 0) = result[10]; *(output_line[3] + 0) = result[15]; if (col_end3 >= 2) { *(output_line[0] + 1) = result[4]; *(output_line[1] + 1) = result[1]; *(output_line[2] + 1) = result[14]; *(output_line[3] + 1) = result[11]; } if (col_end3 == 3) { *(output_line[0] + 2) = result[8]; *(output_line[1] + 2) = result[13]; *(output_line[2] + 2) = result[2]; *(output_line[3] + 2) = result[7]; } } } if (kernel_end3) { int kernel_num = kernel_end & -4; int* cur_biases = NULL; if (bias_term) { cur_biases = biases + kernel_num; } int result[16] = {0}; int8_t* output_line[4]; int* pmulti = multi + kernel_num; int* pq_shift = q_shift + kernel_num; int8_t* cur_kernel = kernel + kernel_num * kernel_size_aligned2; for (int col_line = (col_start & -4); col_line < (col_end & -4); col_line += 4) { int8_t* cur_col = col + col_line * kernel_size_aligned2; i8gemm_4x4_a17_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, (int8_t*)result, pmulti, 0, pq_shift, activation_min, activation_max); for (int j = 0; j < 4; j++) { output_line[j] = output + (kernel_num + j) * output_xy + col_line; } *(output_line[0] + 0) = result[0]; *(output_line[0] + 1) = result[4]; *(output_line[0] + 2) = result[8]; *(output_line[0] + 3) = result[12]; if (kernel_end3 >= 2) { *(output_line[1] + 0) = result[5]; *(output_line[1] + 1) = result[1]; *(output_line[1] + 2) = result[13]; *(output_line[1] + 3) = result[9]; } if (kernel_end3 == 3) { *(output_line[2] + 0) = result[10]; *(output_line[2] + 1) = result[14]; *(output_line[2] + 2) = result[2]; *(output_line[2] + 3) = result[6]; } } if (col_end3) { int col_line = col_end & -4; int8_t* cur_col = col + col_line * kernel_size_aligned2; i8gemm_4x4_a17_int8(cur_biases, cur_col, cur_kernel, kernel_size_aligned2, (int8_t*)result, pmulti, 0, pq_shift, activation_min, activation_max); for (int j = 0; j < 4; j++) { output_line[j] = output + (kernel_num + j) 
* output_xy + col_line; } *(output_line[0] + 0) = result[0]; if (col_end3 >= 2) *(output_line[0] + 1) = result[4]; if (col_end3 == 3) *(output_line[0] + 2) = result[8]; if (kernel_end3 >= 2) { *(output_line[1] + 0) = result[5]; if (col_end3 >= 2) *(output_line[1] + 1) = result[1]; if (col_end3 == 3) *(output_line[1] + 2) = result[13]; } if (kernel_end3 == 3) { *(output_line[2] + 0) = result[10]; if (col_end3 >= 2) *(output_line[2] + 1) = result[14]; if (col_end3 == 3) *(output_line[2] + 2) = result[2]; } } } return; } #endif /* * get the memory size for im2col + sgemm of kernel tensor interleave */ static int get_private_mem_size(struct tensor* filter, struct conv_param* param) { int group = param->group; int out_chan = filter->dims[0] / group; int out_chan_align4 = (out_chan + 3) / 4 * 4; int kernel_size = filter->dims[1] * filter->dims[2] * filter->dims[3]; int mem_size = kernel_size * filter->elem_size * out_chan_align4 * group + 128; // caution return mem_size; } int int8_conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size) { priv_info->external_im2col_mem = 1; priv_info->im2col_buffer = mem; priv_info->im2col_buffer_size = mem_size; return 0; } int int8_conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size) { priv_info->external_im2col_pack4_mem = 0; priv_info->im2col_buffer_pack4 = NULL; priv_info->im2col_buffer_pack4_size = 0; return 0; } int int8_conv_hcl_get_shared_mem_size(struct tensor* input, struct tensor* output, struct conv_param* param) { int in_h = input->dims[2]; int in_w = input->dims[3]; int out_h = output->dims[2]; int out_w = output->dims[3]; int group = param->group; int input_chan = param->input_channel / group; int kernel_size = input_chan * param->kernel_h * param->kernel_w; int out_cstep = out_h * out_w; // channel cstep, output_h * output_w int elem_size = input->elem_size; // uint8/int8 is 1 byte, fp32 is 4 bytes out_cstep = (out_cstep + 3) / 4 * 4; int kernel_size_aligned2 
= (kernel_size + 1) & -2; int mem_size = elem_size * kernel_size_aligned2 * out_cstep + 128; return mem_size; } void interleave_kernel_int8(int8_t* kernel, int8_t* kernel_int8, int kernel_chan, int kernel_size) { #ifdef __aarch64__ int8_t* cur_kernel[16]; int8_t* cur_kernel_int8 = kernel_int8; int i, j, k; // interleave 16 kernels for (i = 0; i < (kernel_chan & -16); i += 16) { for (j = 0; j < 16; j++) cur_kernel[j] = kernel + kernel_size * (i + j); for (j = 0; j < (kernel_size & -2); j += 2) for (k = 0; k < 16; k++) { *(cur_kernel_int8++) = *(cur_kernel[k] + j); *(cur_kernel_int8++) = *(cur_kernel[k] + j + 1); } if (kernel_size & 0x1) for (k = 0; k < 16; k++) { *(cur_kernel_int8++) = *(cur_kernel[k] + j); *(cur_kernel_int8++) = 0; } } // interleave 4 kernels for (i = (kernel_chan & -16); i < (kernel_chan & -4); i += 4) { for (j = 0; j < 4; j++) cur_kernel[j] = kernel + kernel_size * (i + j); for (j = 0; j < (kernel_size & -2); j += 2) for (k = 0; k < 4; k++) { *(cur_kernel_int8++) = *(cur_kernel[k] + j); *(cur_kernel_int8++) = *(cur_kernel[k] + j + 1); } if (kernel_size & 0x1) for (k = 0; k < 4; k++) { *(cur_kernel_int8++) = *(cur_kernel[k] + j); *(cur_kernel_int8++) = 0; } } // last 4 kernels if ((kernel_chan & 0x3) != 0) { for (j = 0; j < 3; j++) cur_kernel[j] = kernel + kernel_size * (i + j); if ((kernel_chan & 0x3) == 3) { for (j = 0; j < (kernel_size & -2); j += 2) { for (k = 0; k < 3; k++) { *(cur_kernel_int8++) = *(cur_kernel[k] + j); *(cur_kernel_int8++) = *(cur_kernel[k] + j + 1); } for (k = 0; k < 2; k++) *(cur_kernel_int8++) = 0; } if (kernel_size & 0x1) { for (k = 0; k < 3; k++) { *(cur_kernel_int8++) = *(cur_kernel[k] + j); *(cur_kernel_int8++) = 0; } for (k = 0; k < 2; k++) *(cur_kernel_int8++) = 0; } } else if ((kernel_chan & 0x3) == 2) { for (j = 0; j < (kernel_size & -2); j += 2) { for (k = 0; k < 2; k++) { *(cur_kernel_int8++) = *(cur_kernel[k] + j); *(cur_kernel_int8++) = *(cur_kernel[k] + j + 1); } for (k = 0; k < 4; k++) *(cur_kernel_int8++) = 
0; } if (kernel_size & 0x1) { for (k = 0; k < 2; k++) { *(cur_kernel_int8++) = *(cur_kernel[k] + j); *(cur_kernel_int8++) = 0; } for (k = 0; k < 4; k++) *(cur_kernel_int8++) = 0; } } else if ((kernel_chan & 0x3) == 1) { for (j = 0; j < (kernel_size & -2); j += 2) { *(cur_kernel_int8++) = *(cur_kernel[0] + j); *(cur_kernel_int8++) = *(cur_kernel[0] + j + 1); for (k = 0; k < 6; k++) *(cur_kernel_int8++) = 0; } if (kernel_size & 0x1) { *(cur_kernel_int8++) = *(cur_kernel[0] + j); for (k = 0; k < 7; k++) *(cur_kernel_int8++) = 0; } } } #else int8_t* cur_kernel[8]; int8_t* cur_kernel_int8 = kernel_int8; int i, j, k; int kernel_chan3 = kernel_chan & 0x3; int kernel_size1 = kernel_size & 0x1; // interleave 8 kernels for (i = 0; i < (kernel_chan & -8); i += 8) { for (j = 0; j < 8; j++) cur_kernel[j] = kernel + kernel_size * (i + j); for (j = 0; j < (kernel_size & -2); j += 2) for (k = 0; k < 8; k++) { *(cur_kernel_int8++) = *(cur_kernel[k] + j); *(cur_kernel_int8++) = *(cur_kernel[k] + j + 1); } if (kernel_size1) for (k = 0; k < 8; k++) { *(cur_kernel_int8++) = *(cur_kernel[k] + j); *(cur_kernel_int8++) = 0; } } // interleave 4 kernels for (; i < (kernel_chan & -4); i += 4) { for (j = 0; j < 4; j++) cur_kernel[j] = kernel + kernel_size * (i + j); for (j = 0; j < (kernel_size & -2); j += 2) for (k = 0; k < 4; k++) { *(cur_kernel_int8++) = *(cur_kernel[k] + j); *(cur_kernel_int8++) = *(cur_kernel[k] + j + 1); } if (kernel_size1) for (k = 0; k < 4; k++) { *(cur_kernel_int8++) = *(cur_kernel[k] + j); *(cur_kernel_int8++) = 0; } } // last 4 kernels if (kernel_chan3) { for (j = 0; j < 3; j++) cur_kernel[j] = kernel + kernel_size * (i + j); if ((kernel_chan3) == 3) { for (j = 0; j < (kernel_size & -2); j += 2) { for (k = 0; k < 3; k++) { *(cur_kernel_int8++) = *(cur_kernel[k] + j); *(cur_kernel_int8++) = *(cur_kernel[k] + j + 1); } for (k = 0; k < 2; k++) *(cur_kernel_int8++) = 0; } if (kernel_size1) { for (k = 0; k < 3; k++) { *(cur_kernel_int8++) = *(cur_kernel[k] + j); 
*(cur_kernel_int8++) = 0; } for (k = 0; k < 2; k++) *(cur_kernel_int8++) = 0; } } else if ((kernel_chan3) == 2) { for (j = 0; j < (kernel_size & -2); j += 2) { for (k = 0; k < 2; k++) { *(cur_kernel_int8++) = *(cur_kernel[k] + j); *(cur_kernel_int8++) = *(cur_kernel[k] + j + 1); } for (k = 0; k < 4; k++) *(cur_kernel_int8++) = 0; } if (kernel_size1) { for (k = 0; k < 2; k++) { *(cur_kernel_int8++) = *(cur_kernel[k] + j); *(cur_kernel_int8++) = 0; } for (k = 0; k < 4; k++) *(cur_kernel_int8++) = 0; } } else { // kernel_chan & 0x3 == 1 for (j = 0; j < (kernel_size & -2); j += 2) { *(cur_kernel_int8++) = *(cur_kernel[0] + j); *(cur_kernel_int8++) = *(cur_kernel[0] + j + 1); for (k = 0; k < 6; k++) *(cur_kernel_int8++) = 0; } if (kernel_size1) { *(cur_kernel_int8++) = *(cur_kernel[0] + j); for (k = 0; k < 7; k++) *(cur_kernel_int8++) = 0; } } } #endif return; } /* kernel interleave */ static void interleave_int8(struct tensor* filter, struct conv_priv_info* priv_info, struct conv_param* param) { int group = param->group; int kernel_size = filter->dims[1] * filter->dims[2] * filter->dims[3]; int out_chan = filter->dims[0] / group; int out_chan_align4 = (out_chan + 3) / 4 * 4; int kernel_size_algin = kernel_size * out_chan_align4; int kernel_size_group = kernel_size * out_chan; int8_t* kernel = filter->data; int8_t* interleave_buf = priv_info->interleave_buffer; for (int g = 0; g < group; g++) { int8_t* cur_kernel = kernel + g * kernel_size_group; int8_t* cur_interleave = interleave_buf + g * kernel_size_algin; interleave_kernel_int8(cur_kernel, cur_interleave, out_chan, kernel_size); } } static void im2col_int8(int8_t* im, int8_t* col, int input_chan, int input_x, int input_y, int kernel_x, int kernel_y, int stride_x, int stride_y, int dilation_x, int dilation_y, int pad_x0, int pad_x1, int pad_y0, int pad_y1, int output_x, int output_y, int num_thread) { int col_start = 0; int col_end = output_x * output_y; int kernel_xy = kernel_x * kernel_y; int kernel_size = 
kernel_xy * input_chan; int kernel_size_aligned2 = (kernel_size + 1) & -2; int input_xy = input_x * input_y; int col_end3 = col_end & 0x3; int kernel_size1 = kernel_size & 0x1; int is_1x1 = (kernel_x == 1) && (kernel_y == 1) && (stride_x == 1) && (stride_y == 1); int is_3x3 = (kernel_x == 3) && (kernel_y == 3) && (dilation_x == 1) && (dilation_y == 1); bool is_pad0 = (pad_x0 == 0) && (pad_y0 == 0) && (pad_x1 == 0) && (pad_y1 == 0); #ifdef __aarch64__ // is 1x1 if (is_1x1) { int8_t* cur_col = col + col_start * kernel_size_aligned2; int col_cnt = (col_end & -4) - (col_start & -4); im2col_int8_1x1((int8_t*)im + col_start, input_xy, cur_col, col_cnt, kernel_size); cur_col += col_cnt * kernel_size_aligned2; int col_i = col_end & -4; // final 4 input if (col_end3) { for (int kch = 0; kch < (kernel_size & -2); kch += 2) { for (int i = 0; i < 4; i++) { if ((col_i + i) < col_end) { *cur_col++ = *(im + input_xy * (kch + 0) + col_i + i); *cur_col++ = *(im + input_xy * (kch + 1) + col_i + i); } else { *cur_col++ = 0; *cur_col++ = 0; } } } int kch = kernel_size & -2; if (kernel_size1) { for (int i = 0; i < 4; i++) { if ((col_i + i) < col_end) { *cur_col++ = *(im + input_xy * (kch + 0) + col_i + i); *cur_col++ = 0; } else { *cur_col++ = 0; *cur_col++ = 0; } } } } } // 3x3 non dilation else if (is_3x3) { #pragma omp parallel for num_threads(num_thread) for (int col_i = (col_start & -4); col_i < (col_end & -4); col_i += 4) { int imx[4] = {0}; int imy[4] = {0}; int cnt_x[4] = {0}; int cnt_y[4] = {0}; int imx_start[4] = {0}; int imy_start[4] = {0}; int8_t* cur_col = col + col_i * kernel_size_aligned2; for (int i = 0; i < 4; i++) { cnt_y[i] = (col_i + i) / output_x; cnt_x[i] = col_i + i - cnt_y[i] * output_x; imx_start[i] = cnt_x[i] * stride_x - pad_x0; imy_start[i] = cnt_y[i] * stride_y - pad_y0; } if ((cnt_y[0] == cnt_y[3]) && (is_pad0 || (cnt_y[0] > 0 && cnt_x[0] > 0 && cnt_y[0] < (output_y - 1) && cnt_x[3] < (output_x - 1)))) { int8_t* input_start = (int8_t*)(im + imy_start[0] * 
input_x + imx_start[0]); im2col_int8_3x3(input_start, input_x, input_y, input_chan, cur_col, stride_x); cur_col += 4 * kernel_size_aligned2; } else { bool odd_line = false; int kchp = 0; int kyp = 0; for (int kch = 0; kch < input_chan; kch++) { for (int ky = 0; ky < 3; ky++) { if (odd_line) { for (int i = 0; i < 4; i++) { imy[i] = imy_start[i] + kyp; imx[i] = imx_start[i] + 2; if (imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]); else *cur_col++ = 0; imy[i] = imy_start[i] + ky; if (imx_start[i] >= 0 && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx_start[i]); else *cur_col++ = 0; } for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { imx[i] = imx_start[i] + 1 + k; if (imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]); else *cur_col++ = 0; } } odd_line = false; } // even line 2n else { for (int i = 0; i < 4; i++) imy[i] = imy_start[i] + ky; for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { imx[i] = imx_start[i] + k; if (imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]); else *cur_col++ = 0; } } kchp = kch; kyp = ky; odd_line = true; } } } if (kernel_size1) { for (int i = 0; i < 4; i++) { imy[i] = imy_start[i] + kyp; imx[i] = imx_start[i] + 2; if (imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]); else *cur_col++ = 0; *cur_col++ = 0; } } } } int col_i = col_end & -4; if (col_end3) { int imx[4] = {0}; int imy[4] = {0}; int cnt_x[4] = {0}; int cnt_y[4] = {0}; int imx_start[4] = {0}; int imy_start[4] = {0}; int8_t* cur_col = col + col_i * kernel_size_aligned2; for (int i = 0; i < 4; i++) { cnt_y[i] = (col_i + i) / output_x; cnt_x[i] = col_i + i - cnt_y[i] * output_x; imx_start[i] = cnt_x[i] * stride_x - pad_x0; 
imy_start[i] = cnt_y[i] * stride_y - pad_y0; } bool odd_line = false; int kchp = 0; int kyp = 0; for (int kch = 0; kch < input_chan; kch++) { for (int ky = 0; ky < 3; ky++) { // odd line 1 + 2n if (odd_line) { for (int i = 0; i < 4; i++) { imy[i] = imy_start[i] + kyp; imx[i] = imx_start[i] + 2; if ((i < col_end3) && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]); else *cur_col++ = 0; imy[i] = imy_start[i] + ky; if ((i < col_end3) && imx_start[i] >= 0 && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx_start[i]); else *cur_col++ = 0; } for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { imx[i] = imx_start[i] + (1 + k); if ((i < col_end3) && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]); else *cur_col++ = 0; } } odd_line = false; } // even line 2n + 1 else { for (int i = 0; i < 4; i++) imy[i] = imy_start[i] + ky; for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { imx[i] = imx_start[i] + k; if (i < col_end3 && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]); else *cur_col++ = 0; } } kchp = kch; kyp = ky; odd_line = true; } } } if (kernel_size1) { for (int i = 0; i < 4; i++) { imy[i] = imy_start[i] + kyp; imx[i] = imx_start[i] + 2; if ((i < col_end3) && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]); else *cur_col++ = 0; *cur_col++ = 0; } } } } // general case for kernel size <=3 else if ((kernel_x) < 4 && (kernel_y < 4)) { int kch[2], kx[2], ky[2], imx[4][2], imy[4][2]; int8_t* cur_col = col + col_start * kernel_size_aligned2; for (int col_i = (col_start & -4); col_i < (col_end & -4); col_i += 4) { int cnt_x[4] = {0}; int cnt_y[4] = {0}; int imx_start[4] = {0}; int imy_start[4] = {0}; for (int i = 0; i 
< 4; i++) { cnt_y[i] = (col_i + i) / output_x; cnt_x[i] = col_i + i - cnt_y[i] * output_x; imx_start[i] = cnt_x[i] * stride_x - pad_x0; imy_start[i] = cnt_y[i] * stride_y - pad_y0; } for (int col_j = 0; col_j < (kernel_size & -2); col_j += 2) { for (int k = 0; k < 2; k++) { kch[k] = (col_j + k) / kernel_xy; ky[k] = (col_j + k - kch[k] * kernel_xy) / kernel_x; kx[k] = (col_j + k - kch[k] * kernel_xy) - ky[k] * kernel_x; ky[k] = ky[k] * dilation_y; kx[k] = kx[k] * dilation_x; for (int i = 0; i < 4; i++) { imx[i][k] = imx_start[i] + kx[k]; imy[i][k] = imy_start[i] + ky[k]; } } for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { if (imx[i][k] >= 0 && imx[i][k] < input_x && imy[i][k] >= 0 && imy[i][k] < input_y) *cur_col++ = *(im + input_xy * kch[k] + input_x * imy[i][k] + imx[i][k]); else *cur_col++ = 0; } } } int col_j = kernel_size & -2; if (kernel_size1) { kch[0] = col_j / kernel_xy; ky[0] = (col_j - kch[0] * kernel_xy) / kernel_x; kx[0] = col_j - kch[0] * kernel_xy - ky[0] * kernel_x; ky[0] = ky[0] * dilation_y; kx[0] = kx[0] * dilation_x; for (int i = 0; i < 4; i++) { imx[i][0] = imx_start[i] + kx[0]; imy[i][0] = imy_start[i] + ky[0]; if (imx[i][0] >= 0 && imx[i][0] < input_x && imy[i][0] >= 0 && imy[i][0] < input_y) *cur_col++ = *(im + input_xy * kch[0] + input_x * imy[i][0] + imx[i][0]); else *cur_col++ = 0; *cur_col++ = 0; } } } int col_i = col_end & -4; // final 4 input if (col_end3) { int cnt_x[4] = {0}; int cnt_y[4] = {0}; int imx_start[4] = {0}; int imy_start[4] = {0}; for (int i = 0; i < 4; i++) { cnt_y[i] = (col_i + i) / output_x; cnt_x[i] = col_i + i - cnt_y[i] * output_x; imx_start[i] = cnt_x[i] * stride_x - pad_x0; imy_start[i] = cnt_y[i] * stride_y - pad_y0; } for (int col_j = 0; col_j < (kernel_size & -2); col_j += 2) { for (int k = 0; k < 2; k++) { kch[k] = (col_j + k) / kernel_xy; ky[k] = (col_j + k - kch[k] * kernel_xy) / kernel_x; kx[k] = (col_j + k - kch[k] * kernel_xy) - ky[k] * kernel_x; ky[k] = ky[k] * dilation_y; kx[k] = kx[k] * 
dilation_x; for (int i = 0; i < 4; i++) { imx[i][k] = imx_start[i] + kx[k]; imy[i][k] = imy_start[i] + ky[k]; } } for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { if ((col_i + i) < col_end && imx[i][k] >= 0 && imx[i][k] < input_x && imy[i][k] >= 0 && imy[i][k] < input_y) *cur_col++ = *(im + input_xy * kch[k] + input_x * imy[i][k] + imx[i][k]); else *cur_col++ = 0; } } } int col_j = kernel_size & -2; if (kernel_size1) { kch[0] = col_j / kernel_xy; ky[0] = (col_j - kch[0] * kernel_xy) / kernel_x; kx[0] = col_j - kch[0] * kernel_xy - ky[0] * kernel_x; ky[0] = ky[0] * dilation_y; kx[0] = kx[0] * dilation_x; for (int i = 0; i < 4; i++) { imx[i][0] = imx_start[i] + kx[0]; imy[i][0] = imy_start[i] + ky[0]; if ((col_i + i) < col_end && imx[i][0] >= 0 && imx[i][0] < input_x && imy[i][0] >= 0 && imy[i][0] < input_y) *cur_col++ = *(im + input_xy * kch[0] + input_x * imy[i][0] + imx[i][0]); else *cur_col++ = 0; *cur_col++ = 0; } } } } // general case for kernel size >=3 else { int kch, kx, ky, kchp, kyp, imx[4], imy[4] = {0}; int kernel_x1 = kernel_x & 0x1; int8_t* cur_col = col + col_start * kernel_size_aligned2; for (int col_i = (col_start & -4); col_i < (col_end & -4); col_i += 4) { int cnt_x[4] = {0}; int cnt_y[4] = {0}; int imx_start[4] = {0}; int imy_start[4] = {0}; for (int i = 0; i < 4; i++) { cnt_y[i] = (col_i + i) / output_x; cnt_x[i] = col_i + i - cnt_y[i] * output_x; imx_start[i] = cnt_x[i] * stride_x - pad_x0; imy_start[i] = cnt_y[i] * stride_y - pad_y0; } bool odd_line = false; kchp = 0; kyp = 0; for (int kch = 0; kch < input_chan; kch++) { for (ky = 0; ky < kernel_y; ky++) { // odd line 2 + 2n if (odd_line) { for (int i = 0; i < 4; i++) { imy[i] = imy_start[i] + kyp * dilation_y; imx[i] = imx_start[i] + (kernel_x - 1) * dilation_x; if (imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]); else *cur_col++ = 0; imy[i] = imy_start[i] + ky * dilation_y; if (imx_start[i] >= 0 && imy[i] >= 0 
&& imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx_start[i]); else *cur_col++ = 0; } for (kx = 1; kx < kernel_x; kx += 2) { for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { imx[i] = imx_start[i] + (kx + k) * dilation_x; if (imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]); else *cur_col++ = 0; } } } odd_line = false; } // even line 2n else { for (int i = 0; i < 4; i++) imy[i] = imy_start[i] + ky * dilation_y; for (kx = 0; kx < (kernel_x - 1); kx += 2) { for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { imx[i] = imx_start[i] + (kx + k) * dilation_x; if (imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]); else *cur_col++ = 0; } } } kchp = kch; kyp = ky; odd_line = kernel_x1 ? true : false; } } } if (kernel_size1) { for (int i = 0; i < 4; i++) { imy[i] = imy_start[i] + kyp * dilation_y; imx[i] = imx_start[i] + (kernel_x - 1) * dilation_x; if (imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]); else *cur_col++ = 0; *cur_col++ = 0; } } } int col_i = col_end & -4; // final 4 input if (col_end3) { int cnt_x[4] = {0}; int cnt_y[4] = {0}; int imx_start[4] = {0}; int imy_start[4] = {0}; for (int i = 0; i < 4; i++) { cnt_y[i] = (col_i + i) / output_x; cnt_x[i] = col_i + i - cnt_y[i] * output_x; imx_start[i] = cnt_x[i] * stride_x - pad_x0; imy_start[i] = cnt_y[i] * stride_y - pad_y0; } bool odd_line = false; kchp = 0; kyp = 0; for (int kch = 0; kch < input_chan; kch++) { for (ky = 0; ky < kernel_y; ky++) { // odd line 1 + 2n if (odd_line) { for (int i = 0; i < 4; i++) { imy[i] = imy_start[i] + kyp * dilation_y; imx[i] = imx_start[i] + (kernel_x - 1) * dilation_x; if ((i < col_end3) && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + 
imx[i]); else *cur_col++ = 0; imy[i] = imy_start[i] + ky * dilation_y; if ((i < col_end3) && imx_start[i] >= 0 && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx_start[i]); else *cur_col++ = 0; } for (kx = 1; kx < kernel_x; kx += 2) { for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { imx[i] = imx_start[i] + (kx + k) * dilation_x; if ((i < col_end3) && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]); else *cur_col++ = 0; } } } odd_line = false; } // even line 2n + 1 else { for (int i = 0; i < 4; i++) imy[i] = imy_start[i] + ky * dilation_y; for (kx = 0; kx < (kernel_x - 1); kx += 2) { for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { imx[i] = imx_start[i] + (kx + k) * dilation_x; if (i < col_end3 && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]); else *cur_col++ = 0; } } } kchp = kch; kyp = ky; odd_line = kernel_x1 ? 
true : false; } } } if (kernel_size1) { for (int i = 0; i < 4; i++) { imy[i] = imy_start[i] + kyp * dilation_y; imx[i] = imx_start[i] + (kernel_x - 1) * dilation_x; if ((i < col_end3) && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]); else *cur_col++ = 0; *cur_col++ = 0; } } } } #else if (is_3x3) { int stride_x2 = stride_x * 2; int stride_x3 = stride_x * 3; // #pragma omp parallel for num_threads(num_thread) for (int col_i = (col_start & -4); col_i < (col_end & -4); col_i += 4) { int imx[4] = {0}; int imy[4] = {0}; int cnt_x[4] = {0}; int cnt_y[4] = {0}; int imx_start[4] = {0}; int imy_start[4] = {0}; int8_t* cur_col = col + col_i * kernel_size_aligned2; for (int i = 0; i < 4; i++) { cnt_y[i] = (col_i + i) / output_x; cnt_x[i] = col_i + i - cnt_y[i] * output_x; imx_start[i] = cnt_x[i] * stride_x - pad_x0; imy_start[i] = cnt_y[i] * stride_y - pad_y0; } if ((cnt_y[0] == cnt_y[3]) && (is_pad0 || (cnt_y[0] > 0 && cnt_x[0] > 0 && cnt_y[0] < (output_y - 1) && cnt_x[3] < (output_x - 1)))) { int8_t* l00 = (int8_t*)(im + imy_start[0] * input_x + imx_start[0]); int8_t* l01 = l00 + input_x; int8_t* l02 = l00 + input_x * 2; int8_t* l10 = l00 + input_xy; int8_t* l11 = l10 + input_x; int8_t* l12 = l10 + input_x * 2; for (int kch = 0; kch < (input_chan & -2); kch += 2) { cur_col[0] = l00[0]; cur_col[1] = l00[1]; cur_col[2] = l00[0 + stride_x]; cur_col[3] = l00[1 + stride_x]; cur_col[4] = l00[0 + stride_x2]; cur_col[5] = l00[1 + stride_x2]; cur_col[6] = l00[0 + stride_x3]; cur_col[7] = l00[1 + stride_x3]; cur_col[8] = l00[2]; cur_col[9] = l01[0]; cur_col[10] = l00[2 + stride_x]; cur_col[11] = l01[0 + stride_x]; cur_col[12] = l00[2 + stride_x2]; cur_col[13] = l01[0 + stride_x2]; cur_col[14] = l00[2 + stride_x3]; cur_col[15] = l01[0 + stride_x3]; cur_col[16] = l01[1]; cur_col[17] = l01[2]; cur_col[18] = l01[1 + stride_x]; cur_col[19] = l01[2 + stride_x]; cur_col[20] = l01[1 + stride_x2]; cur_col[21] = l01[2 + 
stride_x2]; cur_col[22] = l01[1 + stride_x3]; cur_col[23] = l01[2 + stride_x3]; cur_col[24] = l02[0]; cur_col[25] = l02[1]; cur_col[26] = l02[0 + stride_x]; cur_col[27] = l02[1 + stride_x]; cur_col[28] = l02[0 + stride_x2]; cur_col[29] = l02[1 + stride_x2]; cur_col[30] = l02[0 + stride_x3]; cur_col[31] = l02[1 + stride_x3]; cur_col[32] = l02[2]; cur_col[33] = l10[0]; cur_col[34] = l02[2 + stride_x]; cur_col[35] = l10[0 + stride_x]; cur_col[36] = l02[2 + stride_x2]; cur_col[37] = l10[0 + stride_x2]; cur_col[38] = l02[2 + stride_x3]; cur_col[39] = l10[0 + stride_x3]; cur_col[40] = l10[1]; cur_col[41] = l10[2]; cur_col[42] = l10[1 + stride_x]; cur_col[43] = l10[2 + stride_x]; cur_col[44] = l10[1 + stride_x2]; cur_col[45] = l10[2 + stride_x2]; cur_col[46] = l10[1 + stride_x3]; cur_col[47] = l10[2 + stride_x3]; cur_col[48] = l11[0]; cur_col[49] = l11[1]; cur_col[50] = l11[0 + stride_x]; cur_col[51] = l11[1 + stride_x]; cur_col[52] = l11[0 + stride_x2]; cur_col[53] = l11[1 + stride_x2]; cur_col[54] = l11[0 + stride_x3]; cur_col[55] = l11[1 + stride_x3]; cur_col[56] = l11[2]; cur_col[57] = l12[0]; cur_col[58] = l11[2 + stride_x]; cur_col[59] = l12[0 + stride_x]; cur_col[60] = l11[2 + stride_x2]; cur_col[61] = l12[0 + stride_x2]; cur_col[62] = l11[2 + stride_x3]; cur_col[63] = l12[0 + stride_x3]; cur_col[64] = l12[1]; cur_col[65] = l12[2]; cur_col[66] = l12[1 + stride_x]; cur_col[67] = l12[2 + stride_x]; cur_col[68] = l12[1 + stride_x2]; cur_col[69] = l12[2 + stride_x2]; cur_col[70] = l12[1 + stride_x3]; cur_col[71] = l12[2 + stride_x3]; cur_col += 72; l00 += input_xy * 2; l01 += input_xy * 2; l02 += input_xy * 2; l10 += input_xy * 2; l11 += input_xy * 2; l12 += input_xy * 2; } if (input_chan & 0x1) { cur_col[0] = l00[0]; cur_col[1] = l00[1]; cur_col[2] = l00[0 + stride_x]; cur_col[3] = l00[1 + stride_x]; cur_col[4] = l00[0 + stride_x2]; cur_col[5] = l00[1 + stride_x2]; cur_col[6] = l00[0 + stride_x3]; cur_col[7] = l00[1 + stride_x3]; cur_col[8] = l00[2]; cur_col[9] = 
l01[0]; cur_col[10] = l00[2 + stride_x]; cur_col[11] = l01[0 + stride_x]; cur_col[12] = l00[2 + stride_x2]; cur_col[13] = l01[0 + stride_x2]; cur_col[14] = l00[2 + stride_x3]; cur_col[15] = l01[0 + stride_x3]; cur_col[16] = l01[1]; cur_col[17] = l01[2]; cur_col[18] = l01[1 + stride_x]; cur_col[19] = l01[2 + stride_x]; cur_col[20] = l01[1 + stride_x2]; cur_col[21] = l01[2 + stride_x2]; cur_col[22] = l01[1 + stride_x3]; cur_col[23] = l01[2 + stride_x3]; cur_col[24] = l02[0]; cur_col[25] = l02[1]; cur_col[26] = l02[0 + stride_x]; cur_col[27] = l02[1 + stride_x]; cur_col[28] = l02[0 + stride_x2]; cur_col[29] = l02[1 + stride_x2]; cur_col[30] = l02[0 + stride_x3]; cur_col[31] = l02[1 + stride_x3]; cur_col[32] = l02[2]; cur_col[33] = 0; cur_col[34] = l02[2 + stride_x]; cur_col[35] = 0; cur_col[36] = l02[2 + stride_x2]; cur_col[37] = 0; cur_col[38] = l02[2 + stride_x3]; cur_col[39] = 0; } } else { bool odd_line = false; int kchp = 0; int kyp = 0; for (int kch = 0; kch < input_chan; kch++) { for (int ky = 0; ky < 3; ky++) { if (odd_line) { for (int i = 0; i < 4; i++) { imy[i] = imy_start[i] + kyp; imx[i] = imx_start[i] + 2; if (imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]); else *cur_col++ = 0; imy[i] = imy_start[i] + ky; if (imx_start[i] >= 0 && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx_start[i]); else *cur_col++ = 0; } for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { imx[i] = imx_start[i] + 1 + k; if (imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]); else *cur_col++ = 0; } } odd_line = false; } // even line 2n else { for (int i = 0; i < 4; i++) imy[i] = imy_start[i] + ky; for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { imx[i] = imx_start[i] + k; if (imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * 
kch + input_x * imy[i] + imx[i]); else *cur_col++ = 0; } } kchp = kch; kyp = ky; odd_line = true; } } } if (kernel_size1) { for (int i = 0; i < 4; i++) { imy[i] = imy_start[i] + kyp; imx[i] = imx_start[i] + 2; if (imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]); else *cur_col++ = 0; *cur_col++ = 0; } } } } int col_i = col_end & -4; if (col_end3) { int imx[4] = {0}; int imy[4] = {0}; int cnt_x[4] = {0}; int cnt_y[4] = {0}; int imx_start[4] = {0}; int imy_start[4] = {0}; int8_t* cur_col = col + col_i * kernel_size_aligned2; for (int i = 0; i < 4; i++) { cnt_y[i] = (col_i + i) / output_x; cnt_x[i] = col_i + i - cnt_y[i] * output_x; imx_start[i] = cnt_x[i] * stride_x - pad_x0; imy_start[i] = cnt_y[i] * stride_y - pad_y0; } bool odd_line = false; int kchp = 0; int kyp = 0; for (int kch = 0; kch < input_chan; kch++) { for (int ky = 0; ky < 3; ky++) { // odd line 1 + 2n if (odd_line) { for (int i = 0; i < 4; i++) { imy[i] = imy_start[i] + kyp; imx[i] = imx_start[i] + 2; if ((i < col_end3) && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]); else *cur_col++ = 0; imy[i] = imy_start[i] + ky; if ((i < col_end3) && imx_start[i] >= 0 && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx_start[i]); else *cur_col++ = 0; } for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { imx[i] = imx_start[i] + (1 + k); if ((i < col_end3) && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]); else *cur_col++ = 0; } } odd_line = false; } // even line 2n + 1 else { for (int i = 0; i < 4; i++) imy[i] = imy_start[i] + ky; for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { imx[i] = imx_start[i] + k; if (i < col_end3 && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + 
input_x * imy[i] + imx[i]); else *cur_col++ = 0; } } kchp = kch; kyp = ky; odd_line = true; } } } if (kernel_size1) { for (int i = 0; i < 4; i++) { imy[i] = imy_start[i] + kyp; imx[i] = imx_start[i] + 2; if ((i < col_end3) && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]); else *cur_col++ = 0; *cur_col++ = 0; } } } } // general case for kernel size <=3 else if ((kernel_x) < 4 && (kernel_y < 4)) { int kch[2], kx[2], ky[2], imx[4][2], imy[4][2]; for (int col_i = (col_start & -4); col_i < (col_end & -4); col_i += 4) { int cnt_x[4] = {0}; int cnt_y[4] = {0}; int imx_start[4] = {0}; int imy_start[4] = {0}; int8_t* cur_col = col + col_i * kernel_size_aligned2; for (int i = 0; i < 4; i++) { cnt_y[i] = (col_i + i) / output_x; cnt_x[i] = col_i + i - cnt_y[i] * output_x; imx_start[i] = cnt_x[i] * stride_x - pad_x0; imy_start[i] = cnt_y[i] * stride_y - pad_y0; } for (int col_j = 0; col_j < (kernel_size & -2); col_j += 2) { for (int k = 0; k < 2; k++) { kch[k] = (col_j + k) / kernel_xy; ky[k] = (col_j + k - kch[k] * kernel_xy) / kernel_x; kx[k] = (col_j + k - kch[k] * kernel_xy) - ky[k] * kernel_x; ky[k] = ky[k] * dilation_y; kx[k] = kx[k] * dilation_x; for (int i = 0; i < 4; i++) { imx[i][k] = imx_start[i] + kx[k]; imy[i][k] = imy_start[i] + ky[k]; } } for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { if (imx[i][k] >= 0 && imx[i][k] < input_x && imy[i][k] >= 0 && imy[i][k] < input_y) *cur_col++ = *(im + input_xy * kch[k] + input_x * imy[i][k] + imx[i][k]); else *cur_col++ = 0; } } } int col_j = kernel_size & -2; if (kernel_size1) { kch[0] = col_j / kernel_xy; ky[0] = (col_j - kch[0] * kernel_xy) / kernel_x; kx[0] = col_j - kch[0] * kernel_xy - ky[0] * kernel_x; ky[0] = ky[0] * dilation_y; kx[0] = kx[0] * dilation_x; for (int i = 0; i < 4; i++) { imx[i][0] = imx_start[i] + kx[0]; imy[i][0] = imy_start[i] + ky[0]; if (imx[i][0] >= 0 && imx[i][0] < input_x && imy[i][0] >= 0 && imy[i][0] < input_y) 
*cur_col++ = *(im + input_xy * kch[0] + input_x * imy[i][0] + imx[i][0]); else *cur_col++ = 0; *cur_col++ = 0; } } } int col_i = col_end & -4; // final 4 input if (col_end3) { int cnt_x[4] = {0}; int cnt_y[4] = {0}; int imx_start[4] = {0}; int imy_start[4] = {0}; int8_t* cur_col = col + col_i * kernel_size_aligned2; for (int i = 0; i < 4; i++) { cnt_y[i] = (col_i + i) / output_x; cnt_x[i] = col_i + i - cnt_y[i] * output_x; imx_start[i] = cnt_x[i] * stride_x - pad_x0; imy_start[i] = cnt_y[i] * stride_y - pad_y0; } for (int col_j = 0; col_j < (kernel_size & -2); col_j += 2) { for (int k = 0; k < 2; k++) { kch[k] = (col_j + k) / kernel_xy; ky[k] = (col_j + k - kch[k] * kernel_xy) / kernel_x; kx[k] = (col_j + k - kch[k] * kernel_xy) - ky[k] * kernel_x; ky[k] = ky[k] * dilation_y; kx[k] = kx[k] * dilation_x; for (int i = 0; i < 4; i++) { imx[i][k] = imx_start[i] + kx[k]; imy[i][k] = imy_start[i] + ky[k]; } } for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { if ((col_i + i) < col_end && imx[i][k] >= 0 && imx[i][k] < input_x && imy[i][k] >= 0 && imy[i][k] < input_y) *cur_col++ = *(im + input_xy * kch[k] + input_x * imy[i][k] + imx[i][k]); else *cur_col++ = 0; } } } int col_j = kernel_size & -2; if (kernel_size1) { kch[0] = col_j / kernel_xy; ky[0] = (col_j - kch[0] * kernel_xy) / kernel_x; kx[0] = col_j - kch[0] * kernel_xy - ky[0] * kernel_x; ky[0] = ky[0] * dilation_y; kx[0] = kx[0] * dilation_x; for (int i = 0; i < 4; i++) { imx[i][0] = imx_start[i] + kx[0]; imy[i][0] = imy_start[i] + ky[0]; if ((col_i + i) < col_end && imx[i][0] >= 0 && imx[i][0] < input_x && imy[i][0] >= 0 && imy[i][0] < input_y) *cur_col++ = *(im + input_xy * kch[0] + input_x * imy[i][0] + imx[i][0]); else *cur_col++ = 0; *cur_col++ = 0; } } } } // general case for kernel size >=3 else { int kch, kx, ky, kchp, kyp, imx[4], imy[4]; int kernel_x1 = kernel_x & 0x1; int8_t* cur_col = col + col_start * kernel_size_aligned2; for (int col_i = (col_start & -4); col_i < (col_end & -4); col_i += 4) 
{ int cnt_x[4] = {0}; int cnt_y[4] = {0}; int imx_start[4] = {0}; int imy_start[4] = {0}; for (int i = 0; i < 4; i++) { cnt_y[i] = (col_i + i) / output_x; cnt_x[i] = col_i + i - cnt_y[i] * output_x; imx_start[i] = cnt_x[i] * stride_x - pad_x0; imy_start[i] = cnt_y[i] * stride_y - pad_y0; } bool odd_line = false; kchp = 0; kyp = 0; for (int kch = 0; kch < input_chan; kch++) { for (int ky = 0; ky < kernel_y; ky++) { // odd line 2 + 2n if (odd_line) { for (int i = 0; i < 4; i++) { imy[i] = imy_start[i] + kyp * dilation_y; imx[i] = imx_start[i] + (kernel_x - 1) * dilation_x; if (imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]); else *cur_col++ = 0; imy[i] = imy_start[i] + ky * dilation_y; if (imx_start[i] >= 0 && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx_start[i]); else *cur_col++ = 0; } for (int kx = 1; kx < kernel_x; kx += 2) { for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { imx[i] = imx_start[i] + (kx + k) * dilation_x; if (imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]); else *cur_col++ = 0; } } } odd_line = false; } // even line 2n else { for (int i = 0; i < 4; i++) imy[i] = imy_start[i] + ky * dilation_y; for (int kx = 0; kx < (kernel_x - 1); kx += 2) { for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { imx[i] = imx_start[i] + (kx + k) * dilation_x; if (imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]); else *cur_col++ = 0; } } } kchp = kch; kyp = ky; odd_line = kernel_x1 ? 
true : false; } } } if (kernel_size1) { for (int i = 0; i < 4; i++) { imy[i] = imy_start[i] + kyp * dilation_y; imx[i] = imx_start[i] + (kernel_x - 1) * dilation_x; if (imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]); else *cur_col++ = 0; *cur_col++ = 0; } } } int col_i = col_end & -4; // final 4 input if (col_end3) { int cnt_x[4] = {0}; int cnt_y[4] = {0}; int imx_start[4] = {0}; int imy_start[4] = {0}; for (int i = 0; i < 4; i++) { cnt_y[i] = (col_i + i) / output_x; cnt_x[i] = col_i + i - cnt_y[i] * output_x; imx_start[i] = cnt_x[i] * stride_x - pad_x0; imy_start[i] = cnt_y[i] * stride_y - pad_y0; } bool odd_line = false; kchp = 0; kyp = 0; for (int kch = 0; kch < input_chan; kch++) { for (int ky = 0; ky < kernel_y; ky++) { // odd line 1 + 2n if (odd_line) { for (int i = 0; i < 4; i++) { imy[i] = imy_start[i] + kyp * dilation_y; imx[i] = imx_start[i] + (kernel_x - 1) * dilation_x; if ((i < col_end3) && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]); else *cur_col++ = 0; imy[i] = imy_start[i] + ky * dilation_y; if ((i < col_end3) && imx_start[i] >= 0 && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx_start[i]); else *cur_col++ = 0; } for (int kx = 1; kx < kernel_x; kx += 2) { for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { imx[i] = imx_start[i] + (kx + k) * dilation_x; if ((i < col_end3) && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]); else *cur_col++ = 0; } } } odd_line = false; } // even line 2n + 1 else { for (int i = 0; i < 4; i++) { imy[i] = imy_start[i] + ky * dilation_y; } for (int kx = 0; kx < (kernel_x - 1); kx += 2) { for (int i = 0; i < 4; i++) { for (int k = 0; k < 2; k++) { imx[i] = imx_start[i] + (kx + k) * dilation_x; if (i < col_end3 && imx[i] >= 0 && imx[i] < input_x 
/* ==================================================================== */
/* Tail of im2col_int8(): the function's opening and most of its body   */
/* lie before this chunk, so the fragment below is left byte-identical. */
/* It finishes the generic-kernel path for the last partial group of 4  */
/* output columns and closes the function.                              */
/* ==================================================================== */
&& imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]); else *cur_col++ = 0; } } } kchp = kch; kyp = ky; odd_line = kernel_x1 ? true : false; } } }
/* odd trailing kernel element: fetch the last kernel column of the
   previous odd line for each of the 4 outputs, zero-pad its pair */
if (kernel_size1) { for (int i = 0; i < 4; i++) { imy[i] = imy_start[i] + kyp * dilation_y; imx[i] = imx_start[i] + (kernel_x - 1) * dilation_x; if ((i < col_end3) && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y) *cur_col++ = *(im + input_xy * kchp + input_x * imy[i] + imx[i]); else *cur_col++ = 0; *cur_col++ = 0; } } } }
#endif
    return;
}

/*
 * int8_conv_hcl_prerun - one-time setup before running the int8 convolution.
 *
 * Allocates the im2col scratch buffer and the kernel-interleave buffer
 * (unless the caller supplied external ones via priv_info), interleaves the
 * kernel weights, and precomputes the per-output-channel requantization
 * (multiplier, shift) pairs from input/kernel/output scales.
 *
 * Returns 0 (no failure path is reported).
 */
int int8_conv_hcl_prerun(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param)
{
    /* shape bookkeeping (only out_c is used further down in this function) */
    int in_c = input_tensor->dims[1];
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int out_c = output_tensor->dims[1];
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];

    /* alloc mem of im2col */
    if (!priv_info->external_im2col_mem)
    {
        int mem_size = int8_conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size); /* NOTE(review): allocation result unchecked */
        priv_info->im2col_buffer = mem;
        priv_info->im2col_buffer_size = mem_size;
    }

    /* alloc mem of kernel interleave */
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor, param);
        void* mem = sys_malloc(mem_size); /* NOTE(review): allocation result unchecked */
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    /* kernel interleave */
    interleave_int8(filter_tensor, priv_info, param);

    /* per-output-channel requantization tables, freed in postrun */
    priv_info->multi = (int*)sys_malloc(out_c * sizeof(int));
    priv_info->q_shift = (int*)sys_malloc(out_c * sizeof(int));

    float input_scale = input_tensor->scale;
    float* kernel_scales = filter_tensor->scale_list;
    float output_scale = output_tensor->scale;

    priv_info->activation_min = -127;
    priv_info->activation_max = 127;

    /* set activation: clamp range for ReLU (0), ReLU1 (1), ReLU6 (6),
       expressed in the quantized output domain */
    if (param->activation >= 0)
    {
        priv_info->activation_min = 0;
        if (param->activation == 1)
            priv_info->activation_max = round(1.0 / output_scale);
        if (param->activation == 6)
            priv_info->activation_max = round(6.0 / output_scale);
        if (priv_info->activation_max > 127)
            priv_info->activation_max = 127;
    }

    /* decompose each channel's float rescale factor into a Q31 fixed-point
       multiplier plus a power-of-two shift via frexp() */
    for (int i = 0; i < out_c; i++)
    {
        float kernel_scale = kernel_scales[i];
        float scale = input_scale * kernel_scale / output_scale;
        int shift;
        float q = frexp(scale, &shift);
        int fix_q = round(q * (1ll << 31));
        // TLOG_ERR("prerun: %f,%lld,%d,%d, %lld\n",q, fix_q, multi, q_shift, 1ll<<31);
        /* NOTE(review): `1l << 31` is UB where long is 32-bit (left-shifting
           into the sign bit) — presumably `1ll << 31` was intended, matching
           the line above; also round() can yield 2^31 which does not fit in
           the int fix_q before this comparison. TODO confirm/fix upstream. */
        if (fix_q == (1l << 31))
        {
            fix_q /= 2;
            shift++;
        }
        priv_info->multi[i] = (int)fix_q;
        priv_info->q_shift[i] = (int)shift;
    }

    return 0;
}

/*
 * int8_conv_hcl_postrun - release everything prerun allocated.
 *
 * Buffers marked external are owned by the caller and are not freed.
 * Pointers are nulled after freeing to guard against double-free.
 * Returns 0.
 */
int int8_conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    if (!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL)
    {
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL;
    }

    if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL)
    {
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL;
    }

    if (priv_info->multi)
    {
        sys_free(priv_info->multi);
        priv_info->multi = NULL;
    }

    if (priv_info->q_shift)
    {
        sys_free(priv_info->q_shift);
        priv_info->q_shift = NULL;
    }

    return 0;
}

/*
 * int8_conv_hcl_run - execute the int8 convolution.
 *
 * For each batch image and each group: im2col the input, then perform the
 * interleaved int8 GEMM in column blocks sized to (roughly) fit the L2
 * cache, using the 4x16/4x8 kernels for the aligned part of the output
 * channels and 4x4 for the remainder. Requantization uses the per-channel
 * multiplier/shift tables computed in prerun. Returns 0.
 */
int int8_conv_hcl_run(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* bias_tensor, struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param, int num_thread, int cpu_affinity)
{
    /* param */
    int group = param->group;
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int pad_h0 = param->pad_h0;
    int pad_h1 = param->pad_h1;
    int pad_w0 = param->pad_w0;
    int pad_w1 = param->pad_w1;
    int act_type = param->activation;

    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1] / group; /* channels per group */
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size = in_c * in_h * in_w;
    int kernel_size = in_c * kernel_h * kernel_w;
    int input_image_size = input_tensor->dims[1] * input_tensor->dims[2] * input_tensor->dims[3];

    int out_c = output_tensor->dims[1] / group;
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int out_hw = out_h * out_w;
    int output_size = out_c * out_h * out_w;
    int out_c_align = ((out_c + 3) & -4);
    int output_image_size = output_tensor->dims[1] * output_tensor->dims[2] * output_tensor->dims[3];

    int activation_min = priv_info->activation_min;
    int activation_max = priv_info->activation_max;

    /* buffer addr */
    int8_t* input_buf = (int8_t*)input_tensor->data;
    int8_t* output_buf = (int8_t*)output_tensor->data;
    int32_t* biases_buf = NULL;
    bool have_biases = false;
    if (bias_tensor != NULL)
    {
        biases_buf = (int32_t*)bias_tensor->data;
        have_biases = true;
    }
    int8_t* col_buf = (int8_t*)priv_info->im2col_buffer;
    int8_t* interleave_buf = (int8_t*)priv_info->interleave_buffer;

    /* block size split parameter: smaller L2 assumed on LITTLE cluster */
    int L2_CACHE_SIZE = (cpu_affinity == TENGINE_CLUSTER_LITTLE) ? 512 * 1024 : 1024 * 1024;
    int kernel_size_l1 = kernel_size;
#ifdef __aarch64__
    int col_cnt_l2 = L2_CACHE_SIZE * 3 / kernel_size_l1 / 4;
#else
    int col_cnt_l2 = L2_CACHE_SIZE / 4 / kernel_size_l1 * 3 / 4;
#endif
    /* round the block width down to a multiple of 4, minimum 4 */
    col_cnt_l2 = col_cnt_l2 > 4 ? (col_cnt_l2 & -4) : 4;

    for (int n = 0; n < batch; n++) // batch size
    {
        int8_t* input = input_buf + n * input_size * group;
        int8_t* output = output_buf + n * output_size * group;
        for (int g = 0; g < group; g++)
        {
            int8_t* cur_input = input + g * input_size;

            im2col_int8(cur_input, col_buf, in_c, in_w, in_h, kernel_w, kernel_h, stride_w, stride_h, dilation_w, dilation_h, pad_w0, pad_w1, pad_h0, pad_h1, out_w, out_h, num_thread);

            /* per-group offsets into the interleaved kernel / output / tables */
            int kernel_size_aligned2 = (kernel_size + 1) & -2;
            int output_chan_aligned4 = (out_c + 3) & -4;
            int8_t* kernel_g = interleave_buf + g * kernel_size_aligned2 * output_chan_aligned4;
            int8_t* output_g = output + g * output_size;
            int* bias_g = have_biases ? (biases_buf + g * out_c) : NULL;
            int* multi_g = priv_info->multi + g * out_c;
            int* q_shift_g = priv_info->q_shift + g * out_c;

            // for input block of L2 cache size
            for (int col_i = 0; col_i < out_hw; col_i += col_cnt_l2)
            {
                int col_start = col_i;
                int col_end = col_i + col_cnt_l2;
                col_end = col_end > out_hw ? out_hw : col_end;
#ifdef __aarch64__
                i8gemm4x16(col_buf, kernel_g, have_biases, bias_g, output_g, multi_g, kernel_size, out_hw, col_start, col_end, 0, out_c & -16, activation_min, activation_max, q_shift_g, num_thread, cpu_affinity);
                if (out_c & 0xf)
                    i8gemm4x4(col_buf, kernel_g, have_biases, bias_g, output_g, multi_g, kernel_size, out_hw, col_start, col_end, out_c & -16, out_c, activation_min, activation_max, q_shift_g, num_thread, cpu_affinity);
#else
                i8gemm4x8(col_buf, kernel_g, have_biases, bias_g, output_g, multi_g, kernel_size, out_hw, col_start, col_end, 0, out_c & -8, activation_min, activation_max, q_shift_g, num_thread, cpu_affinity);
                if (out_c & 0x7)
                    i8gemm4x4(col_buf, kernel_g, have_biases, bias_g, output_g, multi_g, kernel_size, out_hw, col_start, col_end, out_c & -8, out_c, activation_min, activation_max, q_shift_g, num_thread, cpu_affinity);
#endif
            } // col_cont
        }
    }

    return 0;
}
3d25pt_var.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"

#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * (Classic GNU libc example implementation; mutates *y as scratch.)
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Perform the carry for the later subtraction by updating y. */
    if (x->tv_usec < y->tv_usec)
    {
        int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * nsec;
        y->tv_sec += nsec;
    }
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int nsec = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * nsec;
        y->tv_sec -= nsec;
    }

    /* Compute the time remaining to wait.
     * tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Return 1 if result is negative. */
    return x->tv_sec < y->tv_sec;
}

/*
 * Benchmark driver: allocates a double-buffered (Nz,Ny,Nx) grid and 13
 * coefficient grids, runs the PLUTO-tiled 25-point stencil for Nt time
 * steps, TESTS times, reporting the per-test wall time.
 * Usage: prog Nx Ny Nz [Nt] (each padded by 8 for halos).
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    int Nx, Ny, Nz, Nt;
    /* NOTE(review): Nx/Ny/Nz are read below even when argc <= 3, and Nt when
       argc <= 4 — uninitialized use (UB) unless all four args are given. */
    if (argc > 3)
    {
        Nx = atoi(argv[1])+8;
        Ny = atoi(argv[2])+8;
        Nz = atoi(argv[3])+8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    // allocate the arrays: A[2][Nz][Ny][Nx] double buffer
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for(m=0; m<2;m++){
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    /* 13 coefficient grids: center + 4 offsets along each of z/y/x */
    double ****coef = (double ****) malloc(sizeof(double***)*13);
    for(m=0; m<13;m++){
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }

    // tile size information, including extra element to decide the list length
    /* NOTE(review): tile_size is never read after this point and never freed
       (leak); it documents the tiling used by the generated code below. */
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 24;
    tile_size[1] = 24;
    tile_size[2] = 8;
    tile_size[3] = 2048;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;

    const int BASE = 1024;

    // initialize variables (fixed seed intentionally left commented out)
    // srand(42);
    for (i = 1; i < Nz; i++)
    {
        for (j = 1; j < Ny; j++)
        {
            for (k = 1; k < Nx; k++)
            {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
            }
        }
    }
    for (m=0; m<13; m++)
    {
        for (i=1; i<Nz; i++)
        {
            for (j=1; j<Ny; j++)
            {
                for (k=1; k<Nx; k++)
                {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);

        // serial execution - Addition: 6 && Multiplication: 2
        /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
           This file is part of the GNU C Library.

           The GNU C Library is free software; you can redistribute it and/or
           modify it under the terms of the GNU Lesser General Public License as
           published by the Free Software Foundation; either version 2.1 of the
           License, or (at your option) any later version.

           The GNU C Library is distributed in the hope that it will be useful,
           but WITHOUT ANY WARRANTY; without even the implied warranty of
           MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
           Lesser General Public License for more details.

           You should have received a copy of the GNU Lesser General Public
           License along with the GNU C Library; if not, see
           <http://www.gnu.org/licenses/>. */
        /* This header is separate from features.h so that the compiler can
           include it implicitly at the start of every compilation. It must not
           itself include <features.h> or any other header that includes
           <features.h> because the implicit include comes before any feature
           test macros that may be defined in a source file before it first
           explicitly includes a system header. GCC knows the name of this
           header in order to preinclude it. */
        /* glibc's intent is to support the IEC 559 math functionality, real and
           complex. If the GCC (4.9 and later) predefined macros specifying
           compiler intent are available, use them to determine whether the
           overall intent is to support these features; otherwise, presume an
           older compiler has intent to support these features and define these
           macros by default. */
        /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
           Unicode 6.0. */
        /* We do not support C11 <threads.h>. */
        /* (The boilerplate comment above was inlined into this file by the
           original source-to-source tool; it carries no code.) */

        int t1, t2, t3, t4, t5, t6, t7, t8;
        int lb, ub, lbp, ubp, lb2, ub2;
        register int lbv, ubv;
        /* Start of CLooG code — machine-generated time-tiled loop nest
           (tile sizes 24x24x8x2048, see tile_size[] above). Do not edit
           the bounds by hand. */
        if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
            for (t1=-1;t1<=floord(Nt-1,3);t1++) {
                lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
                ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
                for (t2=lbp;t2<=ubp;t2++) {
                    for (t3=max(max(max(0,ceild(3*t1,2)),ceild(24*t2-Nz+5,8)),3*t1-3*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(12*t1+Ny+15,8)),floord(24*t2+Ny+11,8)),floord(24*t1-24*t2+Nz+Ny+13,8));t3++) {
                        for (t4=max(max(max(max(0,ceild(3*t1-3*t2-254,256)),ceild(3*t1-510,512)),ceild(24*t2-Nz-2035,2048)),ceild(8*t3-Ny-2035,2048));t4<=min(min(min(min(floord(4*Nt+Nx-9,2048),floord(12*t1+Nx+15,2048)),floord(24*t2+Nx+11,2048)),floord(8*t3+Nx-5,2048)),floord(24*t1-24*t2+Nz+Nx+13,2048));t4++) {
                            for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(2048*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),2*t3),Nt-1),3*t1+5),6*t2+4),512*t4+510);t5++) {
                                for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
                                    for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
                                        lbv=max(2048*t4,4*t5+4);
                                        ubv=min(2048*t4+2047,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                                        for (t8=lbv;t8<=ubv;t8++) {
                                            /* 25-point axis-symmetric update: center + (z,y,x) neighbors
                                               at offsets 1..4, weighted by coef[0..12]; (-4*t5+t6/t7/t8)
                                               map tile coordinates back to grid indices */
                                            A[(t5+1)%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] =
                                            (((((((((((((coef[0][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)])
                                            + (coef[1][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)-1][(-4*t5+t7)][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)+1][(-4*t5+t7)][(-4*t5+t8)])))
                                            + (coef[2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)][(-4*t5+t7)-1][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)][(-4*t5+t7)+1][(-4*t5+t8)])))
                                            + (coef[3][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)-1] + A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)+1])))
                                            + (coef[4][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)-2][(-4*t5+t7)][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)+2][(-4*t5+t7)][(-4*t5+t8)])))
                                            + (coef[5][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)][(-4*t5+t7)-2][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)][(-4*t5+t7)+2][(-4*t5+t8)])))
                                            + (coef[6][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)-2] + A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)+2])))
                                            + (coef[7][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)-3][(-4*t5+t7)][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)+3][(-4*t5+t7)][(-4*t5+t8)])))
                                            + (coef[8][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)][(-4*t5+t7)-3][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)][(-4*t5+t7)+3][(-4*t5+t8)])))
                                            + (coef[9][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)-3] + A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)+3])))
                                            + (coef[10][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)-4][(-4*t5+t7)][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)+4][(-4*t5+t7)][(-4*t5+t8)])))
                                            + (coef[11][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)][(-4*t5+t7)-4][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)][(-4*t5+t7)+4][(-4*t5+t8)])))
                                            + (coef[12][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)-4] + A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)+4])));;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        /* End of CLooG code */

        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = min(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    for(m=0; m<13;m++){
        for(i=0; i<Nz; i++){
            for(j=0;j<Ny;j++){
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    return 0;
}
update_ops_named_CZ.c
// update_ops_named_CZ.c -- controlled-Z (CZ) gate kernels for a state vector.
//
// A CZ gate negates the amplitude of every basis state in which both the
// control and the target qubit are 1.  The kernels below enumerate exactly
// those dim/4 basis indices and multiply the corresponding amplitudes by -1.

#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif

#ifdef _USE_SIMD
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#endif

// Forward declarations.  CZ_gate() dispatches to these kernels before their
// definitions appear; without prototypes each call would be an implicit
// function declaration, which is invalid since C99 (an error in C23).
void CZ_gate_single_unroll(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim);
#ifdef _OPENMP
void CZ_gate_parallel_unroll(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim);
#endif
#ifdef _USE_SIMD
void CZ_gate_single_simd(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim);
#ifdef _OPENMP
void CZ_gate_parallel_simd(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim);
#endif
#endif

/**
 * Apply a CZ gate to a state vector.
 *
 * Dispatches to a SIMD and/or OpenMP variant depending on build flags and
 * problem size: for small vectors (dim < 2^13) the serial kernel avoids the
 * overhead of spawning a thread team.
 *
 * control_qubit_index / target_qubit_index : qubit indices (order irrelevant
 *   for CZ, which is symmetric in its two qubits).
 * state : amplitude array of length dim, updated in place.
 * dim   : state-vector dimension (power of two, >= 4).
 */
void CZ_gate(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
#ifdef _USE_SIMD
#ifdef _OPENMP
    UINT threshold = 13;
    if (dim < (((ITYPE)1) << threshold)) {
        CZ_gate_single_simd(control_qubit_index, target_qubit_index, state, dim);
    }
    else {
        CZ_gate_parallel_simd(control_qubit_index, target_qubit_index, state, dim);
    }
#else
    CZ_gate_single_simd(control_qubit_index, target_qubit_index, state, dim);
#endif
#else
#ifdef _OPENMP
    UINT threshold = 13;
    if (dim < (((ITYPE)1) << threshold)) {
        CZ_gate_single_unroll(control_qubit_index, target_qubit_index, state, dim);
    }
    else {
        CZ_gate_parallel_unroll(control_qubit_index, target_qubit_index, state, dim);
    }
#else
    CZ_gate_single_unroll(control_qubit_index, target_qubit_index, state, dim);
#endif
#endif
}

/**
 * Serial CZ kernel with manual 2x unrolling.
 *
 * basis_index is built by expanding the loop counter around the two qubit
 * positions (low/mid/high bit fields) and OR-ing in both qubit bits via
 * `mask`, so it enumerates exactly the |..1..1..> states.  When neither
 * qubit is bit 0, consecutive basis indices differ by 1, so two amplitudes
 * can be flipped per iteration.
 */
void CZ_gate_single_unroll(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 4;
    const ITYPE target_mask = 1ULL << target_qubit_index;
    const ITYPE control_mask = 1ULL << control_qubit_index;
    const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
    const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
    const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
    // NOTE: shifted by (max_qubit_index - 1) because the counter space is
    // already compressed by the lower qubit (loop_dim = dim/4).
    const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
    const ITYPE low_mask = min_qubit_mask - 1;
    const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
    const ITYPE high_mask = ~(max_qubit_mask - 1);
    const ITYPE mask = target_mask + control_mask;
    ITYPE state_index = 0;
    if (target_qubit_index == 0 || control_qubit_index == 0) {
        // Adjacent basis indices are not contiguous; no unrolling possible.
        for (state_index = 0; state_index < loop_dim; ++state_index) {
            ITYPE basis_index = (state_index&low_mask) + ((state_index&mid_mask) << 1)
                + ((state_index&high_mask) << 2) + mask;
            state[basis_index] *= -1;
        }
    }
    else {
        // Indices come in contiguous pairs: flip two amplitudes per step.
        for (state_index = 0; state_index < loop_dim; state_index += 2) {
            ITYPE basis_index = (state_index&low_mask) + ((state_index&mid_mask) << 1)
                + ((state_index&high_mask) << 2) + mask;
            state[basis_index] *= -1;
            state[basis_index + 1] *= -1;
        }
    }
}

#ifdef _OPENMP
/**
 * OpenMP-parallel CZ kernel; identical index arithmetic to the serial
 * unrolled kernel, with the loop distributed over threads.  Iterations touch
 * disjoint amplitudes, so no synchronization is needed.
 */
void CZ_gate_parallel_unroll(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 4;
    const ITYPE target_mask = 1ULL << target_qubit_index;
    const ITYPE control_mask = 1ULL << control_qubit_index;
    const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
    const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
    const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
    const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
    const ITYPE low_mask = min_qubit_mask - 1;
    const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
    const ITYPE high_mask = ~(max_qubit_mask - 1);
    const ITYPE mask = target_mask + control_mask;
    ITYPE state_index = 0;
    if (target_qubit_index == 0 || control_qubit_index == 0) {
#pragma omp parallel for
        for (state_index = 0; state_index < loop_dim; ++state_index) {
            ITYPE basis_index = (state_index&low_mask) + ((state_index&mid_mask) << 1)
                + ((state_index&high_mask) << 2) + mask;
            state[basis_index] *= -1;
        }
    }
    else {
#pragma omp parallel for
        for (state_index = 0; state_index < loop_dim; state_index += 2) {
            ITYPE basis_index = (state_index&low_mask) + ((state_index&mid_mask) << 1)
                + ((state_index&high_mask) << 2) + mask;
            state[basis_index] *= -1;
            state[basis_index + 1] *= -1;
        }
    }
}
#endif

#ifdef _USE_SIMD
/**
 * Serial AVX kernel.  When basis indices come in contiguous pairs, two
 * complex amplitudes (4 doubles) are negated with one 256-bit multiply.
 */
void CZ_gate_single_simd(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 4;
    const ITYPE target_mask = 1ULL << target_qubit_index;
    const ITYPE control_mask = 1ULL << control_qubit_index;
    const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
    const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
    const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
    const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
    const ITYPE low_mask = min_qubit_mask - 1;
    const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
    const ITYPE high_mask = ~(max_qubit_mask - 1);
    const ITYPE mask = target_mask + control_mask;
    ITYPE state_index = 0;
    if (target_qubit_index == 0 || control_qubit_index == 0) {
        // Non-contiguous indices: fall back to scalar negation.
        for (state_index = 0; state_index < loop_dim; ++state_index) {
            ITYPE basis_index = (state_index&low_mask) + ((state_index&mid_mask) << 1)
                + ((state_index&high_mask) << 2) + mask;
            state[basis_index] *= -1;
        }
    }
    else {
        __m256d minus_one = _mm256_set_pd(-1, -1, -1, -1);
        for (state_index = 0; state_index < loop_dim; state_index += 2) {
            ITYPE basis_index = (state_index&low_mask) + ((state_index&mid_mask) << 1)
                + ((state_index&high_mask) << 2) + mask;
            // Two adjacent CTYPE amplitudes = 4 doubles = one __m256d.
            double* ptr = (double*)(state + basis_index);
            __m256d data = _mm256_loadu_pd(ptr);
            data = _mm256_mul_pd(data, minus_one);
            _mm256_storeu_pd(ptr, data);
        }
    }
}

#ifdef _OPENMP
/**
 * OpenMP-parallel AVX kernel; same vectorized negation as the serial SIMD
 * kernel with the loop distributed over threads.
 */
void CZ_gate_parallel_simd(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 4;
    const ITYPE target_mask = 1ULL << target_qubit_index;
    const ITYPE control_mask = 1ULL << control_qubit_index;
    const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
    const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
    const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
    const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
    const ITYPE low_mask = min_qubit_mask - 1;
    const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
    const ITYPE high_mask = ~(max_qubit_mask - 1);
    const ITYPE mask = target_mask + control_mask;
    ITYPE state_index = 0;
    if (target_qubit_index == 0 || control_qubit_index == 0) {
#pragma omp parallel for
        for (state_index = 0; state_index < loop_dim; ++state_index) {
            ITYPE basis_index = (state_index&low_mask) + ((state_index&mid_mask) << 1)
                + ((state_index&high_mask) << 2) + mask;
            state[basis_index] *= -1;
        }
    }
    else {
        __m256d minus_one = _mm256_set_pd(-1, -1, -1, -1);
#pragma omp parallel for
        for (state_index = 0; state_index < loop_dim; state_index += 2) {
            ITYPE basis_index = (state_index&low_mask) + ((state_index&mid_mask) << 1)
                + ((state_index&high_mask) << 2) + mask;
            double* ptr = (double*)(state + basis_index);
            __m256d data = _mm256_loadu_pd(ptr);
            data = _mm256_mul_pd(data, minus_one);
            _mm256_storeu_pd(ptr, data);
        }
    }
}
#endif
#endif
convolution_1x1.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); int q = 0; for (; q+3<inch; q+=4) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; int size = outw * outh; int remain = size; #if __AVX__ || __SSE__ __m128 k_data = _mm_loadu_ps(kernel0); for (; remain > 0; remain--) { float r_array[4] = { *r0, *r1, *r2, *r3 }; __m128 r_data = _mm_loadu_ps(r_array); __m128 sum = _mm_mul_ps(k_data, r_data); *outptr += sum.m128_f32[0] + sum.m128_f32[1] + sum.m128_f32[2] + sum.m128_f32[3]; r0++; r1++; r2++; r3++; outptr++; } #else for (; remain>0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0++; r1++; r2++; r3++; outptr++; } #endif } for (; q<inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float* r0 = img0; int size = outw * outh; int remain = size; #if __AVX__ || __SSE__ #if __AVX__ int circle_num = size / 8; __m256 k_data = _mm256_set1_ps(k0); int index = 0; for (; index < circle_num; index++) { int index_offset = index * 8; __m256 out_data = _mm256_loadu_ps(outptr + index_offset); __m256 r_data = _mm256_loadu_ps(r0 + index_offset); out_data = _mm256_add_ps(_mm256_mul_ps(r_data, k_data), out_data); _mm256_storeu_ps(outptr + index_offset, out_data); } for (index = 8 * index; index < size; index++) { outptr[index] += r0[index] * k0; } #else int circle_num = size / 4; __m128 k_data = _mm_set1_ps(k0); int index = 0; for (; index < circle_num; index++) { int index_offset = index * 4; __m128 out_data 
= _mm_loadu_ps(outptr + index_offset); __m128 r_data = _mm_loadu_ps(r0 + index_offset); out_data = _mm_add_ps(_mm_mul_ps(r_data, k_data), out_data); _mm_storeu_ps(outptr + index_offset, out_data); } for (index = 4 * index; index < size; index++) { outptr[index] += r0[index] * k0; } #endif #else for (; remain>0; remain--) { float sum = *r0 * k0; *outptr += sum; r0++; outptr++; } #endif } } } static void conv1x1s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2*outw + w; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); int q = 0; for (; q+3<inch; q+=4) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; #if __AVX__ || __SSE__ float k_array[4] = { k0, k1, k2, k3 }; __m128 k_data = _mm_loadu_ps(k_array); for (int i = 0; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { float r_array[4] = { *r0, *r1, *r2, *r3 }; __m128 r_data = _mm_loadu_ps(r_array); __m128 sum = _mm_mul_ps(k_data, r_data); *outptr += sum.m128_f32[0] + sum.m128_f32[1] + sum.m128_f32[2] + sum.m128_f32[3]; r0 += 2; r1 += 2; r2 += 2; r3 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; } #else for (int i = 0; i < outh; i++) { int remain = outw; for (; remain>0; remain--) { float 
sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0 += 2; r1 += 2; r2 += 2; r3 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; } #endif } for (; q<inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float* r0 = img0; for (int i = 0; i < outh; i++) { int remain = outw; for (; remain>0; remain--) { float sum = *r0 * k0; *outptr += sum; r0 += 2; outptr++; } r0 += tailstep; } } } }
project.c
//----------------------------------------------------------------------------- // project.c // // Project: EPA SWMM5 // Version: 5.1 // Date: 03/19/14 (Build 5.1.000) // 04/14/14 (Build 5.1.004) // 09/15/14 (Build 5.1.007) // 03/19/15 (Build 5.1.008) // 04/30/15 (Build 5.1.009) // 08/01/16 (Build 5.1.011) // 03/14/17 (Build 5.1.012) // 05/10/18 (Build 5.1.013) // Author: L. Rossman // // Project management functions. // // This module provides project-related services such as: // o opening a new project and reading its input data // o allocating and freeing memory for project objects // o setting default values for object properties and options // o initializing the internal state of all objects // o managing hash tables for identifying objects by ID name // // Build 5.1.004: // - Ignore RDII option added. // // Build 5.1.007: // - Default monthly adjustments for climate variables included. // - User-supplied GW flow equations initialized to NULL. // - Storage node exfiltration object initialized to NULL. // - Freeing of memory used for storage node exfiltration included. // // Build 5.1.008: // - Constants used for dynamic wave routing moved to dynwave.c. // - Input processing of minimum time step & number of // parallel threads for dynamic wave routing added. // - Default values of hyd. conductivity adjustments added. // - Freeing of memory used for outfall pollutant load added. // // Build 5.1.009: // - Fixed bug in computing total duration introduced in 5.1.008. // // Build 5.1.011: // - Memory management of hydraulic event dates array added. // // Build 5.1.012: // - Minimum conduit slope option initialized to 0 (none). // - NO/YES no longer accepted as options for NORMAL_FLOW_LIMITED. // // Build 5.1.013: // - omp_get_num_threads function protected against lack of compiler // support for OpenMP. // - Rain gage validation now performed after subcatchment validation. // - More robust parsing of MinSurfarea option provided. 
// - Support added for new RuleStep analysis option. // //----------------------------------------------------------------------------- #define _CRT_SECURE_NO_DEPRECATE #include <stdlib.h> #include <string.h> #include <stdlib.h> #include <math.h> #if defined(_OPENMP) //(5.1.013) #include <omp.h> // #else // int omp_get_num_threads(void) { return 1;} // #endif // #include "headers.h" #include "lid.h" #include "hash.h" #include "mempool.h" //----------------------------------------------------------------------------- // Shared variables //----------------------------------------------------------------------------- static HTtable* Htable[MAX_OBJ_TYPES]; // Hash tables for object ID names static char MemPoolAllocated; // TRUE if memory pool allocated //----------------------------------------------------------------------------- // External Functions (declared in funcs.h) //----------------------------------------------------------------------------- // project_open (called from swmm_open in swmm5.c) // project_close (called from swmm_close in swmm5.c) // project_readInput (called from swmm_open in swmm5.c) // project_readOption (called from readOption in input.c) // project_validate (called from swmm_open in swmm5.c) // project_init (called from swmm_start in swmm5.c) // project_addObject (called from addObject in input.c) // project_createMatrix (called from openFileForInput in iface.c) // project_freeMatrix (called from iface_closeRoutingFiles) // project_findObject // project_findID //----------------------------------------------------------------------------- // Function declarations //----------------------------------------------------------------------------- static void initPointers(void); static void setDefaults(void); static void openFiles(char *f1, char *f2, char *f3); static void createObjects(void); static void deleteObjects(void); static void createHashTables(void); static void deleteHashTables(void); 
//============================================================================= void project_open(char *f1, char *f2, char *f3) // // Input: f1 = pointer to name of input file // f2 = pointer to name of report file // f3 = pointer to name of binary output file // Output: none // Purpose: opens a new SWMM project. // { initPointers(); setDefaults(); openFiles(f1, f2, f3); } //============================================================================= void project_readInput() // // Input: none // Output: none // Purpose: retrieves project data from input file. // { // --- create hash tables for fast retrieval of objects by ID names createHashTables(); // --- count number of objects in input file and create them input_countObjects(); createObjects(); // --- read project data from input file input_readData(); if ( ErrorCode ) return; // --- establish starting & ending date/time StartDateTime = StartDate + StartTime; EndDateTime = EndDate + EndTime; ReportStart = ReportStartDate + ReportStartTime; ReportStart = MAX(ReportStart, StartDateTime); // --- check for valid starting & ending date/times if ( EndDateTime <= StartDateTime ) { report_writeErrorMsg(ERR_START_DATE, ""); } else if ( EndDateTime <= ReportStart ) { report_writeErrorMsg(ERR_REPORT_DATE, ""); } else { // --- compute total duration of simulation in seconds TotalDuration = floor((EndDateTime - StartDateTime) * SECperDAY); // --- reporting step must be <= total duration if ( (double)ReportStep > TotalDuration ) { ReportStep = (int)(TotalDuration); } // --- reporting step can't be < routing step if ( (double)ReportStep < RouteStep ) { report_writeErrorMsg(ERR_REPORT_STEP, ""); } // --- convert total duration to milliseconds TotalDuration *= 1000.0; } } //============================================================================= void project_validate() // // Input: none // Output: none // Purpose: checks validity of project data. 
// { int i; int j; int err; // --- validate Curves and TimeSeries for ( i=0; i<Nobjects[CURVE]; i++ ) { err = table_validate(&Curve[i]); if ( err ) report_writeErrorMsg(ERR_CURVE_SEQUENCE, Curve[i].ID); } for ( i=0; i<Nobjects[TSERIES]; i++ ) { err = table_validate(&Tseries[i]); if ( err ) report_writeTseriesErrorMsg(err, &Tseries[i]); } // --- validate hydrology objects // (NOTE: order is important !!!!) climate_validate(); lid_validate(); if ( Nobjects[SNOWMELT] == 0 ) IgnoreSnowmelt = TRUE; if ( Nobjects[AQUIFER] == 0 ) IgnoreGwater = TRUE; for ( i=0; i<Nobjects[AQUIFER]; i++ ) gwater_validateAquifer(i); for ( i=0; i<Nobjects[SUBCATCH]; i++ ) subcatch_validate(i); for ( i=0; i<Nobjects[GAGE]; i++ ) gage_validate(i); //(5.1.013) for ( i=0; i<Nobjects[SNOWMELT]; i++ ) snow_validateSnowmelt(i); // --- compute geometry tables for each shape curve j = 0; for ( i=0; i<Nobjects[CURVE]; i++ ) { if ( Curve[i].curveType == SHAPE_CURVE ) { Curve[i].refersTo = j; Shape[j].curve = i; if ( !shape_validate(&Shape[j], &Curve[i]) ) report_writeErrorMsg(ERR_CURVE_SEQUENCE, Curve[i].ID); j++; } } // --- validate links before nodes, since the latter can // result in adjustment of node depths for ( i=0; i<Nobjects[NODE]; i++) Node[i].oldDepth = Node[i].fullDepth; for ( i=0; i<Nobjects[LINK]; i++) link_validate(i); for ( i=0; i<Nobjects[NODE]; i++) node_validate(i); // --- adjust time steps if necessary if ( DryStep < WetStep ) { report_writeWarningMsg(WARN06, ""); DryStep = WetStep; } if ( RouteStep > (double)WetStep ) { report_writeWarningMsg(WARN07, ""); RouteStep = WetStep; } // --- adjust individual reporting flags to match global reporting flag if ( RptFlags.subcatchments == ALL ) for (i=0; i<Nobjects[SUBCATCH]; i++) Subcatch[i].rptFlag = TRUE; if ( RptFlags.nodes == ALL ) for (i=0; i<Nobjects[NODE]; i++) Node[i].rptFlag = TRUE; if ( RptFlags.links == ALL ) for (i=0; i<Nobjects[LINK]; i++) Link[i].rptFlag = TRUE; // --- validate dynamic wave options if ( RouteModel == DW ) 
dynwave_validate(); // --- adjust number of parallel threads to be used //(5.1.013) #pragma omp parallel //(5.1.008) { if ( NumThreads == 0 ) NumThreads = omp_get_num_threads(); //(5.1.008) else NumThreads = MIN(NumThreads, omp_get_num_threads()); //(5.1.008) } if ( Nobjects[LINK] < 4 * NumThreads ) NumThreads = 1; //(5.1.008) } //============================================================================= void project_close() // // Input: none // Output: none // Purpose: closes a SWMM project. // { deleteObjects(); deleteHashTables(); } //============================================================================= int project_init(void) // // Input: none // Output: returns an error code // Purpose: initializes the internal state of all objects. // { int j; climate_initState(); lid_initState(); for (j=0; j<Nobjects[TSERIES]; j++) table_tseriesInit(&Tseries[j]); for (j=0; j<Nobjects[GAGE]; j++) gage_initState(j); for (j=0; j<Nobjects[SUBCATCH]; j++) subcatch_initState(j); for (j=0; j<Nobjects[NODE]; j++) node_initState(j); for (j=0; j<Nobjects[LINK]; j++) link_initState(j); return ErrorCode; } //============================================================================= int project_addObject(int type, char *id, int n) // // Input: type = object type // id = object ID string // n = object index // Output: returns 0 if object already added, 1 if not, -1 if hashing fails // Purpose: adds an object ID to a hash table // { int result; int len; char *newID; // --- do nothing if object already placed in hash table if ( project_findObject(type, id) >= 0 ) return 0; // --- use memory from the hash tables' common memory pool to store // a copy of the object's ID string len = strlen(id) + 1; newID = (char *) Alloc(len*sizeof(char)); strcpy(newID, id); // --- insert object's ID into the hash table for that type of object result = HTinsert(Htable[type], newID, n); if ( result == 0 ) result = -1; return result; } 
//============================================================================= int project_findObject(int type, char *id) // // Input: type = object type // id = object ID // Output: returns index of object with given ID, or -1 if ID not found // Purpose: uses hash table to find index of an object with a given ID. // { return HTfind(Htable[type], id); } //============================================================================= char *project_findID(int type, char *id) // // Input: type = object type // id = ID name being sought // Output: returns pointer to location where object's ID string is stored // Purpose: uses hash table to find address of given string entry. // { return HTfindKey(Htable[type], id); } //============================================================================= double ** project_createMatrix(int nrows, int ncols) // // Input: nrows = number of rows (0-based) // ncols = number of columns (0-based) // Output: returns a pointer to a matrix // Purpose: allocates memory for a matrix of doubles. // { int i,j; double **a; // --- allocate pointers to rows a = (double **) malloc(nrows * sizeof(double *)); if ( !a ) return NULL; // --- allocate rows and set pointers to them a[0] = (double *) malloc (nrows * ncols * sizeof(double)); if ( !a[0] ) return NULL; for ( i = 1; i < nrows; i++ ) a[i] = a[i-1] + ncols; for ( i = 0; i < nrows; i++) { for ( j = 0; j < ncols; j++) a[i][j] = 0.0; } // --- return pointer to array of pointers to rows return a; } //============================================================================= void project_freeMatrix(double **a) // // Input: a = matrix of floats // Output: none // Purpose: frees memory allocated for a matrix of doubles. 
// { if ( a != NULL ) { if ( a[0] != NULL ) free( a[0] ); free( a ); } } //============================================================================= int project_readOption(char* s1, char* s2) // // Input: s1 = option keyword // s2 = string representation of option's value // Output: returns error code // Purpose: reads a project option from a pair of string tokens. // // NOTE: all project options have default values assigned in setDefaults(). // { int k, m, h, s; double tStep; char strDate[25]; DateTime aTime; DateTime aDate; // --- determine which option is being read k = findmatch(s1, OptionWords); if ( k < 0 ) return error_setInpError(ERR_KEYWORD, s1); switch ( k ) { // --- choice of flow units case FLOW_UNITS: m = findmatch(s2, FlowUnitWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); FlowUnits = m; if ( FlowUnits <= MGD ) UnitSystem = US; else UnitSystem = SI; break; // --- choice of infiltration modeling method case INFIL_MODEL: m = findmatch(s2, InfilModelWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); InfilModel = m; break; // --- choice of flow routing method case ROUTE_MODEL: m = findmatch(s2, RouteModelWords); if ( m < 0 ) m = findmatch(s2, OldRouteModelWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); if ( m == NO_ROUTING ) IgnoreRouting = TRUE; else RouteModel = m; if ( RouteModel == EKW ) RouteModel = KW; break; // --- simulation start date case START_DATE: if ( !datetime_strToDate(s2, &StartDate) ) { return error_setInpError(ERR_DATETIME, s2); } break; // --- simulation start time of day case START_TIME: if ( !datetime_strToTime(s2, &StartTime) ) { return error_setInpError(ERR_DATETIME, s2); } break; // --- simulation ending date case END_DATE: if ( !datetime_strToDate(s2, &EndDate) ) { return error_setInpError(ERR_DATETIME, s2); } break; // --- simulation ending time of day case END_TIME: if ( !datetime_strToTime(s2, &EndTime) ) { return error_setInpError(ERR_DATETIME, s2); } break; // --- reporting 
start date case REPORT_START_DATE: if ( !datetime_strToDate(s2, &ReportStartDate) ) { return error_setInpError(ERR_DATETIME, s2); } break; // --- reporting start time of day case REPORT_START_TIME: if ( !datetime_strToTime(s2, &ReportStartTime) ) { return error_setInpError(ERR_DATETIME, s2); } break; // --- day of year when street sweeping begins or when it ends // (year is arbitrarily set to 1947 so that the dayOfYear // function can be applied) case SWEEP_START: case SWEEP_END: strcpy(strDate, s2); strcat(strDate, "/1947"); if ( !datetime_strToDate(strDate, &aDate) ) { return error_setInpError(ERR_DATETIME, s2); } m = datetime_dayOfYear(aDate); if ( k == SWEEP_START ) SweepStart = m; else SweepEnd = m; break; // --- number of antecedent dry days case START_DRY_DAYS: StartDryDays = atof(s2); if ( StartDryDays < 0.0 ) { return error_setInpError(ERR_NUMBER, s2); } break; // --- runoff or reporting time steps // (input is in hrs:min:sec format, time step saved as seconds) case WET_STEP: case DRY_STEP: case REPORT_STEP: case RULE_STEP: //(5.1.013) if ( !datetime_strToTime(s2, &aTime) ) { return error_setInpError(ERR_DATETIME, s2); } datetime_decodeTime(aTime, &h, &m, &s); h += 24*(int)aTime; s = s + 60*m + 3600*h; // --- RuleStep allowed to be 0 while other time steps must be > 0 //(5.1.013) if (k == RULE_STEP) // { // if (s < 0) return error_setInpError(ERR_NUMBER, s2); // } // else if ( s <= 0 ) return error_setInpError(ERR_NUMBER, s2); // switch ( k ) { case WET_STEP: WetStep = s; break; case DRY_STEP: DryStep = s; break; case REPORT_STEP: ReportStep = s; break; case RULE_STEP: RuleStep = s; break; //(5.1.013) } break; // --- type of damping applied to inertial terms of dynamic wave routing case INERT_DAMPING: m = findmatch(s2, InertDampingWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); else InertDamping = m; break; // --- Yes/No options (NO = 0, YES = 1) case ALLOW_PONDING: case SLOPE_WEIGHTING: case SKIP_STEADY_STATE: case IGNORE_RAINFALL: case 
IGNORE_SNOWMELT: case IGNORE_GWATER: case IGNORE_ROUTING: case IGNORE_QUALITY: case IGNORE_RDII: m = findmatch(s2, NoYesWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); switch ( k ) { case ALLOW_PONDING: AllowPonding = m; break; case SLOPE_WEIGHTING: SlopeWeighting = m; break; case SKIP_STEADY_STATE: SkipSteadyState = m; break; case IGNORE_RAINFALL: IgnoreRainfall = m; break; case IGNORE_SNOWMELT: IgnoreSnowmelt = m; break; case IGNORE_GWATER: IgnoreGwater = m; break; case IGNORE_ROUTING: IgnoreRouting = m; break; case IGNORE_QUALITY: IgnoreQuality = m; break; case IGNORE_RDII: IgnoreRDII = m; break; } break; case NORMAL_FLOW_LTD: m = findmatch(s2, NormalFlowWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); NormalFlowLtd = m; break; case FORCE_MAIN_EQN: m = findmatch(s2, ForceMainEqnWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); ForceMainEqn = m; break; case LINK_OFFSETS: m = findmatch(s2, LinkOffsetWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); LinkOffsets = m; break; // --- compatibility option for selecting solution method for // dynamic wave flow routing (NOT CURRENTLY USED) case COMPATIBILITY: if ( strcomp(s2, "3") ) Compatibility = SWMM3; else if ( strcomp(s2, "4") ) Compatibility = SWMM4; else if ( strcomp(s2, "5") ) Compatibility = SWMM5; else return error_setInpError(ERR_KEYWORD, s2); break; // --- routing or lengthening time step (in decimal seconds) // (lengthening time step is used in Courant stability formula // to artificially lengthen conduits for dynamic wave flow routing // (a value of 0 means that no lengthening is used)) case ROUTE_STEP: case LENGTHENING_STEP: if ( !getDouble(s2, &tStep) ) { if ( !datetime_strToTime(s2, &aTime) ) { return error_setInpError(ERR_NUMBER, s2); } else { datetime_decodeTime(aTime, &h, &m, &s); h += 24*(int)aTime; s = s + 60*m + 3600*h; tStep = s; } } if ( k == ROUTE_STEP ) { if ( tStep <= 0.0 ) return error_setInpError(ERR_NUMBER, s2); RouteStep = tStep; } 
else LengtheningStep = MAX(0.0, tStep); break; // --- minimum variable time step for dynamic wave routing case MIN_ROUTE_STEP: if ( !getDouble(s2, &MinRouteStep) || MinRouteStep < 0.0 ) return error_setInpError(ERR_NUMBER, s2); break; case NUM_THREADS: m = atoi(s2); if ( m < 0 ) return error_setInpError(ERR_NUMBER, s2); NumThreads = m; break; // --- safety factor applied to variable time step estimates under // dynamic wave flow routing (value of 0 indicates that variable // time step option not used) case VARIABLE_STEP: if ( !getDouble(s2, &CourantFactor) ) return error_setInpError(ERR_NUMBER, s2); if ( CourantFactor < 0.0 || CourantFactor > 2.0 ) return error_setInpError(ERR_NUMBER, s2); break; // --- minimum surface area (ft2 or sq. meters) associated with nodes // under dynamic wave flow routing case MIN_SURFAREA: if (!getDouble(s2, &MinSurfArea)) //(5.1.013) return error_setInpError(ERR_NUMBER, s2); //(5.1.013) if (MinSurfArea < 0.0) //(5.1.013) return error_setInpError(ERR_NUMBER, s2); //(5.1.013) break; // --- minimum conduit slope (%) case MIN_SLOPE: if ( !getDouble(s2, &MinSlope) ) return error_setInpError(ERR_NUMBER, s2); if ( MinSlope < 0.0 || MinSlope >= 100 ) return error_setInpError(ERR_NUMBER, s2); MinSlope /= 100.0; break; // --- maximum trials / time step for dynamic wave routing case MAX_TRIALS: m = atoi(s2); if ( m < 0 ) return error_setInpError(ERR_NUMBER, s2); MaxTrials = m; break; // --- head convergence tolerance for dynamic wave routing case HEAD_TOL: if ( !getDouble(s2, &HeadTol) ) { return error_setInpError(ERR_NUMBER, s2); } break; // --- steady state tolerance on system inflow - outflow case SYS_FLOW_TOL: if ( !getDouble(s2, &SysFlowTol) ) { return error_setInpError(ERR_NUMBER, s2); } SysFlowTol /= 100.0; break; // --- steady state tolerance on nodal lateral inflow case LAT_FLOW_TOL: if ( !getDouble(s2, &LatFlowTol) ) { return error_setInpError(ERR_NUMBER, s2); } LatFlowTol /= 100.0; break; // --- method used for surcharging in dynamic 
wave flow routing //(5.1.013) case SURCHARGE_METHOD: m = findmatch(s2, SurchargeWords); if (m < 0) return error_setInpError(ERR_KEYWORD, s2); SurchargeMethod = m; break; case TEMPDIR: // Temporary Directory sstrncpy(TempDir, s2, MAXFNAME); break; } return 0; } //============================================================================= void initPointers() // // Input: none // Output: none // Purpose: assigns NULL to all dynamic arrays for a new project. // { Gage = NULL; Subcatch = NULL; Node = NULL; Outfall = NULL; Divider = NULL; Storage = NULL; Link = NULL; Conduit = NULL; Pump = NULL; Orifice = NULL; Weir = NULL; Outlet = NULL; Pollut = NULL; Landuse = NULL; Pattern = NULL; Curve = NULL; Tseries = NULL; Transect = NULL; Shape = NULL; Aquifer = NULL; UnitHyd = NULL; Snowmelt = NULL; Event = NULL; MemPoolAllocated = FALSE; } //============================================================================= void setDefaults() // // Input: none // Output: none // Purpose: assigns default values to project variables. // { int i, j; // Project title & temp. file path for (i = 0; i < MAXTITLE; i++) strcpy(Title[i], ""); strcpy(TempDir, ""); // Interface files Frain.mode = SCRATCH_FILE; // Use scratch rainfall file Fclimate.mode = NO_FILE; Frunoff.mode = NO_FILE; Frdii.mode = NO_FILE; Fhotstart1.mode = NO_FILE; Fhotstart2.mode = NO_FILE; Finflows.mode = NO_FILE; Foutflows.mode = NO_FILE; Frain.file = NULL; Fclimate.file = NULL; Frunoff.file = NULL; Frdii.file = NULL; Fhotstart1.file = NULL; Fhotstart2.file = NULL; Finflows.file = NULL; Foutflows.file = NULL; Fout.file = NULL; Fout.mode = NO_FILE; // Analysis options UnitSystem = US; // US unit system FlowUnits = CFS; // CFS flow units InfilModel = HORTON; // Horton infiltration method RouteModel = KW; // Kin. 
wave flow routing method SurchargeMethod = EXTRAN; // Use EXTRAN method for surcharging //(5.1.013) CrownCutoff = 0.96; //(5.1.013) AllowPonding = FALSE; // No ponding at nodes InertDamping = SOME; // Partial inertial damping NormalFlowLtd = BOTH; // Default normal flow limitation ForceMainEqn = H_W; // Hazen-Williams eqn. for force mains LinkOffsets = DEPTH_OFFSET; // Use depth for link offsets LengtheningStep = 0; // No lengthening of conduits CourantFactor = 0.0; // No variable time step MinSurfArea = 0.0; // Force use of default min. surface area MinSlope = 0.0; // No user supplied minimum conduit slope SkipSteadyState = FALSE; // Do flow routing in steady state periods IgnoreRainfall = FALSE; // Analyze rainfall/runoff IgnoreRDII = FALSE; // Analyze RDII IgnoreSnowmelt = FALSE; // Analyze snowmelt IgnoreGwater = FALSE; // Analyze groundwater IgnoreRouting = FALSE; // Analyze flow routing IgnoreQuality = FALSE; // Analyze water quality WetStep = 300; // Runoff wet time step (secs) DryStep = 3600; // Runoff dry time step (secs) RuleStep = 0; // Rules evaluated at each routing step RouteStep = 300.0; // Routing time step (secs) MinRouteStep = 0.5; // Minimum variable time step (sec) ReportStep = 900; // Reporting time step (secs) StartDryDays = 0.0; // Antecedent dry days MaxTrials = 0; // Force use of default max. 
trials HeadTol = 0.0; // Force use of default head tolerance SysFlowTol = 0.05; // System flow tolerance for steady state LatFlowTol = 0.05; // Lateral flow tolerance for steady state NumThreads = 0; // Number of parallel threads to use NumEvents = 0; // Number of detailed routing events // Deprecated options SlopeWeighting = TRUE; // Use slope weighting Compatibility = SWMM4; // Use SWMM 4 up/dn weighting method // Starting & ending date/time StartDate = datetime_encodeDate(2004, 1, 1); StartTime = datetime_encodeTime(0,0,0); StartDateTime = StartDate + StartTime; EndDate = StartDate; EndTime = 0.0; ReportStartDate = NO_DATE; ReportStartTime = NO_DATE; SweepStart = 1; SweepEnd = 365; // Reporting options RptFlags.input = FALSE; RptFlags.continuity = TRUE; RptFlags.flowStats = TRUE; RptFlags.controls = FALSE; RptFlags.subcatchments = FALSE; RptFlags.nodes = FALSE; RptFlags.links = FALSE; RptFlags.nodeStats = FALSE; RptFlags.averages = FALSE; // Temperature data Temp.dataSource = NO_TEMP; Temp.tSeries = -1; Temp.ta = 70.0; Temp.elev = 0.0; Temp.anglat = 40.0; Temp.dtlong = 0.0; Temp.tmax = MISSING; // Wind speed data Wind.type = MONTHLY_WIND; for ( i=0; i<12; i++ ) Wind.aws[i] = 0.0; // Snowmelt parameters Snow.snotmp = 34.0; Snow.tipm = 0.5; Snow.rnm = 0.6; // Snow areal depletion curves for pervious and impervious surfaces for ( i=0; i<2; i++ ) { for ( j=0; j<10; j++) Snow.adc[i][j] = 1.0; } // Evaporation rates Evap.type = CONSTANT_EVAP; for (i=0; i<12; i++) { Evap.monthlyEvap[i] = 0.0; Evap.panCoeff[i] = 1.0; } Evap.recoveryPattern = -1; Evap.recoveryFactor = 1.0; Evap.tSeries = -1; Evap.dryOnly = FALSE; // Climate adjustments for (i = 0; i < 12; i++) { Adjust.temp[i] = 0.0; // additive adjustments Adjust.evap[i] = 0.0; // additive adjustments Adjust.rain[i] = 1.0; // multiplicative adjustments Adjust.hydcon[i] = 1.0; // hyd. 
conductivity adjustments } Adjust.rainFactor = 1.0; Adjust.hydconFactor = 1.0; } //============================================================================= void openFiles(char *f1, char *f2, char *f3) // // Input: f1 = name of input file // f2 = name of report file // f3 = name of binary output file // Output: none // Purpose: opens a project's input and report files. // { // --- initialize file pointers to NULL Finp.file = NULL; Frpt.file = NULL; Fout.file = NULL; // --- save file names sstrncpy(Finp.name, f1, MAXFNAME); sstrncpy(Frpt.name, f2, MAXFNAME); sstrncpy(Fout.name, f3, MAXFNAME); // --- check that file names are not identical if (strcomp(f1, f2) || strcomp(f1, f3) || strcomp(f2, f3)) { writecon(FMT11); ErrorCode = ERR_FILE_NAME; return; } // --- open input and report files if ((Finp.file = fopen(f1,"rt")) == NULL) { writecon(FMT12); writecon(f1); ErrorCode = ERR_INP_FILE; return; } if ((Frpt.file = fopen(f2,"wt")) == NULL) { writecon(FMT13); ErrorCode = ERR_RPT_FILE; return; } } //============================================================================= void createObjects() // // Input: none // Output: none // Purpose: allocates memory for project's objects. // // NOTE: number of each type of object has already been determined in // project_readInput(). 
// { int j, k; // --- allocate memory for each category of object if ( ErrorCode ) return; Gage = (TGage *) calloc(Nobjects[GAGE], sizeof(TGage)); Subcatch = (TSubcatch *) calloc(Nobjects[SUBCATCH], sizeof(TSubcatch)); Node = (TNode *) calloc(Nobjects[NODE], sizeof(TNode)); Outfall = (TOutfall *) calloc(Nnodes[OUTFALL], sizeof(TOutfall)); Divider = (TDivider *) calloc(Nnodes[DIVIDER], sizeof(TDivider)); Storage = (TStorage *) calloc(Nnodes[STORAGE], sizeof(TStorage)); Link = (TLink *) calloc(Nobjects[LINK], sizeof(TLink)); Conduit = (TConduit *) calloc(Nlinks[CONDUIT], sizeof(TConduit)); Pump = (TPump *) calloc(Nlinks[PUMP], sizeof(TPump)); Orifice = (TOrifice *) calloc(Nlinks[ORIFICE], sizeof(TOrifice)); Weir = (TWeir *) calloc(Nlinks[WEIR], sizeof(TWeir)); Outlet = (TOutlet *) calloc(Nlinks[OUTLET], sizeof(TOutlet)); Pollut = (TPollut *) calloc(Nobjects[POLLUT], sizeof(TPollut)); Landuse = (TLanduse *) calloc(Nobjects[LANDUSE], sizeof(TLanduse)); Pattern = (TPattern *) calloc(Nobjects[TIMEPATTERN], sizeof(TPattern)); Curve = (TTable *) calloc(Nobjects[CURVE], sizeof(TTable)); Tseries = (TTable *) calloc(Nobjects[TSERIES], sizeof(TTable)); Aquifer = (TAquifer *) calloc(Nobjects[AQUIFER], sizeof(TAquifer)); UnitHyd = (TUnitHyd *) calloc(Nobjects[UNITHYD], sizeof(TUnitHyd)); Snowmelt = (TSnowmelt *) calloc(Nobjects[SNOWMELT], sizeof(TSnowmelt)); Shape = (TShape *) calloc(Nobjects[SHAPE], sizeof(TShape)); // --- create array of detailed routing event periods Event = (TEvent *) calloc(NumEvents+1, sizeof(TEvent)); Event[NumEvents].start = BIG; Event[NumEvents].end = BIG + 1.0; // --- create LID objects lid_create(Nobjects[LID], Nobjects[SUBCATCH]); // --- create control rules ErrorCode = controls_create(Nobjects[CONTROL]); if ( ErrorCode ) return; // --- create cross section transects ErrorCode = transect_create(Nobjects[TRANSECT]); if ( ErrorCode ) return; // --- allocate memory for infiltration data infil_create(Nobjects[SUBCATCH], InfilModel); // --- allocate 
memory for water quality state variables for (j = 0; j < Nobjects[SUBCATCH]; j++) { Subcatch[j].initBuildup = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].pondedQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].totalLoad = (double *) calloc(Nobjects[POLLUT], sizeof(double)); } for (j = 0; j < Nobjects[NODE]; j++) { Node[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Node[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Node[j].extInflow = NULL; Node[j].dwfInflow = NULL; Node[j].rdiiInflow = NULL; Node[j].treatment = NULL; } for (j = 0; j < Nobjects[LINK]; j++) { Link[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Link[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Link[j].totalLoad = (double *) calloc(Nobjects[POLLUT], sizeof(double)); } // --- allocate memory for land use buildup/washoff functions for (j = 0; j < Nobjects[LANDUSE]; j++) { Landuse[j].buildupFunc = (TBuildup *) calloc(Nobjects[POLLUT], sizeof(TBuildup)); Landuse[j].washoffFunc = (TWashoff *) calloc(Nobjects[POLLUT], sizeof(TWashoff)); } // --- allocate memory for subcatchment landuse factors for (j = 0; j < Nobjects[SUBCATCH]; j++) { Subcatch[j].landFactor = (TLandFactor *) calloc(Nobjects[LANDUSE], sizeof(TLandFactor)); for (k = 0; k < Nobjects[LANDUSE]; k++) { Subcatch[j].landFactor[k].buildup = (double *) calloc(Nobjects[POLLUT], sizeof(double)); } } // --- initialize buildup & washoff functions for (j = 0; j < Nobjects[LANDUSE]; j++) { for (k = 0; k < Nobjects[POLLUT]; k++) { Landuse[j].buildupFunc[k].funcType = NO_BUILDUP; Landuse[j].buildupFunc[k].normalizer = PER_AREA; Landuse[j].washoffFunc[k].funcType = NO_WASHOFF; } } // --- initialize rain gage properties for (j = 0; j < Nobjects[GAGE]; j++) { Gage[j].tSeries = -1; 
strcpy(Gage[j].fname, "");
    }

    // --- initialize subcatchment properties
    //     (index fields use -1 to mean "not assigned")
    for (j = 0; j < Nobjects[SUBCATCH]; j++)
    {
        Subcatch[j].outSubcatch   = -1;
        Subcatch[j].outNode       = -1;
        Subcatch[j].infil         = -1;
        Subcatch[j].groundwater   = NULL;
        Subcatch[j].gwLatFlowExpr = NULL;
        Subcatch[j].gwDeepFlowExpr = NULL;
        Subcatch[j].snowpack      = NULL;
        Subcatch[j].lidArea       = 0.0;
        for (k = 0; k < Nobjects[POLLUT]; k++)
        {
            Subcatch[j].initBuildup[k] = 0.0;
        }
    }

    // --- initialize RDII unit hydrograph properties
    for ( j = 0; j < Nobjects[UNITHYD]; j++ ) rdii_initUnitHyd(j);

    // --- initialize snowmelt properties
    for ( j = 0; j < Nobjects[SNOWMELT]; j++ ) snow_initSnowmelt(j);

    // --- initialize storage node exfiltration
    for (j = 0; j < Nnodes[STORAGE]; j++) Storage[j].exfil = NULL;

    // --- initialize link properties
    for (j = 0; j < Nobjects[LINK]; j++)
    {
        Link[j].xsect.type   = -1;      // -1 = no cross section assigned yet
        Link[j].cLossInlet   = 0.0;
        Link[j].cLossOutlet  = 0.0;
        Link[j].cLossAvg     = 0.0;
        Link[j].hasFlapGate  = FALSE;
    }
    for (j = 0; j < Nlinks[PUMP]; j++) Pump[j].pumpCurve = -1;

    // --- initialize reporting flags
    for (j = 0; j < Nobjects[SUBCATCH]; j++) Subcatch[j].rptFlag = FALSE;
    for (j = 0; j < Nobjects[NODE]; j++) Node[j].rptFlag = FALSE;
    for (j = 0; j < Nobjects[LINK]; j++) Link[j].rptFlag = FALSE;

    // --- initialize curves, time series, and time patterns
    for (j = 0; j < Nobjects[CURVE]; j++)   table_init(&Curve[j]);
    for (j = 0; j < Nobjects[TSERIES]; j++) table_init(&Tseries[j]);
    for (j = 0; j < Nobjects[TIMEPATTERN]; j++) inflow_initDwfPattern(j);
}

//=============================================================================

void deleteObjects()
//
//  Input:   none
//  Output:  none
//  Purpose: frees memory allocated for a project's objects.
//
//  NOTE: care is taken to first free objects that are properties of another
//        object before the latter is freed (e.g., we must free a
//        subcatchment's land use factors before freeing the subcatchment).
//
{
    int j, k;

    // --- free memory for landuse factors & groundwater
    if ( Subcatch ) for (j = 0; j < Nobjects[SUBCATCH]; j++)
    {
        for (k = 0; k < Nobjects[LANDUSE]; k++)
        {
            FREE(Subcatch[j].landFactor[k].buildup);
        }
        FREE(Subcatch[j].landFactor);
        FREE(Subcatch[j].groundwater);
        gwater_deleteFlowExpression(j);
        FREE(Subcatch[j].snowpack);
    }

    // --- free memory for buildup/washoff functions
    if ( Landuse ) for (j = 0; j < Nobjects[LANDUSE]; j++)
    {
        FREE(Landuse[j].buildupFunc);
        FREE(Landuse[j].washoffFunc)
    }

    // --- free memory for water quality state variables
    if ( Subcatch ) for (j = 0; j < Nobjects[SUBCATCH]; j++)
    {
        FREE(Subcatch[j].initBuildup);
        FREE(Subcatch[j].oldQual);
        FREE(Subcatch[j].newQual);
        FREE(Subcatch[j].pondedQual);
        FREE(Subcatch[j].totalLoad);
    }
    if ( Node ) for (j = 0; j < Nobjects[NODE]; j++)
    {
        FREE(Node[j].oldQual);
        FREE(Node[j].newQual);
    }
    if ( Link ) for (j = 0; j < Nobjects[LINK]; j++)
    {
        FREE(Link[j].oldQual);
        FREE(Link[j].newQual);
        FREE(Link[j].totalLoad);
    }

    // --- free memory used for rainfall infiltration
    infil_delete();

    // --- free memory used for storage exfiltration
    //     NOTE(review): this loop (and the two below) dereference the
    //     Storage / Outfall / Node arrays but are guarded only by
    //     `if ( Node )` — presumably all these arrays are allocated and
    //     freed together; verify against createObjects().
    if ( Node ) for (j = 0; j < Nnodes[STORAGE]; j++)
    {
        if ( Storage[j].exfil )
        {
            FREE(Storage[j].exfil->btmExfil);
            FREE(Storage[j].exfil->bankExfil);
            FREE(Storage[j].exfil);
        }
    }

    // --- free memory used for outfall pollutants loads
    if ( Node ) for (j = 0; j < Nnodes[OUTFALL]; j++)
        FREE(Outfall[j].wRouted);

    // --- free memory used for nodal inflows & treatment functions
    if ( Node ) for (j = 0; j < Nobjects[NODE]; j++)
    {
        inflow_deleteExtInflows(j);
        inflow_deleteDwfInflows(j);
        rdii_deleteRdiiInflow(j);
        treatmnt_delete(j);
    }

    // --- delete table entries for curves and time series
    if ( Tseries ) for (j = 0; j < Nobjects[TSERIES]; j++)
        table_deleteEntries(&Tseries[j]);
    if ( Curve ) for (j = 0; j < Nobjects[CURVE]; j++)
        table_deleteEntries(&Curve[j]);

    // --- delete cross section transects
    transect_delete();

    // --- delete control rules
    controls_delete();

    // --- delete LIDs
    lid_delete();

    // --- now free each major category of object
    FREE(Gage);
    FREE(Subcatch);
    FREE(Node);
    FREE(Outfall);
    FREE(Divider);
    FREE(Storage);
    FREE(Link);
    FREE(Conduit);
    FREE(Pump);
    FREE(Orifice);
    FREE(Weir);
    FREE(Outlet);
    FREE(Pollut);
    FREE(Landuse);
    FREE(Pattern);
    FREE(Curve);
    FREE(Tseries);
    FREE(Aquifer);
    FREE(UnitHyd);
    FREE(Snowmelt);
    FREE(Shape);
    FREE(Event);
}

//=============================================================================

void createHashTables()
//
//  Input:   none
//  Output:  none (on failure, sets the project's error status through
//           report_writeErrorMsg)
//  Purpose: allocates memory for object ID hash tables
//
{
    int j;
    MemPoolAllocated = FALSE;
    for (j = 0; j < MAX_OBJ_TYPES ; j++)
    {
        Htable[j] = HTcreate();
        if ( Htable[j] == NULL ) report_writeErrorMsg(ERR_MEMORY, "");
    }

    // --- initialize memory pool used to store object ID's
    if ( AllocInit() == NULL ) report_writeErrorMsg(ERR_MEMORY, "");
    else MemPoolAllocated = TRUE;
}

//=============================================================================

void deleteHashTables()
//
//  Input:   none
//  Output:  none
//  Purpose: frees memory allocated for object ID hash tables
//
{
    int j;
    for (j = 0; j < MAX_OBJ_TYPES; j++)
    {
        if ( Htable[j] != NULL ) HTfree(Htable[j]);
    }

    // --- free object ID memory pool
    if ( MemPoolAllocated ) AllocFreePool();
}

//=============================================================================
/* ========================== bmesh_marking.c ========================== */
/* * ***** BEGIN GPL LICENSE BLOCK ***** * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Contributor(s): Joseph Eagar, Geoffrey Bantle, Campbell Barton * * ***** END GPL LICENSE BLOCK ***** */ /** \file blender/bmesh/intern/bmesh_marking.c * \ingroup bmesh * * Selection routines for bmesh structures. * This is actually all old code ripped from * editmesh_lib.c and slightly modified to work * for bmesh's. This also means that it has some * of the same problems.... something that * that should be addressed eventually. 
 */

#include <stddef.h>

#include "MEM_guardedalloc.h"

#include "DNA_scene_types.h"

#include "BLI_math.h"
#include "BLI_listbase.h"

#include "bmesh.h"
#include "bmesh_structure.h"

/* Recompute the selected-element counters (totvertsel/totedgesel/totfacesel)
 * from scratch by walking each element domain of the mesh. */
static void recount_totsels(BMesh *bm)
{
	const char iter_types[3] = {BM_VERTS_OF_MESH, BM_EDGES_OF_MESH, BM_FACES_OF_MESH};
	int *tots[3];
	int i;

	/* recount (tot * sel) variables */
	bm->totvertsel = bm->totedgesel = bm->totfacesel = 0;
	/* each OMP iteration writes only through its own tots[i] slot,
	 * so the three domains can be counted independently */
	tots[0] = &bm->totvertsel;
	tots[1] = &bm->totedgesel;
	tots[2] = &bm->totfacesel;

#pragma omp parallel for schedule(static) if (bm->totvert + bm->totedge + bm->totface >= BM_OMP_LIMIT)
	for (i = 0; i < 3; i++) {
		BMIter iter;
		BMElem *ele;
		int count = 0;

		BM_ITER_MESH (ele, &iter, bm, iter_types[i]) {
			if (BM_elem_flag_test(ele, BM_ELEM_SELECT)) count += 1;
		}
		*tots[i] = count;
	}
}

/** \name BMesh helper functions for selection & hide flushing.
 * \{ */

/* true when any edge in v's disk cycle OTHER than e_first is selected */
static bool bm_vert_is_edge_select_any_other(const BMVert *v, const BMEdge *e_first)
{
	const BMEdge *e_iter = e_first;

	/* start by stepping over the current edge */
	while ((e_iter = bmesh_disk_edge_next(e_iter, v)) != e_first) {
		if (BM_elem_flag_test(e_iter, BM_ELEM_SELECT)) {
			return true;
		}
	}
	return false;
}

#if 0
static bool bm_vert_is_edge_select_any(const BMVert *v)
{
	if (v->e) {
		const BMEdge *e_iter, *e_first;
		e_iter = e_first = v->e;
		do {
			if (BM_elem_flag_test(e_iter, BM_ELEM_SELECT)) {
				return true;
			}
		} while ((e_iter = bmesh_disk_edge_next(e_iter, v)) != e_first);
	}
	return false;
}
#endif

/* true when at least one edge around v is not hidden */
static bool bm_vert_is_edge_visible_any(const BMVert *v)
{
	if (v->e) {
		const BMEdge *e_iter, *e_first;
		e_iter = e_first = v->e;
		do {
			if (!BM_elem_flag_test(e_iter, BM_ELEM_HIDDEN)) {
				return true;
			}
		} while ((e_iter = bmesh_disk_edge_next(e_iter, v)) != e_first);
	}
	return false;
}

/* true when any face in the radial cycle OTHER than l_first's face is selected */
static bool bm_edge_is_face_select_any_other(BMLoop *l_first)
{
	const BMLoop *l_iter = l_first;

	/* start by stepping over the current face */
	while ((l_iter = l_iter->radial_next) != l_first) {
		if (BM_elem_flag_test(l_iter->f, BM_ELEM_SELECT)) {
			return
true; } } return false; } #if 0 static bool bm_edge_is_face_select_any(const BMEdge *e) { if (e->l) { const BMLoop *l_iter, *l_first; l_iter = l_first = e->l; do { if (BM_elem_flag_test(l_iter->f, BM_ELEM_SELECT)) { return true; } } while ((l_iter = l_iter->radial_next) != l_first); } return false; } #endif static bool bm_edge_is_face_visible_any(const BMEdge *e) { if (e->l) { const BMLoop *l_iter, *l_first; l_iter = l_first = e->l; do { if (!BM_elem_flag_test(l_iter->f, BM_ELEM_HIDDEN)) { return true; } } while ((l_iter = l_iter->radial_next) != l_first); } return false; } /** \} */ /** * \brief Select Mode Clean * * Remove isolated selected elements when in a mode doesn't support them. * eg: in edge-mode a selected vertex must be connected to a selected edge. * * \note this could be made apart of #BM_mesh_select_mode_flush_ex */ void BM_mesh_select_mode_clean_ex(BMesh *bm, const short selectmode) { if (selectmode & SCE_SELECT_VERTEX) { /* pass */ } else if (selectmode & SCE_SELECT_EDGE) { BMIter iter; if (bm->totvertsel) { BMVert *v; BM_ITER_MESH (v, &iter, bm, BM_VERTS_OF_MESH) { BM_elem_flag_disable(v, BM_ELEM_SELECT); } bm->totvertsel = 0; } if (bm->totedgesel) { BMEdge *e; BM_ITER_MESH (e, &iter, bm, BM_EDGES_OF_MESH) { if (BM_elem_flag_test(e, BM_ELEM_SELECT)) { BM_vert_select_set(bm, e->v1, true); BM_vert_select_set(bm, e->v2, true); } } } } else if (selectmode & SCE_SELECT_FACE) { BMIter iter; if (bm->totvertsel) { BMVert *v; BM_ITER_MESH (v, &iter, bm, BM_VERTS_OF_MESH) { BM_elem_flag_disable(v, BM_ELEM_SELECT); } bm->totvertsel = 0; } if (bm->totedgesel) { BMEdge *e; BM_ITER_MESH (e, &iter, bm, BM_EDGES_OF_MESH) { BM_elem_flag_disable(e, BM_ELEM_SELECT); } bm->totedgesel = 0; } if (bm->totfacesel) { BMFace *f; BM_ITER_MESH (f, &iter, bm, BM_FACES_OF_MESH) { if (BM_elem_flag_test(f, BM_ELEM_SELECT)) { BMLoop *l_iter, *l_first; l_iter = l_first = BM_FACE_FIRST_LOOP(f); do { BM_edge_select_set(bm, l_iter->e, true); } while ((l_iter = l_iter->next) != 
l_first); } } } } } void BM_mesh_select_mode_clean(BMesh *bm) { BM_mesh_select_mode_clean_ex(bm, bm->selectmode); } /** * \brief Select Mode Flush * * Makes sure to flush selections 'upwards' * (ie: all verts of an edge selects the edge and so on). * This should only be called by system and not tool authors. */ void BM_mesh_select_mode_flush_ex(BMesh *bm, const short selectmode) { BMEdge *e; BMLoop *l_iter; BMLoop *l_first; BMFace *f; BMIter eiter; BMIter fiter; if (selectmode & SCE_SELECT_VERTEX) { /* both loops only set edge/face flags and read off verts */ #pragma omp parallel sections if (bm->totedge + bm->totface >= BM_OMP_LIMIT) { #pragma omp section { BM_ITER_MESH (e, &eiter, bm, BM_EDGES_OF_MESH) { if (BM_elem_flag_test(e->v1, BM_ELEM_SELECT) && BM_elem_flag_test(e->v2, BM_ELEM_SELECT) && !BM_elem_flag_test(e, BM_ELEM_HIDDEN)) { BM_elem_flag_enable(e, BM_ELEM_SELECT); } else { BM_elem_flag_disable(e, BM_ELEM_SELECT); } } } #pragma omp section { BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) { bool ok = true; if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) { l_iter = l_first = BM_FACE_FIRST_LOOP(f); do { if (!BM_elem_flag_test(l_iter->v, BM_ELEM_SELECT)) { ok = false; break; } } while ((l_iter = l_iter->next) != l_first); } else { ok = false; } BM_elem_flag_set(f, BM_ELEM_SELECT, ok); } } } /* end sections */ } else if (selectmode & SCE_SELECT_EDGE) { BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) { bool ok = true; if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) { l_iter = l_first = BM_FACE_FIRST_LOOP(f); do { if (!BM_elem_flag_test(l_iter->e, BM_ELEM_SELECT)) { ok = false; break; } } while ((l_iter = l_iter->next) != l_first); } else { ok = false; } BM_elem_flag_set(f, BM_ELEM_SELECT, ok); } } /* Remove any deselected elements from the BMEditSelection */ BM_select_history_validate(bm); recount_totsels(bm); } void BM_mesh_select_mode_flush(BMesh *bm) { BM_mesh_select_mode_flush_ex(bm, bm->selectmode); } /** * mode independent flushing up/down */ void 
BM_mesh_deselect_flush(BMesh *bm) { BMIter eiter; BMEdge *e; BM_ITER_MESH (e, &eiter, bm, BM_EDGES_OF_MESH) { if (!BM_elem_flag_test(e, BM_ELEM_HIDDEN)) { if (BM_elem_flag_test(e, BM_ELEM_SELECT)) { if (!BM_elem_flag_test(e->v1, BM_ELEM_SELECT) || !BM_elem_flag_test(e->v2, BM_ELEM_SELECT)) { BM_elem_flag_disable(e, BM_ELEM_SELECT); } } if (e->l && !BM_elem_flag_test(e, BM_ELEM_SELECT)) { BMLoop *l_iter; BMLoop *l_first; l_iter = l_first = e->l; do { BM_elem_flag_disable(l_iter->f, BM_ELEM_SELECT); } while ((l_iter = l_iter->radial_next) != l_first); } } } /* Remove any deselected elements from the BMEditSelection */ BM_select_history_validate(bm); recount_totsels(bm); } /** * mode independent flushing up/down */ void BM_mesh_select_flush(BMesh *bm) { BMEdge *e; BMLoop *l_iter; BMLoop *l_first; BMFace *f; BMIter eiter; BMIter fiter; bool ok; /* we can use 2 sections here because the second loop isnt checking edge selection */ #pragma omp parallel sections if (bm->totedge + bm->totface >= BM_OMP_LIMIT) { #pragma omp section { BM_ITER_MESH (e, &eiter, bm, BM_EDGES_OF_MESH) { if (BM_elem_flag_test(e->v1, BM_ELEM_SELECT) && BM_elem_flag_test(e->v2, BM_ELEM_SELECT) && !BM_elem_flag_test(e, BM_ELEM_HIDDEN)) { BM_elem_flag_enable(e, BM_ELEM_SELECT); } } } #pragma omp section { BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) { ok = true; if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) { l_iter = l_first = BM_FACE_FIRST_LOOP(f); do { if (!BM_elem_flag_test(l_iter->v, BM_ELEM_SELECT)) { ok = false; break; } } while ((l_iter = l_iter->next) != l_first); } else { ok = false; } if (ok) { BM_elem_flag_enable(f, BM_ELEM_SELECT); } } } } recount_totsels(bm); } /** * \brief Select Vert * * Changes selection state of a single vertex * in a mesh */ void BM_vert_select_set(BMesh *bm, BMVert *v, const bool select) { BLI_assert(v->head.htype == BM_VERT); if (BM_elem_flag_test(v, BM_ELEM_HIDDEN)) { return; } if (select) { if (!BM_elem_flag_test(v, BM_ELEM_SELECT)) { BM_elem_flag_enable(v, 
BM_ELEM_SELECT); /* continues the BM_elem_flag_enable(v, ...) call started above */
      bm->totvertsel += 1; /* keep the cached selected-vert counter in sync */
    }
  }
  else {
    if (BM_elem_flag_test(v, BM_ELEM_SELECT)) {
      bm->totvertsel -= 1;
      BM_elem_flag_disable(v, BM_ELEM_SELECT);
    }
  }
}

/**
 * \brief Select Edge
 *
 * Changes selection state of a single edge in a mesh.
 */
void BM_edge_select_set(BMesh *bm, BMEdge *e, const bool select)
{
  BLI_assert(e->head.htype == BM_EDGE);

  if (BM_elem_flag_test(e, BM_ELEM_HIDDEN)) {
    return;
  }

  if (select) {
    if (!BM_elem_flag_test(e, BM_ELEM_SELECT)) {
      BM_elem_flag_enable(e, BM_ELEM_SELECT);
      bm->totedgesel += 1;
    }
    /* Flush down: selecting an edge selects both of its vertices. */
    BM_vert_select_set(bm, e->v1, true);
    BM_vert_select_set(bm, e->v2, true);
  }
  else {
    if (BM_elem_flag_test(e, BM_ELEM_SELECT)) {
      BM_elem_flag_disable(e, BM_ELEM_SELECT);
      bm->totedgesel -= 1;
    }

    if ((bm->selectmode & SCE_SELECT_VERTEX) == 0) {
      int i;

      /* check if the vert is used by a selected edge */
      for (i = 0; i < 2; i++) {
        BMVert *v = *((&e->v1) + i); /* v1/v2 accessed by index via adjacent struct members */
        if (bm_vert_is_edge_select_any_other(v, e) == false) {
          BM_vert_select_set(bm, v, false);
        }
      }
    }
    else {
      BM_vert_select_set(bm, e->v1, false);
      BM_vert_select_set(bm, e->v2, false);
    }
  }
}

/**
 * \brief Select Face
 *
 * Changes selection state of a single
 * face in a mesh.
 */
void BM_face_select_set(BMesh *bm, BMFace *f, const bool select)
{
  BMLoop *l_iter;
  BMLoop *l_first;

  BLI_assert(f->head.htype == BM_FACE);

  if (BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
    return;
  }

  if (select) {
    if (!BM_elem_flag_test(f, BM_ELEM_SELECT)) {
      BM_elem_flag_enable(f, BM_ELEM_SELECT);
      bm->totfacesel += 1;
    }

    /* Flush down: select all verts and edges of this face. */
    l_iter = l_first = BM_FACE_FIRST_LOOP(f);
    do {
      BM_vert_select_set(bm, l_iter->v, true);
      BM_edge_select_set(bm, l_iter->e, true);
    } while ((l_iter = l_iter->next) != l_first);
  }
  else {

    if (BM_elem_flag_test(f, BM_ELEM_SELECT)) {
      BM_elem_flag_disable(f, BM_ELEM_SELECT);
      bm->totfacesel -= 1;
    }

    /**
     * \note This allows a temporarily invalid state - where for eg
     * an edge may be de-selected, but an adjacent face remains selected.
     *
     * Rely on #BM_mesh_select_mode_flush to correct these cases.
     *
     * \note flushing based on mode, see T46494
     */
    if (bm->selectmode & SCE_SELECT_VERTEX) {
      l_iter = l_first = BM_FACE_FIRST_LOOP(f);
      do {
        BM_vert_select_set(bm, l_iter->v, false);
        BM_edge_select_set_noflush(bm, l_iter->e, false);
      } while ((l_iter = l_iter->next) != l_first);
    }
    else {
      /**
       * \note use #BM_edge_select_set_noflush,
       * vertex flushing is handled last.
       */
      if (bm->selectmode & SCE_SELECT_EDGE) {
        l_iter = l_first = BM_FACE_FIRST_LOOP(f);
        do {
          BM_edge_select_set_noflush(bm, l_iter->e, false);
        } while ((l_iter = l_iter->next) != l_first);
      }
      else {
        /* Face-only mode: keep edges that another selected face still uses. */
        l_iter = l_first = BM_FACE_FIRST_LOOP(f);
        do {
          if (bm_edge_is_face_select_any_other(l_iter) == false) {
            BM_edge_select_set_noflush(bm, l_iter->e, false);
          }
        } while ((l_iter = l_iter->next) != l_first);
      }

      /* flush down to verts */
      l_iter = l_first = BM_FACE_FIRST_LOOP(f);
      do {
        if (bm_vert_is_edge_select_any_other(l_iter->v, l_iter->e) == false) {
          BM_vert_select_set(bm, l_iter->v, false);
        }
      } while ((l_iter = l_iter->next) != l_first);
    }
  }
}

/** \name Non flushing versions element selection.
 * \{ */

/** Set edge selection without flushing to verts; only updates the cached counter. */
void BM_edge_select_set_noflush(BMesh *bm, BMEdge *e, const bool select)
{
  BLI_assert(e->head.htype == BM_EDGE);

  if (BM_elem_flag_test(e, BM_ELEM_HIDDEN)) {
    return;
  }

  if (select) {
    if (!BM_elem_flag_test(e, BM_ELEM_SELECT)) {
      BM_elem_flag_enable(e, BM_ELEM_SELECT);
      bm->totedgesel += 1;
    }
  }
  else {
    if (BM_elem_flag_test(e, BM_ELEM_SELECT)) {
      BM_elem_flag_disable(e, BM_ELEM_SELECT);
      bm->totedgesel -= 1;
    }
  }
}

/** Set face selection without flushing to edges/verts; only updates the cached counter. */
void BM_face_select_set_noflush(BMesh *bm, BMFace *f, const bool select)
{
  BLI_assert(f->head.htype == BM_FACE);

  if (BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
    return;
  }

  if (select) {
    if (!BM_elem_flag_test(f, BM_ELEM_SELECT)) {
      BM_elem_flag_enable(f, BM_ELEM_SELECT);
      bm->totfacesel += 1;
    }
  }
  else {
    if (BM_elem_flag_test(f, BM_ELEM_SELECT)) {
      BM_elem_flag_disable(f, BM_ELEM_SELECT);
      bm->totfacesel -= 1;
    }
  }
}

/** \} */

/**
 * Select Mode Set
 *
 * Sets the selection mode for the bmesh,
 * updating the selection state.
*/
void BM_mesh_select_mode_set(BMesh *bm, int selectmode)
{
  BMIter iter;
  BMElem *ele;

  bm->selectmode = selectmode;

  if (bm->selectmode & SCE_SELECT_VERTEX) {
    /* disabled because selection flushing handles these */
#if 0
    BM_ITER_MESH (ele, &iter, bm, BM_EDGES_OF_MESH) {
      BM_elem_flag_disable(ele, BM_ELEM_SELECT);
    }
    BM_ITER_MESH (ele, &iter, bm, BM_FACES_OF_MESH) {
      BM_elem_flag_disable(ele, BM_ELEM_SELECT);
    }
#endif
    BM_mesh_select_mode_flush(bm);
  }
  else if (bm->selectmode & SCE_SELECT_EDGE) {
    /* disabled because selection flushing handles these */
#if 0
    BM_ITER_MESH (ele, &iter, bm, BM_VERTS_OF_MESH) {
      BM_elem_flag_disable(ele, BM_ELEM_SELECT);
    }
#endif
    /* Re-select edges so the selection flushes down to their vertices. */
    BM_ITER_MESH (ele, &iter, bm, BM_EDGES_OF_MESH) {
      if (BM_elem_flag_test(ele, BM_ELEM_SELECT)) {
        BM_edge_select_set(bm, (BMEdge *)ele, true);
      }
    }
    BM_mesh_select_mode_flush(bm);
  }
  else if (bm->selectmode & SCE_SELECT_FACE) {
    /* disabled because selection flushing handles these */
#if 0
    BM_ITER_MESH (ele, &iter, bm, BM_EDGES_OF_MESH) {
      BM_elem_flag_disable(ele, BM_ELEM_SELECT);
    }
#endif
    /* Re-select faces so the selection flushes down to edges and vertices. */
    BM_ITER_MESH (ele, &iter, bm, BM_FACES_OF_MESH) {
      if (BM_elem_flag_test(ele, BM_ELEM_SELECT)) {
        BM_face_select_set(bm, (BMFace *)ele, true);
      }
    }
    BM_mesh_select_mode_flush(bm);
  }
}

/**
 * counts number of elements with flag enabled/disabled
 */
static int bm_mesh_flag_count(
        BMesh *bm, const char htype, const char hflag,
        const bool respecthide, const bool test_for_enabled)
{
  BMElem *ele;
  BMIter iter;
  int tot = 0;

  BLI_assert((htype & ~BM_ALL_NOLOOP) == 0);

  if (htype & BM_VERT) {
    BM_ITER_MESH (ele, &iter, bm, BM_VERTS_OF_MESH) {
      if (respecthide && BM_elem_flag_test(ele, BM_ELEM_HIDDEN)) continue;
      if (BM_elem_flag_test_bool(ele, hflag) == test_for_enabled) tot++;
    }
  }
  if (htype & BM_EDGE) {
    BM_ITER_MESH (ele, &iter, bm, BM_EDGES_OF_MESH) {
      if (respecthide && BM_elem_flag_test(ele, BM_ELEM_HIDDEN)) continue;
      if (BM_elem_flag_test_bool(ele, hflag) == test_for_enabled) tot++;
    }
  }
  if (htype & BM_FACE) {
    BM_ITER_MESH (ele, &iter, bm, BM_FACES_OF_MESH)
    {
      if (respecthide && BM_elem_flag_test(ele, BM_ELEM_HIDDEN)) continue;
      if (BM_elem_flag_test_bool(ele, hflag) == test_for_enabled) tot++;
    }
  }

  return tot;
}

/** Count elements of the given types with \a hflag enabled. */
int BM_mesh_elem_hflag_count_enabled(BMesh *bm, const char htype, const char hflag, const bool respecthide)
{
  return bm_mesh_flag_count(bm, htype, hflag, respecthide, true);
}

/** Count elements of the given types with \a hflag disabled. */
int BM_mesh_elem_hflag_count_disabled(BMesh *bm, const char htype, const char hflag, const bool respecthide)
{
  return bm_mesh_flag_count(bm, htype, hflag, respecthide, false);
}

/**
 * \note use BM_elem_flag_test(ele, BM_ELEM_SELECT) to test selection
 * \note by design, this will not touch the editselection history stuff
 */
void BM_elem_select_set(BMesh *bm, BMElem *ele, const bool select)
{
  switch (ele->head.htype) {
    case BM_VERT:
      BM_vert_select_set(bm, (BMVert *)ele, select);
      break;
    case BM_EDGE:
      BM_edge_select_set(bm, (BMEdge *)ele, select);
      break;
    case BM_FACE:
      BM_face_select_set(bm, (BMFace *)ele, select);
      break;
    default:
      BLI_assert(0);
      break;
  }
}

/* this replaces the active flag used in uv/face mode */
void BM_mesh_active_face_set(BMesh *bm, BMFace *efa)
{
  bm->act_face = efa;
}

/**
 * Return the active face, optionally falling back to the selection history
 * (\a is_sloppy) and optionally requiring it to be selected (\a is_selected).
 */
BMFace *BM_mesh_active_face_get(BMesh *bm, const bool is_sloppy, const bool is_selected)
{
  if (bm->act_face && (!is_selected || BM_elem_flag_test(bm->act_face, BM_ELEM_SELECT))) {
    return bm->act_face;
  }
  else if (is_sloppy) {
    BMIter iter;
    BMFace *f = NULL;
    BMEditSelection *ese;

    /* Find the latest non-hidden face from the BMEditSelection */
    ese = bm->selected.last;
    for ( ; ese; ese = ese->prev) {
      if (ese->htype == BM_FACE) {
        f = (BMFace *)ese->ele;

        if (BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
          f = NULL;
        }
        else if (is_selected && !BM_elem_flag_test(f, BM_ELEM_SELECT)) {
          f = NULL;
        }
        else {
          break;
        }
      }
    }
    /* Last attempt: try to find any selected face */
    if (f == NULL) {
      BM_ITER_MESH (f, &iter, bm, BM_FACES_OF_MESH) {
        if (BM_elem_flag_test(f, BM_ELEM_SELECT)) {
          break;
        }
      }
    }
    return f; /* can still be null */
  }
  return NULL;
}

/** Return the most recently selected edge from the history, or NULL. */
BMEdge *BM_mesh_active_edge_get(BMesh *bm)
{
if (bm->selected.last) {
    BMEditSelection *ese = bm->selected.last;
    if (ese && ese->htype == BM_EDGE) {
      return (BMEdge *)ese->ele;
    }
  }
  return NULL;
}

/** Return the most recently selected vertex from the history, or NULL. */
BMVert *BM_mesh_active_vert_get(BMesh *bm)
{
  if (bm->selected.last) {
    BMEditSelection *ese = bm->selected.last;
    if (ese && ese->htype == BM_VERT) {
      return (BMVert *)ese->ele;
    }
  }
  return NULL;
}

/** Return the most recently selected element of any type from the history, or NULL. */
BMElem *BM_mesh_active_elem_get(BMesh *bm)
{
  if (bm->selected.last) {
    BMEditSelection *ese = bm->selected.last;
    if (ese) {
      return ese->ele;
    }
  }
  return NULL;
}

/**
 * Generic way to get data from an EditSelection type
 * These functions were written to be used by the Modifier widget
 * when in Rotate about active mode, but can be used anywhere.
 *
 * - #BM_editselection_center
 * - #BM_editselection_normal
 * - #BM_editselection_plane
 */
void BM_editselection_center(BMEditSelection *ese, float r_center[3])
{
  if (ese->htype == BM_VERT) {
    BMVert *eve = (BMVert *)ese->ele;
    copy_v3_v3(r_center, eve->co);
  }
  else if (ese->htype == BM_EDGE) {
    BMEdge *eed = (BMEdge *)ese->ele;
    mid_v3_v3v3(r_center, eed->v1->co, eed->v2->co);
  }
  else if (ese->htype == BM_FACE) {
    BMFace *efa = (BMFace *)ese->ele;
    BM_face_calc_center_mean(efa, r_center);
  }
}

/** Write the element's normal into \a r_normal (normalized for the edge case). */
void BM_editselection_normal(BMEditSelection *ese, float r_normal[3])
{
  if (ese->htype == BM_VERT) {
    BMVert *eve = (BMVert *)ese->ele;
    copy_v3_v3(r_normal, eve->no);
  }
  else if (ese->htype == BM_EDGE) {
    BMEdge *eed = (BMEdge *)ese->ele;
    float plane[3]; /* need a plane to correct the normal */
    float vec[3]; /* temp vec storage */

    add_v3_v3v3(r_normal, eed->v1->no, eed->v2->no);
    sub_v3_v3v3(plane, eed->v2->co, eed->v1->co);

    /* the 2 vertex normals will be close but not at right angles to the edge
     * for rotate about edge we want them to be at right angles, so we need to
     * do some extra calculation to correct the vert normals,
     * we need the plane for this */
    cross_v3_v3v3(vec, r_normal, plane);
    cross_v3_v3v3(r_normal, plane, vec);
    normalize_v3(r_normal);
  }
  else if (ese->htype == BM_FACE) {
    BMFace *efa = (BMFace *)ese->ele;
    copy_v3_v3(r_normal, efa->no);
  }
}

/* Calculate a plane that is right angles to the edge/vert/faces normal
 * also make the plane run along an axis that is related to the geometry,
 * because this is used for the manipulator's Y axis. */
void BM_editselection_plane(BMEditSelection *ese, float r_plane[3])
{
  if (ese->htype == BM_VERT) {
    BMVert *eve = (BMVert *)ese->ele;
    float vec[3] = {0.0f, 0.0f, 0.0f};

    if (ese->prev) { /* use previously selected data to make a useful vertex plane */
      BM_editselection_center(ese->prev, vec);
      sub_v3_v3v3(r_plane, vec, eve->co);
    }
    else {
      /* make a fake plane that's at right angles to the normal
       * we can't make a crossvec from a vec that's the same as the vec
       * unlikely but possible, so make sure if the normal is (0, 0, 1)
       * that vec isn't the same or in the same direction even. */
      if (eve->no[0] < 0.5f) vec[0] = 1.0f;
      else if (eve->no[1] < 0.5f) vec[1] = 1.0f;
      else vec[2] = 1.0f;
      cross_v3_v3v3(r_plane, eve->no, vec);
    }
    normalize_v3(r_plane);
  }
  else if (ese->htype == BM_EDGE) {
    BMEdge *eed = (BMEdge *)ese->ele;

    if (BM_edge_is_boundary(eed)) {
      sub_v3_v3v3(r_plane, eed->l->v->co, eed->l->next->v->co);
    }
    else {
      /* the plane is simple, it runs along the edge
       * however selecting different edges can swap the direction of the y axis.
       * this makes it less likely for the y axis of the manipulator
       * (running along the edge).. to flip less often.
       * at least its more predictable */
      if (eed->v2->co[1] > eed->v1->co[1]) { /* check which to do first */
        sub_v3_v3v3(r_plane, eed->v2->co, eed->v1->co);
      }
      else {
        sub_v3_v3v3(r_plane, eed->v1->co, eed->v2->co);
      }
    }
    normalize_v3(r_plane);
  }
  else if (ese->htype == BM_FACE) {
    BMFace *efa = (BMFace *)ese->ele;
    BM_face_calc_tangent_auto(efa, r_plane);
  }
}

/** Allocate a new edit-selection entry for \a ele (caller owns the memory). */
static BMEditSelection *bm_select_history_create(BMHeader *ele)
{
  BMEditSelection *ese = (BMEditSelection *) MEM_callocN(sizeof(BMEditSelection), "BMEdit Selection");
  ese->htype = ele->htype;
  ese->ele = (BMElem *)ele;
  return ese;
}

/* --- macro wrapped funcs --- */

/** Return true when \a ele is already in the selection history. */
bool _bm_select_history_check(BMesh *bm, const BMHeader *ele)
{
  return (BLI_findptr(&bm->selected, ele, offsetof(BMEditSelection, ele)) != NULL);
}

/** Remove \a ele from the selection history; return true when it was found. */
bool _bm_select_history_remove(BMesh *bm, BMHeader *ele)
{
  BMEditSelection *ese = BLI_findptr(&bm->selected, ele, offsetof(BMEditSelection, ele));
  if (ese) {
    BLI_freelinkN(&bm->selected, ese);
    return true;
  }
  else {
    return false;
  }
}

void _bm_select_history_store_notest(BMesh *bm, BMHeader *ele)
{
  BMEditSelection *ese = bm_select_history_create(ele);
  BLI_addtail(&(bm->selected), ese);
}

void _bm_select_history_store_head_notest(BMesh *bm, BMHeader *ele)
{
  BMEditSelection *ese = bm_select_history_create(ele);
  BLI_addhead(&(bm->selected), ese);
}

/** Append \a ele to the history unless it is already stored. */
void _bm_select_history_store(BMesh *bm, BMHeader *ele)
{
  if (!BM_select_history_check(bm, (BMElem *)ele)) {
    BM_select_history_store_notest(bm, (BMElem *)ele);
  }
}

/** Prepend \a ele to the history unless it is already stored. */
void _bm_select_history_store_head(BMesh *bm, BMHeader *ele)
{
  if (!BM_select_history_check(bm, (BMElem *)ele)) {
    BM_select_history_store_head_notest(bm, (BMElem *)ele);
  }
}

void _bm_select_history_store_after_notest(BMesh *bm, BMEditSelection *ese_ref, BMHeader *ele)
{
  BMEditSelection *ese = bm_select_history_create(ele);
  BLI_insertlinkafter(&(bm->selected), ese_ref, ese);
}

void _bm_select_history_store_after(BMesh *bm, BMEditSelection *ese_ref, BMHeader *ele)
{
  if (!BM_select_history_check(bm, (BMElem *)ele)) {
BM_select_history_store_after_notest(bm, ese_ref, (BMElem *)ele);
  }
}
/* --- end macro wrapped funcs --- */

/** Free the entire selection history list. */
void BM_select_history_clear(BMesh *bm)
{
  BLI_freelistN(&bm->selected);
}

/** Drop history entries whose element is no longer selected. */
void BM_select_history_validate(BMesh *bm)
{
  BMEditSelection *ese, *ese_next;

  for (ese = bm->selected.first; ese; ese = ese_next) {
    ese_next = ese->next;
    if (!BM_elem_flag_test(ese->ele, BM_ELEM_SELECT)) {
      BLI_freelinkN(&(bm->selected), ese);
    }
  }
}

/**
 * Get the active mesh element (with active-face fallback).
 */
bool BM_select_history_active_get(BMesh *bm, BMEditSelection *ese)
{
  BMEditSelection *ese_last = bm->selected.last;
  BMFace *efa = BM_mesh_active_face_get(bm, false, false);

  ese->next = ese->prev = NULL;

  if (ese_last) {
    if (ese_last->htype == BM_FACE) { /* if there is an active face, use it over the last selected face */
      if (efa) {
        ese->ele = (BMElem *)efa;
      }
      else {
        ese->ele = ese_last->ele;
      }
      ese->htype = BM_FACE;
    }
    else {
      ese->ele = ese_last->ele;
      ese->htype = ese_last->htype;
    }
  }
  else if (efa) { /* no edit-selection, fallback to active face */
    ese->ele = (BMElem *)efa;
    ese->htype = BM_FACE;
  }
  else {
    ese->ele = NULL;
    return false;
  }

  return true;
}

/**
 * Return a map from BMVert/Edge/Face -> BMEditSelection
 */
GHash *BM_select_history_map_create(BMesh *bm)
{
  BMEditSelection *ese;
  GHash *map;

  if (BLI_listbase_is_empty(&bm->selected)) {
    return NULL;
  }

  map = BLI_ghash_ptr_new(__func__);

  for (ese = bm->selected.first; ese; ese = ese->next) {
    BLI_ghash_insert(map, ese->ele, ese);
  }

  return map;
}

/**
 * Disable \a hflag on elements of the given types, optionally only on elements
 * that match \a hflag_test (enable the flag on non-matches when \a overwrite).
 * Clears the selection history when BM_ELEM_SELECT is affected.
 */
void BM_mesh_elem_hflag_disable_test(
        BMesh *bm, const char htype, const char hflag,
        const bool respecthide, const bool overwrite, const char hflag_test)
{
  const char iter_types[3] = {BM_VERTS_OF_MESH, BM_EDGES_OF_MESH, BM_FACES_OF_MESH};
  const char flag_types[3] = {BM_VERT, BM_EDGE, BM_FACE};
  const char hflag_nosel = hflag & ~BM_ELEM_SELECT;
  int i;

  BLI_assert((htype & ~BM_ALL_NOLOOP) == 0);

  if (hflag & BM_ELEM_SELECT) {
    BM_select_history_clear(bm);
  }

  if ((htype == (BM_VERT | BM_EDGE | BM_FACE)) &&
      (hflag == BM_ELEM_SELECT) &&
      (respecthide == false) &&
      (hflag_test == 0))
  {
    /* fast path for deselect all, avoid topology loops
     * since we know all will be de-selected anyway. */
#pragma omp parallel for schedule(static) if (bm->totvert + bm->totedge + bm->totface >= BM_OMP_LIMIT)
    for (i = 0; i < 3; i++) {
      BMIter iter;
      BMElem *ele;

      ele = BM_iter_new(&iter, bm, iter_types[i], NULL);
      for ( ; ele; ele = BM_iter_step(&iter)) {
        BM_elem_flag_disable(ele, BM_ELEM_SELECT);
      }
    }

    bm->totvertsel = bm->totedgesel = bm->totfacesel = 0;
  }
  else {
    for (i = 0; i < 3; i++) {
      BMIter iter;
      BMElem *ele;

      if (htype & flag_types[i]) {
        ele = BM_iter_new(&iter, bm, iter_types[i], NULL);
        for ( ; ele; ele = BM_iter_step(&iter)) {

          if (UNLIKELY(respecthide && BM_elem_flag_test(ele, BM_ELEM_HIDDEN))) {
            /* pass */
          }
          else if (!hflag_test || BM_elem_flag_test(ele, hflag_test)) {
            /* match: deselect via the flushing API so counters stay correct */
            if (hflag & BM_ELEM_SELECT) {
              BM_elem_select_set(bm, ele, false);
            }
            BM_elem_flag_disable(ele, hflag);
          }
          else if (overwrite) {
            /* no match! */
            if (hflag & BM_ELEM_SELECT) {
              BM_elem_select_set(bm, ele, true);
            }
            BM_elem_flag_enable(ele, hflag_nosel);
          }
        }
      }
    }
  }
}

/**
 * Enable \a hflag on elements of the given types, optionally only on elements
 * that match \a hflag_test (disable the flag on non-matches when \a overwrite).
 */
void BM_mesh_elem_hflag_enable_test(
        BMesh *bm, const char htype, const char hflag,
        const bool respecthide, const bool overwrite, const char hflag_test)
{
  const char iter_types[3] = {BM_VERTS_OF_MESH, BM_EDGES_OF_MESH, BM_FACES_OF_MESH};
  const char flag_types[3] = {BM_VERT, BM_EDGE, BM_FACE};

  /* use the nosel version when setting so under no
   * condition may a hidden face become selected.
   * Applying other flags to hidden faces is OK. */
  const char hflag_nosel = hflag & ~BM_ELEM_SELECT;

  BMIter iter;
  BMElem *ele;
  int i;

  BLI_assert((htype & ~BM_ALL_NOLOOP) == 0);

  if (hflag & BM_ELEM_SELECT) {
    BM_select_history_clear(bm);
  }

  /* note, better not attempt a fast path for selection as done with de-select
   * because hidden geometry and different selection modes can give different results,
   * we could of course check for no hidden faces and then use quicker method but its not worth it. */

  for (i = 0; i < 3; i++) {
    if (htype & flag_types[i]) {
      ele = BM_iter_new(&iter, bm, iter_types[i], NULL);
      for ( ; ele; ele = BM_iter_step(&iter)) {

        if (UNLIKELY(respecthide && BM_elem_flag_test(ele, BM_ELEM_HIDDEN))) {
          /* pass */
        }
        else if (!hflag_test || BM_elem_flag_test(ele, hflag_test)) {
          /* match! */
          if (hflag & BM_ELEM_SELECT) {
            BM_elem_select_set(bm, ele, true);
          }
          BM_elem_flag_enable(ele, hflag_nosel);
        }
        else if (overwrite) {
          /* no match! */
          if (hflag & BM_ELEM_SELECT) {
            BM_elem_select_set(bm, ele, false);
          }
          BM_elem_flag_disable(ele, hflag);
        }
      }
    }
  }
}

void BM_mesh_elem_hflag_disable_all(
        BMesh *bm, const char htype, const char hflag,
        const bool respecthide)
{
  /* call with 0 hflag_test */
  BM_mesh_elem_hflag_disable_test(bm, htype, hflag, respecthide, false, 0);
}

void BM_mesh_elem_hflag_enable_all(
        BMesh *bm, const char htype, const char hflag,
        const bool respecthide)
{
  /* call with 0 hflag_test */
  BM_mesh_elem_hflag_enable_test(bm, htype, hflag, respecthide, false, 0);
}

/***************** Mesh Hiding stuff *********** */

/**
 * Hide unless any connected elements are visible.
 * Run this after hiding a connected edge or face.
 */
static void vert_flush_hide_set(BMVert *v)
{
  BM_elem_flag_set(v, BM_ELEM_HIDDEN, !bm_vert_is_edge_visible_any(v));
}

/**
 * Hide unless any connected elements are visible.
 * Run this after hiding a connected face.
*/
static void edge_flush_hide_set(BMEdge *e)
{
  BM_elem_flag_set(e, BM_ELEM_HIDDEN, !bm_edge_is_face_visible_any(e));
}

void BM_vert_hide_set(BMVert *v, const bool hide)
{
  /* vert hiding: vert + surrounding edges and faces */

  BLI_assert(v->head.htype == BM_VERT);
  if (hide) {
    /* elements must be deselected before hiding (see _bm_elem_hide_set) */
    BLI_assert(!BM_elem_flag_test(v, BM_ELEM_SELECT));
  }

  BM_elem_flag_set(v, BM_ELEM_HIDDEN, hide);

  if (v->e) {
    BMEdge *e_iter, *e_first;
    e_iter = e_first = v->e;
    do {
      BM_elem_flag_set(e_iter, BM_ELEM_HIDDEN, hide);
      if (e_iter->l) {
        const BMLoop *l_radial_iter, *l_radial_first;
        l_radial_iter = l_radial_first = e_iter->l;
        do {
          BM_elem_flag_set(l_radial_iter->f, BM_ELEM_HIDDEN, hide);
        } while ((l_radial_iter = l_radial_iter->radial_next) != l_radial_first);
      }
    } while ((e_iter = bmesh_disk_edge_next(e_iter, v)) != e_first);
  }
}

void BM_edge_hide_set(BMEdge *e, const bool hide)
{
  BLI_assert(e->head.htype == BM_EDGE);
  if (hide) {
    BLI_assert(!BM_elem_flag_test(e, BM_ELEM_SELECT));
  }

  /* edge hiding: faces around the edge */
  if (e->l) {
    const BMLoop *l_iter, *l_first;
    l_iter = l_first = e->l;
    do {
      BM_elem_flag_set(l_iter->f, BM_ELEM_HIDDEN, hide);
    } while ((l_iter = l_iter->radial_next) != l_first);
  }

  BM_elem_flag_set(e, BM_ELEM_HIDDEN, hide);

  /* hide vertices if necessary */
  if (hide) {
    vert_flush_hide_set(e->v1);
    vert_flush_hide_set(e->v2);
  }
  else {
    BM_elem_flag_disable(e->v1, BM_ELEM_HIDDEN);
    BM_elem_flag_disable(e->v2, BM_ELEM_HIDDEN);
  }
}

void BM_face_hide_set(BMFace *f, const bool hide)
{
  BLI_assert(f->head.htype == BM_FACE);
  if (hide) {
    BLI_assert(!BM_elem_flag_test(f, BM_ELEM_SELECT));
  }

  BM_elem_flag_set(f, BM_ELEM_HIDDEN, hide);

  if (hide) {
    BMLoop *l_first = BM_FACE_FIRST_LOOP(f);
    BMLoop *l_iter;

    /* Hide edges first, then verts: the vert check reads edge visibility. */
    l_iter = l_first;
    do {
      edge_flush_hide_set(l_iter->e);
    } while ((l_iter = l_iter->next) != l_first);

    l_iter = l_first;
    do {
      vert_flush_hide_set(l_iter->v);
    } while ((l_iter = l_iter->next) != l_first);
  }
  else {
    BMLoop *l_first = BM_FACE_FIRST_LOOP(f);
    BMLoop *l_iter;

    l_iter = l_first;
    do {
      BM_elem_flag_disable(l_iter->e, BM_ELEM_HIDDEN);
      BM_elem_flag_disable(l_iter->v, BM_ELEM_HIDDEN);
    } while ((l_iter = l_iter->next) != l_first);
  }
}

void _bm_elem_hide_set(BMesh *bm, BMHeader *head, const bool hide)
{
  /* Follow convention of always deselecting before
   * hiding an element */
  switch (head->htype) {
    case BM_VERT:
      if (hide) BM_vert_select_set(bm, (BMVert *)head, false);
      BM_vert_hide_set((BMVert *)head, hide);
      break;
    case BM_EDGE:
      if (hide) BM_edge_select_set(bm, (BMEdge *)head, false);
      BM_edge_hide_set((BMEdge *)head, hide);
      break;
    case BM_FACE:
      if (hide) BM_face_select_set(bm, (BMFace *)head, false);
      BM_face_hide_set((BMFace *)head, hide);
      break;
    default:
      BMESH_ASSERT(0);
      break;
  }
}
/* ==== convolutiondepthwise_5x5_pack4.h ==== */
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Depthwise 5x5 convolution, stride 1, pack-4 layout (4 floats per logical
// element) using MIPS MSA vector intrinsics. One group == one channel; two
// output rows are produced per iteration of the main loop so the shared
// kernel rows are loaded once and reused for both accumulators.
static void convdw5x5s1_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const float* bias = _bias; // may be an empty Mat -> NULL, handled below

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + g * 4, 0) : (v4f32)__msa_fill_w(0);

        const float* k0 = kernel.row(g); // 25 kernel taps * 4 lanes for this group

        float* outptr0 = out.row(0);
        float* outptr1 = out.row(1);

        const Mat img0 = bottom_blob.channel(g);

        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);
        const float* r3 = img0.row(3);
        const float* r4 = img0.row(4);
        const float* r5 = img0.row(5);

        int i = 0;
        /* Main loop: compute two output rows (i, i+1) at once. */
        for (; i + 1 < outh; i += 2)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                __builtin_prefetch(r0 + 16);
                __builtin_prefetch(r1 + 16);
                __builtin_prefetch(r2 + 16);
                __builtin_prefetch(r3 + 16);
                __builtin_prefetch(r4 + 16);
                __builtin_prefetch(r5 + 16);
                __builtin_prefetch(k0 + 400);

                v4f32 _sum0 = _bias0; // accumulator for output row i
                v4f32 _sum1 = _bias0; // accumulator for output row i + 1

                /* kernel row 0 -- applies to input row 0 (sum0) and row 1 (sum1) */
                v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
                v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
                v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);

                v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k03 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k04 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
                _sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
                _sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
                _sum0 = __msa_fmadd_w(_sum0, _k03, _r03);
                _sum0 = __msa_fmadd_w(_sum0, _k04, _r04);

                v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
                v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
                v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
                v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
                v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k00, _r10);
                _sum1 = __msa_fmadd_w(_sum1, _k01, _r11);
                _sum1 = __msa_fmadd_w(_sum1, _k02, _r12);
                _sum1 = __msa_fmadd_w(_sum1, _k03, _r13);
                _sum1 = __msa_fmadd_w(_sum1, _k04, _r14);

                /* kernel row 1 -- input row 1 (sum0) and row 2 (sum1); the row-1
                 * input vectors are reused from above. */
                v4f32 _k10 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k13 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k14 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
                _sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
                _sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
                _sum0 = __msa_fmadd_w(_sum0, _k13, _r13);
                _sum0 = __msa_fmadd_w(_sum0, _k14, _r14);

                v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
                v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
                v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
                v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
                v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k10, _r20);
                _sum1 = __msa_fmadd_w(_sum1, _k11, _r21);
                _sum1 = __msa_fmadd_w(_sum1, _k12, _r22);
                _sum1 = __msa_fmadd_w(_sum1, _k13, _r23);
                _sum1 = __msa_fmadd_w(_sum1, _k14, _r24);

                /* kernel row 2 */
                v4f32 _k20 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k23 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k24 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
                _sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
                _sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
                _sum0 = __msa_fmadd_w(_sum0, _k23, _r23);
                _sum0 = __msa_fmadd_w(_sum0, _k24, _r24);

                v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
                v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
                v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
                v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0);
                v4f32 _r34 = (v4f32)__msa_ld_w(r3 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k20, _r30);
                _sum1 = __msa_fmadd_w(_sum1, _k21, _r31);
                _sum1 = __msa_fmadd_w(_sum1, _k22, _r32);
                _sum1 = __msa_fmadd_w(_sum1, _k23, _r33);
                _sum1 = __msa_fmadd_w(_sum1, _k24, _r34);

                /* kernel row 3 */
                v4f32 _k30 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k31 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k32 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k33 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k34 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k30, _r30);
                _sum0 = __msa_fmadd_w(_sum0, _k31, _r31);
                _sum0 = __msa_fmadd_w(_sum0, _k32, _r32);
                _sum0 = __msa_fmadd_w(_sum0, _k33, _r33);
                _sum0 = __msa_fmadd_w(_sum0, _k34, _r34);

                v4f32 _r40 = (v4f32)__msa_ld_w(r4, 0);
                v4f32 _r41 = (v4f32)__msa_ld_w(r4 + 4, 0);
                v4f32 _r42 = (v4f32)__msa_ld_w(r4 + 4 * 2, 0);
                v4f32 _r43 = (v4f32)__msa_ld_w(r4 + 4 * 3, 0);
                v4f32 _r44 = (v4f32)__msa_ld_w(r4 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k30, _r40);
                _sum1 = __msa_fmadd_w(_sum1, _k31, _r41);
                _sum1 = __msa_fmadd_w(_sum1, _k32, _r42);
                _sum1 = __msa_fmadd_w(_sum1, _k33, _r43);
                _sum1 = __msa_fmadd_w(_sum1, _k34, _r44);

                /* kernel row 4; rewind k0 to the start of this group's taps */
                v4f32 _k40 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k41 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k42 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k43 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k44 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 -= 4 * 20;

                _sum0 = __msa_fmadd_w(_sum0, _k40, _r40);
                _sum0 = __msa_fmadd_w(_sum0, _k41, _r41);
                _sum0 = __msa_fmadd_w(_sum0, _k42, _r42);
                _sum0 = __msa_fmadd_w(_sum0, _k43, _r43);
                _sum0 = __msa_fmadd_w(_sum0, _k44, _r44);

                v4f32 _r50 = (v4f32)__msa_ld_w(r5, 0);
                v4f32 _r51 = (v4f32)__msa_ld_w(r5 + 4, 0);
                v4f32 _r52 = (v4f32)__msa_ld_w(r5 + 4 * 2, 0);
                v4f32 _r53 = (v4f32)__msa_ld_w(r5 + 4 * 3, 0);
                v4f32 _r54 = (v4f32)__msa_ld_w(r5 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k40, _r50);
                _sum1 = __msa_fmadd_w(_sum1, _k41, _r51);
                _sum1 = __msa_fmadd_w(_sum1, _k42, _r52);
                _sum1 = __msa_fmadd_w(_sum1, _k43, _r53);
                _sum1 = __msa_fmadd_w(_sum1, _k44, _r54);

                __msa_st_w((v4i32)_sum0, outptr0, 0);
                __msa_st_w((v4i32)_sum1, outptr1, 0);

                outptr0 += 4;
                outptr1 += 4;

                r0 += 4;
                r1 += 4;
                r2 += 4;
                r3 += 4;
                r4 += 4;
                r5 += 4;
            }

            /* skip the 4-element right border plus one full extra row (two rows consumed). */
            r0 += 4 * 4 + w * 4;
            r1 += 4 * 4 + w * 4;
            r2 += 4 * 4 + w * 4;
            r3 += 4 * 4 + w * 4;
            r4 += 4 * 4 + w * 4;
            r5 += 4 * 4 + w * 4;

            outptr0 += outw * 4;
            outptr1 += outw * 4;
        }
        /* Remainder: single output row. */
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                __builtin_prefetch(r0 + 16);
                __builtin_prefetch(r1 + 16);
                __builtin_prefetch(r2 + 16);
                __builtin_prefetch(r3 + 16);
                __builtin_prefetch(r4 + 16);
                __builtin_prefetch(k0 + 400);

                v4f32 _sum0 = _bias0;

                v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
                v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
                v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);

                v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k03 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k04 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
                _sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
                _sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
                _sum0 = __msa_fmadd_w(_sum0, _k03, _r03);
                _sum0 = __msa_fmadd_w(_sum0, _k04, _r04);

                v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
                v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
                v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
                v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
                v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0);

                v4f32 _k10 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k13 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k14 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
                _sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
                _sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
                _sum0 = __msa_fmadd_w(_sum0, _k13, _r13);
                _sum0 = __msa_fmadd_w(_sum0, _k14, _r14);

                v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
                v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
                v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
                v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
                v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0);

                v4f32 _k20 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k23 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k24 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
                _sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
                _sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
                _sum0 = __msa_fmadd_w(_sum0, _k23, _r23);
                _sum0 = __msa_fmadd_w(_sum0, _k24, _r24);

                v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
                v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
                v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
                v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0);
                v4f32 _r34 = (v4f32)__msa_ld_w(r3 + 4 * 4, 0);

                v4f32 _k30 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k31 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k32 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k33 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k34 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k30, _r30);
                _sum0 = __msa_fmadd_w(_sum0, _k31, _r31);
                _sum0 = __msa_fmadd_w(_sum0, _k32, _r32);
                _sum0 = __msa_fmadd_w(_sum0, _k33, _r33);
                _sum0 = __msa_fmadd_w(_sum0, _k34, _r34);

                v4f32 _r40 = (v4f32)__msa_ld_w(r4, 0);
                v4f32 _r41 = (v4f32)__msa_ld_w(r4 + 4, 0);
                v4f32 _r42 = (v4f32)__msa_ld_w(r4 + 4 * 2, 0);
                v4f32 _r43 = (v4f32)__msa_ld_w(r4 + 4 * 3, 0);
                v4f32 _r44 = (v4f32)__msa_ld_w(r4 + 4 * 4, 0);

                v4f32 _k40 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k41 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k42 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k43 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k44 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 -= 4 * 20; // rewind to the group's first tap for the next pixel

                _sum0 = __msa_fmadd_w(_sum0, _k40, _r40);
                _sum0 = __msa_fmadd_w(_sum0, _k41, _r41);
                _sum0 = __msa_fmadd_w(_sum0, _k42, _r42);
                _sum0 = __msa_fmadd_w(_sum0, _k43, _r43);
                _sum0 = __msa_fmadd_w(_sum0, _k44, _r44);

                __msa_st_w((v4i32)_sum0, outptr0, 0);

                outptr0 += 4;

                r0 += 4;
                r1 += 4;
                r2 += 4;
                r3 += 4;
                r4 += 4;
            }

            r0 += 4 * 4;
            r1 += 4 * 4;
            r2 += 4 * 4;
            r3 += 4 * 4;
            r4 += 4 * 4;
        }
    }
}

// Depthwise 5x5 convolution, stride 2, pack-4 layout, MIPS MSA.
// NOTE: this definition continues past the end of this chunk.
static void convdw5x5s2_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const int tailstep = (w - 2 * outw + w) * 4; // row advance after each output row (stride 2)

    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + g * 4, 0) : (v4f32)__msa_fill_w(0);

        const float* k0 = kernel.row(g);

        float* outptr0 = out;

        const Mat img0 = bottom_blob.channel(g);

        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);
        const float* r3 = img0.row(3);
        const float* r4 = img0.row(4);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                __builtin_prefetch(r0 + 32);
                __builtin_prefetch(r1 + 32);
                __builtin_prefetch(r2 + 32);
                __builtin_prefetch(r3 + 32);
                __builtin_prefetch(r4 + 32);
                __builtin_prefetch(k0 + 400);

                v4f32 _sum0 = _bias0;

                v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
                v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
                v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);

                v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k03 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k04 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
                _sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
                _sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
                _sum0 = __msa_fmadd_w(_sum0, _k03, _r03);
                _sum0 = __msa_fmadd_w(_sum0, _k04, _r04);

                v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
                v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
                v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
                v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
                v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0);

                v4f32 _k10 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k13 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k14 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
                _sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
                _sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
                _sum0 = __msa_fmadd_w(_sum0, _k13, _r13);
                _sum0 = __msa_fmadd_w(_sum0, _k14, _r14);

                v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
                v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0); v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0); v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0); v4f32 _k20 = (v4f32)__msa_ld_w(k0, 0); v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4, 0); v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0); v4f32 _k23 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0); v4f32 _k24 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0); k0 += 4 * 5; _sum0 = __msa_fmadd_w(_sum0, _k20, _r20); _sum0 = __msa_fmadd_w(_sum0, _k21, _r21); _sum0 = __msa_fmadd_w(_sum0, _k22, _r22); _sum0 = __msa_fmadd_w(_sum0, _k23, _r23); _sum0 = __msa_fmadd_w(_sum0, _k24, _r24); v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0); v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0); v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0); v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0); v4f32 _r34 = (v4f32)__msa_ld_w(r3 + 4 * 4, 0); v4f32 _k30 = (v4f32)__msa_ld_w(k0, 0); v4f32 _k31 = (v4f32)__msa_ld_w(k0 + 4, 0); v4f32 _k32 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0); v4f32 _k33 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0); v4f32 _k34 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0); k0 += 4 * 5; _sum0 = __msa_fmadd_w(_sum0, _k30, _r30); _sum0 = __msa_fmadd_w(_sum0, _k31, _r31); _sum0 = __msa_fmadd_w(_sum0, _k32, _r32); _sum0 = __msa_fmadd_w(_sum0, _k33, _r33); _sum0 = __msa_fmadd_w(_sum0, _k34, _r34); v4f32 _r40 = (v4f32)__msa_ld_w(r4, 0); v4f32 _r41 = (v4f32)__msa_ld_w(r4 + 4, 0); v4f32 _r42 = (v4f32)__msa_ld_w(r4 + 4 * 2, 0); v4f32 _r43 = (v4f32)__msa_ld_w(r4 + 4 * 3, 0); v4f32 _r44 = (v4f32)__msa_ld_w(r4 + 4 * 4, 0); v4f32 _k40 = (v4f32)__msa_ld_w(k0, 0); v4f32 _k41 = (v4f32)__msa_ld_w(k0 + 4, 0); v4f32 _k42 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0); v4f32 _k43 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0); v4f32 _k44 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0); k0 -= 4 * 20; _sum0 = __msa_fmadd_w(_sum0, _k40, _r40); _sum0 = __msa_fmadd_w(_sum0, _k41, _r41); _sum0 = __msa_fmadd_w(_sum0, _k42, _r42); _sum0 = __msa_fmadd_w(_sum0, _k43, _r43); _sum0 = __msa_fmadd_w(_sum0, _k44, _r44); __msa_st_w((v4i32)_sum0, outptr0, 0); outptr0 += 4; r0 += 4 * 2; r1 += 4 
* 2; r2 += 4 * 2; r3 += 4 * 2; r4 += 4 * 2; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; } } }
GB_unaryop__minv_int64_int64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__minv_int64_int64
// op(A') function: GB_tran__minv_int64_int64

// C type: int64_t
// A type: int64_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 64)

// type of the entries of the input matrix A
#define GB_ATYPE \
    int64_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

// access the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 64) ;

// casting
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
// NOTE: this macro is also consumed by the #include'd
// GB_unaryop_transpose.c below; keep its contract stable.
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;   \
    /* Cx [pC] = op (cast (aij)) */  \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the MINV (integer multiplicative inverse) operator elementwise to
// the anz entries of Ax, writing the results to Cx.  The loop is embarrassingly
// parallel, so a static OpenMP schedule over nthreads threads is used.
// Returns GrB_NO_VALUE when this specialization is compiled out (GB_DISABLE).
GrB_Info GB_unop__minv_int64_int64
(
    int64_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unaryop_transpose.c, which is
// specialized here via the GB_* macros defined above.
GrB_Info GB_tran__minv_int64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
dger_2_save.c
#define max(a,b) (((a) < (b))? (b) : (a))
#define min(a,b) (((a) < (b))? (a) : (b))
#ifdef _OPENMP
#include <omp.h>
#endif
#define _NB_1 1

/*
 * dger: BLAS level-2 rank-1 update on a column-major matrix:
 *
 *     A := A + alpha * X * Y^T
 *
 * M     number of rows of A (and elements of X)
 * N     number of columns of A (and elements of Y)
 * alpha scalar multiplier applied to the outer product
 * X     vector of length M, read with stride incX
 * incX  stride between consecutive elements of X
 * Y     vector of length N, read with stride incY
 * incY  stride between consecutive elements of Y
 * A     column-major M x N matrix, leading dimension lda (lda >= M)
 * lda   leading dimension of A
 *
 * Fixes over the previous tiled version:
 *  - `alpha` was accepted but never used (the update was always
 *    A += X*Y^T regardless of alpha); it is now applied per the
 *    BLAS dger contract.
 *  - `incX` was ignored (X was always indexed with stride 1); it is
 *    now honored.
 *  - omp_set_num_threads(16) hard-coded the thread count, overriding
 *    the caller's OMP_NUM_THREADS; the thread count is now left to
 *    the runtime/environment.
 *
 * Columns are independent, so the j loop is parallelized; each thread
 * writes a disjoint set of columns of A.
 */
void dger(const int M, const int N, const double alpha,
          const double *X, const int incX,
          const double *Y, const int incY,
          double *A, const int lda)
{
    #pragma omp parallel for
    for (int j = 0; j < N; j++)
    {
        /* Hoist the column-invariant factor alpha * Y[j] out of the i loop. */
        const double tmp = alpha * Y[j * incY];
        if (tmp != 0.0)
        {
            /* Column j of A occupies A[j*lda .. j*lda + M - 1]. */
            for (int i = 0; i < M; i++)
            {
                A[i + j * lda] += X[i * incX] * tmp;
            }
        }
    }
}
convolution_5x5_pack4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv5x5s1_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { Mat out0 = top_blob.channel(p); float32x4_t _bias0 = bias ? 
vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); out0.fill(_bias0); for (int q=0; q<inch; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); const float* r3 = img0.row(3); const float* r4 = img0.row(4); const float* kptr = (const float*)kernel.channel(p).row(q); int i = 0; for (; i < outh; i++) { int j = 0; for (; j+3<outw; j+=4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n"// sum0 sum1 sum2 sum3 "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"// r00 r01 r02 r03 "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v1.s[1] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "fmla v23.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v1.s[2] \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v1.s[3] \n" "fmla v22.4s, v19.4s, v2.s[3] \n" "fmla v23.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1] \n"// r04 r05 r06 r07 "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "fmla v23.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v2.s[2] \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" 
"fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "fmla v22.4s, v27.4s, v3.s[3] \n" "fmla v23.4s, v27.4s, v4.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v5.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v3.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v5.s[3] \n" "fmla v20.4s, v24.4s, v3.s[0] \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v3.s[2] \n" "fmla v21.4s, v26.4s, v4.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v6.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"// r10 r11 r12 r13 "fmla v20.4s, v16.4s, v4.s[0] \n" "fmla v21.4s, v16.4s, v5.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v7.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v7.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v4.s[2] \n" "fmla v21.4s, v18.4s, v5.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v7.s[2] \n" "fmla v20.4s, 
v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v7.s[3] \n" "fmla v20.4s, v24.4s, v0.s[0] \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v1.s[1] \n" "fmla v22.4s, v25.4s, v2.s[1] \n" "fmla v23.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v0.s[2] \n" "fmla v21.4s, v26.4s, v1.s[2] \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v1.s[3] \n" "fmla v22.4s, v27.4s, v2.s[3] \n" "fmla v23.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2] \n"// r14 r15 r16 r17 "fmla v20.4s, v16.4s, v1.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v3.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v3.s[1] \n" "fmla v23.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v1.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v3.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v3.s[3] \n" "fmla v23.4s, v19.4s, v4.s[3] \n" "fmla v20.4s, v24.4s, v2.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "fmla v23.4s, v25.4s, v5.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v2.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] 
\n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v4.s[3] \n" "fmla v23.4s, v27.4s, v5.s[3] \n" "fmla v20.4s, v16.4s, v3.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v5.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v5.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v3.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v5.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v5.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r20 r21 r22 r23 "fmla v20.4s, v24.4s, v4.s[0] \n" "fmla v21.4s, v24.4s, v5.s[0] \n" "fmla v22.4s, v24.4s, v6.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "fmla v22.4s, v25.4s, v6.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v4.s[2] \n" "fmla v21.4s, v26.4s, v5.s[2] \n" "fmla v22.4s, v26.4s, v6.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "fmla v22.4s, v27.4s, v6.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v1.s[1] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "fmla v23.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v1.s[2] \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla 
v21.4s, v19.4s, v1.s[3] \n" "fmla v22.4s, v19.4s, v2.s[3] \n" "fmla v23.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3] \n"// r24 r25 r26 r27 "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "fmla v23.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v2.s[2] \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "fmla v22.4s, v27.4s, v3.s[3] \n" "fmla v23.4s, v27.4s, v4.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v5.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v3.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v5.s[3] \n" "fmla v20.4s, v24.4s, v3.s[0] \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v3.s[2] \n" "fmla v21.4s, v26.4s, v4.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, 
v4.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v6.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%4], #64 \n"// r30 r31 r32 r33 "fmla v20.4s, v16.4s, v4.s[0] \n" "fmla v21.4s, v16.4s, v5.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v7.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v7.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v4.s[2] \n" "fmla v21.4s, v18.4s, v5.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v7.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v7.s[3] \n" "fmla v20.4s, v24.4s, v0.s[0] \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v1.s[1] \n" "fmla v22.4s, v25.4s, v2.s[1] \n" "fmla v23.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v0.s[2] \n" "fmla v21.4s, v26.4s, v1.s[2] \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v1.s[3] \n" "fmla v22.4s, v27.4s, v2.s[3] \n" "fmla v23.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4] \n"// r34 r35 r36 r37 "fmla v20.4s, v16.4s, v1.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v3.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v3.s[1] \n" "fmla v23.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v1.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v3.s[2] \n" "fmla 
v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v3.s[3] \n" "fmla v23.4s, v19.4s, v4.s[3] \n" "fmla v20.4s, v24.4s, v2.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "fmla v23.4s, v25.4s, v5.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v2.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v4.s[3] \n" "fmla v23.4s, v27.4s, v5.s[3] \n" "fmla v20.4s, v16.4s, v3.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v5.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v5.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v3.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v5.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v5.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"// r40 r41 r42 r43 "fmla v20.4s, v24.4s, v4.s[0] \n" "fmla v21.4s, v24.4s, v5.s[0] \n" "fmla v22.4s, v24.4s, v6.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "fmla v22.4s, v25.4s, v6.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v4.s[2] \n" "fmla v21.4s, v26.4s, v5.s[2] \n" "fmla v22.4s, v26.4s, v6.s[2] \n" "fmla v23.4s, 
v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "fmla v22.4s, v27.4s, v6.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v1.s[1] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "fmla v23.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v1.s[2] \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v1.s[3] \n" "fmla v22.4s, v19.4s, v2.s[3] \n" "fmla v23.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%5] \n"// r44 r45 r46 r47 "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "fmla v23.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v2.s[2] \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "fmla v22.4s, v27.4s, v3.s[3] \n" "fmla v23.4s, v27.4s, v4.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v5.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v3.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] 
\n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v5.s[3] \n" "fmla v20.4s, v24.4s, v3.s[0] \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v6.s[1] \n" // "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6] \n" "fmla v20.4s, v26.4s, v3.s[2] \n" "fmla v21.4s, v26.4s, v4.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v6.s[3] \n" "fmla v20.4s, v16.4s, v4.s[0] \n" "fmla v21.4s, v16.4s, v5.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v7.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v7.s[1] \n" "fmla v20.4s, v18.4s, v4.s[2] \n" "fmla v21.4s, v18.4s, v5.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v7.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v7.s[3] \n" "sub %6, %6, #1536 \n"// kptr -= 24 * 16; "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); #else // __aarch64__ asm volatile( "pld [%0, #512] \n" "vldm %0, {d24-d31} \n"// sum0 sum1 sum2 sum3 "pld [%1, #512] \n" "vldm %1!, {d0-d7} \n"// r00 r01 r02 r03 "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, 
d2[0] \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d3[0] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d3[1] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%1, #512] \n" "vldm %1, {d8-d15} \n"// r04 r05 r06 r07 "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d7[1] \n" "vmla.f32 q15, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d10[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d11[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d6[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d7[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 
q14, q11, d11[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n"// r10 r11 r12 r13 "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d10[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d11[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d11[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d15[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d3[0] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d3[1] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%2, #512] \n" "vldm %2, {d8-d15} \n"// r14 r15 r16 r17 "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d7[1] \n" "vmla.f32 q15, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 
q15, q9, d10[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d11[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d6[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d7[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%3, #512] \n" "vldm %3!, {d0-d7} \n"// r20 r21 r22 r23 "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d10[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d11[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d11[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d15[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d3[0] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d3[1] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%3, #512] \n" "vldm %3, {d8-d15} \n"// r24 r25 r26 r27 
"vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d7[1] \n" "vmla.f32 q15, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d10[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d11[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d6[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d7[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%4, #512] \n" "vldm %4!, {d0-d7} \n"// r30 r31 r32 r33 "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d10[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d11[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, 
q11, d9[1] \n" "vmla.f32 q13, q11, d11[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d15[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d3[0] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d3[1] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%4, #512] \n" "vldm %4, {d8-d15} \n"// r34 r35 r36 r37 "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d7[1] \n" "vmla.f32 q15, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d10[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d11[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d6[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, 
d12[1] \n" "vmla.f32 q12, q10, d7[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n"// r40 r41 r42 r43 "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d10[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d11[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d11[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d15[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d3[0] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d3[1] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%5, #512] \n" "vldm %5, {d8-d15} \n"// r44 r45 r46 r47 "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d7[1] \n" "vmla.f32 q15, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, 
q8, d4[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d10[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d11[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d6[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d7[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d13[1] \n" // "pld [%6, #512] \n" "vldm %6, {d16-d23} \n" "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d10[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d11[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d11[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d15[1] \n" "sub %6, %6, #1536 \n"// kptr -= 24 * 16; "vstm %0!, {d24-d31} \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } for (; j+1<outw; j+=2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] 
\n" "ld1 {v20.4s, v21.4s}, [%0] \n"// sum0 sum1 "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4s, v1.4s}, [%1], #32 \n"// r00 r01 "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmul v22.4s, v16.4s, v0.s[0] \n" "fmul v23.4s, v16.4s, v1.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v1.s[3] \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v2.4s, v3.4s, v4.4s, v5.4s}, [%1] \n"// r02 r03 r04 r05 "fmla v22.4s, v24.4s, v1.s[0] \n" "fmla v23.4s, v24.4s, v2.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4s, v1.4s}, [%2], #32 \n"// r10 r11 "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, 
v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fmla v22.4s, v24.4s, v0.s[0] \n" "fmla v23.4s, v24.4s, v1.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v0.s[2] \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v1.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v2.4s, v3.4s, v4.4s, v5.4s}, [%2] \n"// r12 r13 r14 r15 "fmla v22.4s, v16.4s, v1.s[0] \n" "fmla v23.4s, v16.4s, v2.s[0] \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v1.s[2] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v16.4s, v3.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v3.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4s, v1.4s}, [%3], #32 \n"// r20 r21 "fmla v22.4s, v24.4s, v4.s[0] \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla 
v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "fmla v22.4s, v16.4s, v0.s[0] \n" "fmla v23.4s, v16.4s, v1.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v1.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v2.4s, v3.4s, v4.4s, v5.4s}, [%3] \n"// r22 r23 r24 r25 "fmla v22.4s, v24.4s, v1.s[0] \n" "fmla v23.4s, v24.4s, v2.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v0.4s, v1.4s}, [%4], #32 \n"// r30 r31 "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, 
v18.4s, v5.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fmla v22.4s, v24.4s, v0.s[0] \n" "fmla v23.4s, v24.4s, v1.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v0.s[2] \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v1.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v2.4s, v3.4s, v4.4s, v5.4s}, [%4] \n"// r32 r33 r34 r35 "fmla v22.4s, v16.4s, v1.s[0] \n" "fmla v23.4s, v16.4s, v2.s[0] \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v1.s[2] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v16.4s, v3.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v3.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4s, v1.4s}, [%5], #32 \n"// r40 r41 "fmla v22.4s, v24.4s, v4.s[0] \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "fmla v20.4s, v27.4s, 
v4.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "fmla v22.4s, v16.4s, v0.s[0] \n" "fmla v23.4s, v16.4s, v1.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v1.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v2.4s, v3.4s, v4.4s, v5.4s}, [%5] \n"// r42 r43 r44 r45 "fmla v22.4s, v24.4s, v1.s[0] \n" "fmla v23.4s, v24.4s, v2.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" // "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6] \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "sub %6, %6, #1536 \n"// kptr -= 24 * 16; "st1 {v20.4s, v21.4s}, [%0], #32 \n" : "=r"(outptr0), 
// %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); #else // __aarch64__ asm volatile( "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128] \n"// sum0 sum1 "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n"// r00 r01 "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmul.f32 q14, q8, d0[0] \n" "vmul.f32 q15, q8, d2[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d3[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%1, #512] \n" "vldm %1, {d4-d11} \n"// r02 r03 r04 r05 "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! 
\n"// r10 r11 "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d11[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d2[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d3[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%2, #512] \n" "vldm %2, {d4-d11} \n"// r12 r13 r14 r15 "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! 
\n"// r20 r21 "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d11[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d2[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d3[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%3, #512] \n" "vldm %3, {d4-d11} \n"// r22 r23 r24 r25 "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%4, #256] \n" "vld1.f32 {d0-d3}, [%4 :128]! 
\n"// r30 r31 "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d11[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d2[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d3[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%4, #512] \n" "vldm %4, {d4-d11} \n"// r32 r33 r34 r35 "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%5, #256] \n" "vld1.f32 {d0-d3}, [%5 :128]! 
\n"// r40 r41 "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d11[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d2[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d3[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%5, #512] \n" "vldm %5, {d4-d11} \n"// r42 r43 r44 r45 "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d9[1] \n" // "pld [%6, #512] \n" "vldm %6, {d16-d23} \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d11[1] \n" "vadd.f32 q12, q12, q14 \n" "vadd.f32 q13, q13, q15 \n" "sub %6, %6, #1536 \n"// kptr -= 24 * 16; "vst1.f32 {d24-d27}, [%0 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } for (; j<outw; j++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n"// sum0 "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.4s}, [%1], #16 \n"// r00 "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v1.4s, v2.4s, v3.4s, v4.4s}, [%1] \n"// r01 r02 r03 r04 "fmul v21.4s, v16.4s, v0.s[0] \n" "fmul v22.4s, v17.4s, v0.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmul v23.4s, v18.4s, v0.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n"// r10 "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v1.4s, v2.4s, v3.4s, v4.4s}, [%2] \n"// r11 r12 r13 r14 "fmla v21.4s, v24.4s, v0.s[0] \n" "fmla 
v22.4s, v25.4s, v0.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v0.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "fmla v22.4s, v17.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v25.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4s}, [%3], #16 \n"// r20 "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v1.4s, v2.4s, v3.4s, v4.4s}, [%3] \n"// r21 r22 r23 r24 "fmla v21.4s, v16.4s, v0.s[0] \n" "fmla v22.4s, v17.4s, v0.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v0.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 
{v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v0.4s}, [%4], #16 \n"// r30 "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v1.4s, v2.4s, v3.4s, v4.4s}, [%4] \n"// r31 r32 r33 r34 "fmla v21.4s, v24.4s, v0.s[0] \n" "fmla v22.4s, v25.4s, v0.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v0.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v16.4s, v1.s[0] \n" "fmla v22.4s, v17.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v25.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n"// r40 "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v1.4s, v2.4s, v3.4s, v4.4s}, [%5] \n"// r41 r42 r43 r44 "fmla v21.4s, v16.4s, v0.s[0] \n" "fmla v22.4s, v17.4s, v0.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v0.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, 
v24.4s, v1.s[0] \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v25.4s, v3.s[1] \n" // "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fadd v22.4s, v21.4s, v22.4s \n" "fadd v23.4s, v22.4s, v23.4s \n" "fadd v20.4s, v20.4s, v23.4s \n" "sub %6, %6, #1536 \n"// kptr -= 24 * 16; "st1 {v20.4s}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); #else // __aarch64__ asm volatile( "pld [%0, #128] \n" "vld1.f32 {d24-d25}, [%0 :128] \n"// sum0 "pld [%1, #128] \n" "vld1.f32 {d0-d1}, [%1 :128]! 
\n"// r00 "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmul.f32 q13, q8, d0[0] \n" "vmul.f32 q14, q9, d0[1] \n" "vmul.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%1, #512] \n" "vldm %1, {d2-d9} \n"// r01 r02 r03 r04 "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n"// r10 "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%2, #512] \n" "vldm %2, {d2-d9} \n"// r11 r12 r13 r14 "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%3, #128] \n" "vld1.f32 {d0-d1}, [%3 :128]! 
\n"// r20 "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%3, #512] \n" "vldm %3, {d2-d9} \n"// r21 r22 r23 r24 "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%4, #128] \n" "vld1.f32 {d0-d1}, [%4 :128]! \n"// r30 "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%4, #512] \n" "vldm %4, {d2-d9} \n"// r31 r32 r33 r34 "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! 
\n"// r40 "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%5, #512] \n" "vldm %5, {d2-d9} \n"// r41 r42 r43 r44 "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d7[1] \n" // "pld [%6, #512] \n" "vldm %6, {d16-d23} \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vadd.f32 q13, q13, q14 \n" "vadd.f32 q12, q12, q15 \n" "vadd.f32 q12, q12, q13 \n" "sub %6, %6, #1536 \n"// kptr -= 24 * 16; "vst1.f32 {d24-d25}, [%0 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } r0 += 4*4; r1 += 4*4; r2 += 4*4; r3 += 4*4; r4 += 4*4; } } } } static void conv5x5s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = (w - 2*outw + w) * 4; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { Mat out0 = top_blob.channel(p); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); out0.fill(_bias0); for (int q=0; q<inch; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); const float* r3 = img0.row(3); const float* r4 = img0.row(4); const float* kptr = (const float*)kernel.channel(p).row(q); int i = 0; for (; i < outh; i++) { int j = 0; for (; j+3<outw; j+=4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n"// sum0 sum1 sum2 sum3 "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"// r00 r01 r02 r03 "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"// r04 r05 r06 r07 "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, 
v6.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%1, #384] \n" "ld1 {v28.4s, v29.4s, v30.4s}, [%1] \n"// r08 r09 r010 "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v28.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v28.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v28.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v28.s[3] \n" "fmla v20.4s, v24.4s, v3.s[0] \n" "fmla v21.4s, v24.4s, v5.s[0] \n" "fmla v22.4s, v24.4s, v7.s[0] \n" "fmla v23.4s, v24.4s, v29.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "fmla v22.4s, v25.4s, v7.s[1] \n" "fmla v23.4s, v25.4s, v29.s[1] \n" "prfm 
pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v3.s[2] \n" "fmla v21.4s, v26.4s, v5.s[2] \n" "fmla v22.4s, v26.4s, v7.s[2] \n" "fmla v23.4s, v26.4s, v29.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "fmla v22.4s, v27.4s, v7.s[3] \n" "fmla v23.4s, v27.4s, v29.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"// r10 r11 r12 r13 "fmla v20.4s, v16.4s, v4.s[0] \n" "fmla v21.4s, v16.4s, v6.s[0] \n" "fmla v22.4s, v16.4s, v28.s[0] \n" "fmla v23.4s, v16.4s, v30.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v6.s[1] \n" "fmla v22.4s, v17.4s, v28.s[1] \n" "fmla v23.4s, v17.4s, v30.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v4.s[2] \n" "fmla v21.4s, v18.4s, v6.s[2] \n" "fmla v22.4s, v18.4s, v28.s[2] \n" "fmla v23.4s, v18.4s, v30.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v6.s[3] \n" "fmla v22.4s, v19.4s, v28.s[3] \n" "fmla v23.4s, v19.4s, v30.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n"// r14 r15 r16 r17 "fmla v20.4s, v24.4s, v0.s[0] \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "fmla v23.4s, v25.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v0.s[2] \n" "fmla v21.4s, v26.4s, v2.s[2] \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "fmla v22.4s, v27.4s, v4.s[3] \n" "fmla v23.4s, v27.4s, v6.s[3] \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v28.4s, v29.4s, v30.4s}, [%2] \n"// r18 r19 r110 "fmla v20.4s, v16.4s, v1.s[0] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v16.4s, 
v5.s[0] \n" "fmla v23.4s, v16.4s, v7.s[0] \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "fmla v22.4s, v17.4s, v5.s[1] \n" "fmla v23.4s, v17.4s, v7.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v1.s[2] \n" "fmla v21.4s, v18.4s, v3.s[2] \n" "fmla v22.4s, v18.4s, v5.s[2] \n" "fmla v23.4s, v18.4s, v7.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v19.4s, v5.s[3] \n" "fmla v23.4s, v19.4s, v7.s[3] \n" "fmla v20.4s, v24.4s, v2.s[0] \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v24.4s, v6.s[0] \n" "fmla v23.4s, v24.4s, v28.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "fmla v22.4s, v25.4s, v6.s[1] \n" "fmla v23.4s, v25.4s, v28.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v2.s[2] \n" "fmla v21.4s, v26.4s, v4.s[2] \n" "fmla v22.4s, v26.4s, v6.s[2] \n" "fmla v23.4s, v26.4s, v28.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "fmla v22.4s, v27.4s, v6.s[3] \n" "fmla v23.4s, v27.4s, v28.s[3] \n" "fmla v20.4s, v16.4s, v3.s[0] \n" "fmla v21.4s, v16.4s, v5.s[0] \n" "fmla v22.4s, v16.4s, v7.s[0] \n" "fmla v23.4s, v16.4s, v29.s[0] \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "fmla v22.4s, v17.4s, v7.s[1] \n" "fmla v23.4s, v17.4s, v29.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v3.s[2] \n" "fmla v21.4s, v18.4s, v5.s[2] \n" "fmla v22.4s, v18.4s, v7.s[2] \n" "fmla v23.4s, v18.4s, v29.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fmla v22.4s, v19.4s, v7.s[3] \n" "fmla v23.4s, v19.4s, v29.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r20 r21 r22 r23 "fmla v20.4s, v24.4s, v4.s[0] \n" "fmla v21.4s, v24.4s, v6.s[0] \n" "fmla v22.4s, v24.4s, 
v28.s[0] \n" "fmla v23.4s, v24.4s, v30.s[0] \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v6.s[1] \n" "fmla v22.4s, v25.4s, v28.s[1] \n" "fmla v23.4s, v25.4s, v30.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v4.s[2] \n" "fmla v21.4s, v26.4s, v6.s[2] \n" "fmla v22.4s, v26.4s, v28.s[2] \n" "fmla v23.4s, v26.4s, v30.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v6.s[3] \n" "fmla v22.4s, v27.4s, v28.s[3] \n" "fmla v23.4s, v27.4s, v30.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n"// r24 r25 r26 r27 "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v28.4s, v29.4s, v30.4s}, [%3] \n"// r28 r29 r210 "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "fmla 
v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v28.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v28.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v28.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v28.s[3] \n" "fmla v20.4s, v24.4s, v3.s[0] \n" "fmla v21.4s, v24.4s, v5.s[0] \n" "fmla v22.4s, v24.4s, v7.s[0] \n" "fmla v23.4s, v24.4s, v29.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "fmla v22.4s, v25.4s, v7.s[1] \n" "fmla v23.4s, v25.4s, v29.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v3.s[2] \n" "fmla v21.4s, v26.4s, v5.s[2] \n" "fmla v22.4s, v26.4s, v7.s[2] \n" "fmla v23.4s, v26.4s, v29.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "fmla v22.4s, v27.4s, v7.s[3] \n" "fmla v23.4s, v27.4s, v29.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%4], #64 \n"// r30 r31 r32 r33 "fmla v20.4s, v16.4s, v4.s[0] \n" "fmla v21.4s, v16.4s, v6.s[0] \n" "fmla v22.4s, v16.4s, v28.s[0] \n" "fmla v23.4s, v16.4s, v30.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v6.s[1] \n" "fmla v22.4s, v17.4s, v28.s[1] \n" "fmla v23.4s, v17.4s, v30.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v4.s[2] \n" "fmla v21.4s, v18.4s, v6.s[2] \n" "fmla v22.4s, v18.4s, v28.s[2] \n" "fmla v23.4s, v18.4s, v30.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v6.s[3] \n" "fmla v22.4s, v19.4s, v28.s[3] \n" "fmla v23.4s, v19.4s, v30.s[3] \n" 
"prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"// r34 r35 r36 r37 "fmla v20.4s, v24.4s, v0.s[0] \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v24.4s, v4.s[0] \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "fmla v23.4s, v25.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v0.s[2] \n" "fmla v21.4s, v26.4s, v2.s[2] \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "fmla v22.4s, v27.4s, v4.s[3] \n" "fmla v23.4s, v27.4s, v6.s[3] \n" "prfm pldl1keep, [%4, #384] \n" "ld1 {v28.4s, v29.4s, v30.4s}, [%4] \n"// r38 r39 r310 "fmla v20.4s, v16.4s, v1.s[0] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v16.4s, v5.s[0] \n" "fmla v23.4s, v16.4s, v7.s[0] \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "fmla v22.4s, v17.4s, v5.s[1] \n" "fmla v23.4s, v17.4s, v7.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v1.s[2] \n" "fmla v21.4s, v18.4s, v3.s[2] \n" "fmla v22.4s, v18.4s, v5.s[2] \n" "fmla v23.4s, v18.4s, v7.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v19.4s, v5.s[3] \n" "fmla v23.4s, v19.4s, v7.s[3] \n" "fmla v20.4s, v24.4s, v2.s[0] \n" "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v24.4s, v6.s[0] \n" "fmla v23.4s, v24.4s, v28.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "fmla v22.4s, v25.4s, v6.s[1] \n" "fmla v23.4s, v25.4s, v28.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v2.s[2] \n" "fmla v21.4s, v26.4s, v4.s[2] \n" "fmla v22.4s, v26.4s, v6.s[2] \n" "fmla v23.4s, v26.4s, v28.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, 
v4.s[3] \n" "fmla v22.4s, v27.4s, v6.s[3] \n" "fmla v23.4s, v27.4s, v28.s[3] \n" "fmla v20.4s, v16.4s, v3.s[0] \n" "fmla v21.4s, v16.4s, v5.s[0] \n" "fmla v22.4s, v16.4s, v7.s[0] \n" "fmla v23.4s, v16.4s, v29.s[0] \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "fmla v22.4s, v17.4s, v7.s[1] \n" "fmla v23.4s, v17.4s, v29.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v3.s[2] \n" "fmla v21.4s, v18.4s, v5.s[2] \n" "fmla v22.4s, v18.4s, v7.s[2] \n" "fmla v23.4s, v18.4s, v29.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "fmla v22.4s, v19.4s, v7.s[3] \n" "fmla v23.4s, v19.4s, v29.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"// r40 r41 r42 r43 "fmla v20.4s, v24.4s, v4.s[0] \n" "fmla v21.4s, v24.4s, v6.s[0] \n" "fmla v22.4s, v24.4s, v28.s[0] \n" "fmla v23.4s, v24.4s, v30.s[0] \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v6.s[1] \n" "fmla v22.4s, v25.4s, v28.s[1] \n" "fmla v23.4s, v25.4s, v30.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v4.s[2] \n" "fmla v21.4s, v26.4s, v6.s[2] \n" "fmla v22.4s, v26.4s, v28.s[2] \n" "fmla v23.4s, v26.4s, v30.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v6.s[3] \n" "fmla v22.4s, v27.4s, v28.s[3] \n" "fmla v23.4s, v27.4s, v30.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%5], #64 \n"// r44 r45 r46 r47 "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, 
v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%5, #384] \n" "ld1 {v28.4s, v29.4s, v30.4s}, [%5] \n"// r48 r49 r410 "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v28.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v28.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v28.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v28.s[3] \n" "fmla v20.4s, v24.4s, v3.s[0] \n" "fmla v21.4s, v24.4s, v5.s[0] \n" "fmla v22.4s, v24.4s, v7.s[0] \n" "fmla v23.4s, v24.4s, v29.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "fmla v22.4s, v25.4s, v7.s[1] \n" "fmla v23.4s, v25.4s, v29.s[1] \n" // "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6] \n" "fmla v20.4s, v26.4s, v3.s[2] \n" "fmla v21.4s, v26.4s, v5.s[2] \n" "fmla v22.4s, v26.4s, v7.s[2] \n" "fmla 
v23.4s, v26.4s, v29.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "fmla v22.4s, v27.4s, v7.s[3] \n" "fmla v23.4s, v27.4s, v29.s[3] \n" "fmla v20.4s, v16.4s, v4.s[0] \n" "fmla v21.4s, v16.4s, v6.s[0] \n" "fmla v22.4s, v16.4s, v28.s[0] \n" "fmla v23.4s, v16.4s, v30.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v6.s[1] \n" "fmla v22.4s, v17.4s, v28.s[1] \n" "fmla v23.4s, v17.4s, v30.s[1] \n" "fmla v20.4s, v18.4s, v4.s[2] \n" "fmla v21.4s, v18.4s, v6.s[2] \n" "fmla v22.4s, v18.4s, v28.s[2] \n" "fmla v23.4s, v18.4s, v30.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v6.s[3] \n" "fmla v22.4s, v19.4s, v28.s[3] \n" "fmla v23.4s, v19.4s, v30.s[3] \n" "sub %6, %6, #1536 \n"// kptr -= 24 * 16; "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" ); #else // __aarch64__ asm volatile( "pld [%0, #512] \n" "vldm %0, {d24-d31} \n"// sum0 sum1 sum2 sum3 "pld [%1, #512] \n" "vldm %1!, {d0-d7} \n"// r00 r01 r02 r03 "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n"// r04 r05 r06 r07 "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 
q13, q8, d6[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d15[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n"// r08 r09 "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d6[0] \n" "vmla.f32 q13, q8, d10[0] \n" "vmla.f32 q14, q8, d14[0] \n" "vmla.f32 q15, q8, d2[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q9, d14[1] \n" "vmla.f32 q15, q9, d2[1] \n" "vmla.f32 q12, q10, d7[0] \n" "vmla.f32 q13, q10, d11[0] \n" "vmla.f32 q14, q10, d15[0] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d11[1] \n" "vmla.f32 q14, q11, d15[1] \n" "vmla.f32 q15, q11, d3[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%1, #128] \n" "vld1.f32 {d4-d5}, [%1 :128] \n"// r010 "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d12[0] \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d13[0] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, 
q10, d5[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "pld [%2, #512] \n" "vldm %2!, {d8-d15} \n"// r10 r11 r12 r13 "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n"// r14 r15 r16 r17 "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d12[0] \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d13[0] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d10[0] \n" "vmla.f32 q13, q8, d14[0] \n" "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d10[1] \n" "vmla.f32 q13, q9, d14[1] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d11[0] \n" "vmla.f32 q13, q10, d15[0] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d11[1] \n" "vmla.f32 q13, q11, d15[1] \n" "vmla.f32 q14, q11, d3[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%2, #256] \n" "vld1.f32 {d8-d11}, [%2 :128]! 
\n"// r18 r19 "vmla.f32 q12, q8, d12[0] \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d12[1] \n" "vmla.f32 q13, q9, d0[1] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d13[0] \n" "vmla.f32 q13, q10, d1[0] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d13[1] \n" "vmla.f32 q13, q11, d1[1] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d14[0] \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d14[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q9, d10[1] \n" "vmla.f32 q12, q10, d15[0] \n" "vmla.f32 q13, q10, d3[0] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d15[1] \n" "vmla.f32 q13, q11, d3[1] \n" "vmla.f32 q14, q11, d7[1] \n" "vmla.f32 q15, q11, d11[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%2, #128] \n" "vld1.f32 {d12-d13}, [%2 :128] \n"// r110 "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%3, #512] \n" "vldm %3!, {d0-d7} \n"// r20 r21 r22 r23 "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n"// r24 r25 r26 r27 "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" 
"vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d15[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n"// r28 r29 "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d6[0] \n" "vmla.f32 q13, q8, d10[0] \n" "vmla.f32 q14, q8, d14[0] \n" "vmla.f32 q15, q8, d2[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q9, d14[1] \n" "vmla.f32 q15, q9, d2[1] \n" "vmla.f32 q12, q10, d7[0] \n" "vmla.f32 q13, q10, d11[0] \n" "vmla.f32 q14, q10, d15[0] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d11[1] \n" "vmla.f32 q14, q11, d15[1] \n" "vmla.f32 q15, q11, d3[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%3, #128] \n" "vld1.f32 {d4-d5}, [%3 :128] \n"// r210 "vmla.f32 q12, 
q8, d8[0] \n" "vmla.f32 q13, q8, d12[0] \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d13[0] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n"// r30 r31 r32 r33 "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%4, #512] \n" "vldm %4!, {d0-d7} \n"// r34 r35 r36 r37 "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d12[0] \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d13[0] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d10[0] \n" "vmla.f32 q13, q8, d14[0] \n" "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d10[1] \n" "vmla.f32 q13, q9, d14[1] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d11[0] \n" "vmla.f32 q13, q10, d15[0] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d11[1] \n" "vmla.f32 q13, q11, d15[1] \n" "vmla.f32 q14, q11, d3[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%4, #256] \n" "vld1.f32 {d8-d11}, [%4 :128]! 
\n"// r38 r39 "vmla.f32 q12, q8, d12[0] \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d12[1] \n" "vmla.f32 q13, q9, d0[1] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d13[0] \n" "vmla.f32 q13, q10, d1[0] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d13[1] \n" "vmla.f32 q13, q11, d1[1] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d14[0] \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d14[1] \n" "vmla.f32 q13, q9, d2[1] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q9, d10[1] \n" "vmla.f32 q12, q10, d15[0] \n" "vmla.f32 q13, q10, d3[0] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d15[1] \n" "vmla.f32 q13, q11, d3[1] \n" "vmla.f32 q14, q11, d7[1] \n" "vmla.f32 q15, q11, d11[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%4, #128] \n" "vld1.f32 {d12-d13}, [%4 :128] \n"// r310 "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n"// r40 r41 r42 r43 "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%5, #512] \n" "vldm %5!, {d8-d15} \n"// r44 r45 r46 r47 "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" 
"vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d15[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%5, #256] \n" "vld1.f32 {d0-d3}, [%5 :128]! \n"// r48 r49 "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q12, q8, d6[0] \n" "vmla.f32 q13, q8, d10[0] \n" "vmla.f32 q14, q8, d14[0] \n" "vmla.f32 q15, q8, d2[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q9, d14[1] \n" "vmla.f32 q15, q9, d2[1] \n" "vmla.f32 q12, q10, d7[0] \n" "vmla.f32 q13, q10, d11[0] \n" "vmla.f32 q14, q10, d15[0] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d11[1] \n" "vmla.f32 q14, q11, d15[1] \n" "vmla.f32 q15, q11, d3[1] \n" // "pld [%6, #512] \n" "vldm %6, {d16-d23} \n" "pld [%5, #128] \n" "vld1.f32 {d4-d5}, [%5 :128] \n"// r410 "vmla.f32 q12, 
q8, d8[0] \n" "vmla.f32 q13, q8, d12[0] \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d13[0] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "sub %6, %6, #1536 \n"// kptr -= 24 * 16; "sub %1, %1, #32 \n" "sub %2, %2, #32 \n" "sub %3, %3, #32 \n" "sub %4, %4, #32 \n" "sub %5, %5, #32 \n" "vstm %0!, {d24-d31} \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } for (; j+1<outw; j+=2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v20.4s, v21.4s}, [%0] \n"// sum0 sum1 "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"// r00 r01 r02 r03 "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmul v22.4s, v16.4s, v0.s[0] \n" "fmul v23.4s, v16.4s, v2.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "prfm pldl1keep, [%1, #384] \n" "ld1 {v4.4s, v5.4s, v6.4s}, [%1] \n"// r04 r05 r06 "fmla v22.4s, v24.4s, v1.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "fmla v23.4s, v26.4s, 
v3.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"// r10 r11 r12 r13 "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v6.s[3] \n" "fmla v22.4s, v24.4s, v0.s[0] \n" "fmla v23.4s, v24.4s, v2.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v0.s[2] \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v4.4s, v5.4s, v6.4s}, [%2] \n"// r14 r15 r16 "fmla v22.4s, v16.4s, v1.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v1.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, 
v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "fmla v22.4s, v16.4s, v3.s[0] \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v3.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r20 r21 r22 r23 "fmla v22.4s, v24.4s, v4.s[0] \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v6.s[3] \n" "fmla v22.4s, v16.4s, v0.s[0] \n" "fmla v23.4s, v16.4s, v2.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v4.4s, v5.4s, v6.4s}, [%3] \n"// r24 r25 r26 "fmla v22.4s, v24.4s, v1.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla 
v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%4], #64 \n"// r30 r31 r32 r33 "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v6.s[3] \n" "fmla v22.4s, v24.4s, v0.s[0] \n" "fmla v23.4s, v24.4s, v2.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v0.s[2] \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "prfm pldl1keep, [%4, #384] \n" "ld1 {v4.4s, v5.4s, v6.4s}, [%4] \n"// r34 r35 r36 "fmla v22.4s, v16.4s, v1.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v1.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" 
"fmla v22.4s, v24.4s, v2.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "fmla v22.4s, v16.4s, v3.s[0] \n" "fmla v23.4s, v16.4s, v5.s[0] \n" "fmla v20.4s, v17.4s, v3.s[1] \n" "fmla v21.4s, v17.4s, v5.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v3.s[2] \n" "fmla v23.4s, v18.4s, v5.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "fmla v21.4s, v19.4s, v5.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"// r40 r41 r42 r43 "fmla v22.4s, v24.4s, v4.s[0] \n" "fmla v23.4s, v24.4s, v6.s[0] \n" "fmla v20.4s, v25.4s, v4.s[1] \n" "fmla v21.4s, v25.4s, v6.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v4.s[2] \n" "fmla v23.4s, v26.4s, v6.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v27.4s, v6.s[3] \n" "fmla v22.4s, v16.4s, v0.s[0] \n" "fmla v23.4s, v16.4s, v2.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "prfm pldl1keep, [%5, #384] \n" "ld1 {v4.4s, v5.4s, v6.4s}, [%5] \n"// r44 r45 r46 "fmla v22.4s, v24.4s, v1.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v16.4s, v2.s[0] 
\n" "fmla v23.4s, v16.4s, v4.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v24.4s, v3.s[0] \n" "fmla v23.4s, v24.4s, v5.s[0] \n" "fmla v20.4s, v25.4s, v3.s[1] \n" "fmla v21.4s, v25.4s, v5.s[1] \n" // "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6] \n" "fmla v22.4s, v26.4s, v3.s[2] \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v27.4s, v5.s[3] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v4.s[1] \n" "fmla v21.4s, v17.4s, v6.s[1] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v19.4s, v6.s[3] \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "sub %6, %6, #1536 \n"// kptr -= 24 * 16; "st1 {v20.4s, v21.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); #else // __aarch64__ asm volatile( "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128] \n"// sum0 sum1 "pld [%1, #512] \n" "vldm %1!, {d0-d7} \n"// r00 r01 r02 r03 "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmul.f32 q14, q8, d0[0] \n" "vmul.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%1, #384] \n" "vldm %1, {d8-d13} \n"// r04 r05 r06 "vmla.f32 q14, q8, 
d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d11[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n"// r10 r11 r12 r13 "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%2, #384] \n" "vldm %2, {d8-d13} \n"// r14 r15 r16 "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d5[1] \n" 
"vmla.f32 q13, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d11[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%3, #512] \n" "vldm %3!, {d0-d7} \n"// r20 r21 r22 r23 "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%3, #384] \n" "vldm %3, {d8-d13} \n"// r24 r25 r26 "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d11[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%4, #512] \n" "vldm %4!, {d0-d7} \n"// r30 r31 r32 r33 "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, 
d12[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%4, #384] \n" "vldm %4, {d8-d13} \n"// r34 r35 r36 "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d11[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n"// r40 r41 r42 r43 "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] 
\n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%5, #384] \n" "vldm %5, {d8-d13} \n"// r44 r45 r46 "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q14, q8, d6[0] \n" "vmla.f32 q15, q8, d10[0] \n" "vmla.f32 q12, q9, d6[1] \n" "vmla.f32 q13, q9, d10[1] \n" "vmla.f32 q14, q10, d7[0] \n" "vmla.f32 q15, q10, d11[0] \n" "vmla.f32 q12, q11, d7[1] \n" "vmla.f32 q13, q11, d11[1] \n" // "pld [%6, #512] \n" "vldm %6, {d16-d23} \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "vadd.f32 q12, q12, q14 \n" "vadd.f32 q13, q13, q15 \n" "sub %6, %6, #1536 \n"// kptr -= 24 * 16; "vst1.f32 {d24-d27}, [%0 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } for (; j<outw; j++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n"// sum0 "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4s, v1.4s}, [%1], #32 \n"// r00 r01 "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmul v21.4s, v16.4s, v0.s[0] \n" "fmul v22.4s, v17.4s, v0.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmul v23.4s, v18.4s, v0.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "prfm pldl1keep, [%1, #384] \n" "ld1 {v2.4s, v3.4s, v4.4s}, [%1] \n"// r02 r03 r04 "fmla v21.4s, v24.4s, v1.s[0] \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4s, v1.4s}, [%2], #32 \n"// r10 r11 "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v24.4s, v0.s[0] \n" "fmla v22.4s, v25.4s, v0.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, 
v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v0.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v2.4s, v3.4s, v4.4s}, [%2] \n"// r12 r13 r14 "fmla v21.4s, v16.4s, v1.s[0] \n" "fmla v22.4s, v17.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v25.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4s, v1.4s}, [%3], #32 \n"// r20 r21 "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v16.4s, v0.s[0] \n" "fmla v22.4s, v17.4s, v0.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v0.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v2.4s, v3.4s, v4.4s}, [%3] \n"// r22 r23 r24 "fmla v21.4s, v24.4s, v1.s[0] \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 
{v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v0.4s, v1.4s}, [%4], #32 \n"// r30 r31 "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fmla v21.4s, v24.4s, v0.s[0] \n" "fmla v22.4s, v25.4s, v0.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v0.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "prfm pldl1keep, [%4, #384] \n" "ld1 {v2.4s, v3.4s, v4.4s}, [%4] \n"// r32 r33 r34 "fmla v21.4s, v16.4s, v1.s[0] \n" "fmla v22.4s, v17.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v1.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v24.4s, v2.s[0] \n" "fmla v22.4s, v25.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v16.4s, v3.s[0] \n" "fmla v22.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v3.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4s, v1.4s}, [%5], #32 \n"// r40 r41 "fmla v21.4s, v24.4s, v4.s[0] \n" "fmla v22.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v4.s[3] \n" "fmla v21.4s, v16.4s, v0.s[0] \n" "fmla v22.4s, v17.4s, v0.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v0.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "prfm pldl1keep, [%5, #384] \n" "ld1 {v2.4s, v3.4s, v4.4s}, [%5] \n"// r42 r43 r44 "fmla v21.4s, 
v24.4s, v1.s[0] \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v25.4s, v3.s[1] \n" // "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "fadd v22.4s, v21.4s, v22.4s \n" "fadd v23.4s, v22.4s, v23.4s \n" "fadd v20.4s, v20.4s, v23.4s \n" "sub %6, %6, #1536 \n"// kptr -= 24 * 16; "st1 {v20.4s}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); #else // __aarch64__ asm volatile( "pld [%0, #128] \n" "vld1.f32 {d24-d25}, [%0 :128] \n"// sum0 "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! 
\n"// r00 r01 "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmul.f32 q13, q8, d0[0] \n" "vmul.f32 q14, q9, d0[1] \n" "vmul.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%1, #384] \n" "vldm %1, {d4-d9} \n"// r02 r03 r04 "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n"// r10 r11 "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%2, #384] \n" "vldm %2, {d4-d9} \n"// r12 r13 r14 "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! 
\n"// r20 r21 "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%3, #384] \n" "vldm %3, {d4-d9} \n"// r22 r23 r24 "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%4, #256] \n" "vld1.f32 {d0-d3}, [%4 :128]! \n"// r30 r31 "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%4, #384] \n" "vldm %4, {d4-d9} \n"// r32 r33 r34 "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d7[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%5, #256] \n" "vld1.f32 {d0-d3}, [%5 :128]! 
\n"// r40 r41 "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d9[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "pld [%5, #384] \n" "vldm %5, {d4-d9} \n"// r42 r43 r44 "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "pld [%6, #512] \n" "vldm %6!, {d16-d23} \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q9, d6[1] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d7[1] \n" // "pld [%6, #512] \n" "vldm %6, {d16-d23} \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vadd.f32 q14, q13, q14 \n" "vadd.f32 q15, q14, q15 \n" "vadd.f32 q12, q12, q15 \n" "sub %6, %6, #1536 \n"// kptr -= 24 * 16; "vst1.f32 {d24-d25}, [%0 :128]! \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; } } } }
GB_unop__ainv_uint32_uint32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__ainv_uint32_uint32
// op(A') function:  GB_unop_tran__ainv_uint32_uint32

// C type:   uint32_t
// A type:   uint32_t
// cast:     uint32_t cij = aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: additive inverse.  For uint32_t, -x is well-defined
// modular negation (wraps mod 2^32) per C unsigned-arithmetic rules.
#define GB_OP(z, x) \
    z = -x ;

// casting (no-op here: A and C are both uint32_t)
#define GB_CAST(z, aij) \
    uint32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint32_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    uint32_t z = aij ;              \
    Cx [pC] = -z ;                  \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = -aij elementwise over anz entries, in parallel across
// nthreads OpenMP threads.  Cx and Ax may alias (the op is elementwise and
// in-place safe).  Ab, when non-NULL, is the bitmap of A: entries with
// Ab [p] == 0 are absent and skipped.  Returns GrB_NO_VALUE when this
// kernel is compiled out via GB_DISABLE.
GrB_Info GB_unop_apply__ainv_uint32_uint32
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // p is declared outside the loops so the same index variable serves
    // both OpenMP parallel-for loops below.
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every slot 0..anz-1 holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op: a parallel memcpy suffices (dead branch here: 0)
        GB_memcpy (Cx, Ax, anz * sizeof (uint32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            uint32_t z = aij ;
            Cx [p] = -z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            uint32_t aij = Ax [p] ;
            uint32_t z = aij ;
            Cx [p] = -z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in the shared template GB_unop_transpose.c,
// which is textually included and instantiated with the GB_* macros above.
GrB_Info GB_unop_tran__ainv_uint32_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
t003.c
#include<stdint.h>
#include<stdlib.h>
#include<stdio.h>
#include<inttypes.h>
#include<omp.h>

/* Values observed inside the target region; -1 sentinels mean "never set". */
typedef struct {int64_t nteam; int64_t mthread; int64_t nthread;} tinfo;

/*
 * OpenMP offload smoke test: launch a `target teams` region, record the
 * number of teams, max threads, and the parallel team size reported on the
 * device, and map them back to the host.
 *
 * Returns 0 when all three values are positive (the region ran and wrote
 * through the mapped struct), 1 otherwise.
 *
 * Fixes over the original:
 *  - check the malloc result before dereferencing (was a NULL deref on OOM)
 *  - print int64_t with PRId64 instead of %ld (portable on LLP64 targets)
 */
int main(int argc, char **argv) {
  (void)argc; (void)argv;
  tinfo *t = malloc(sizeof *t);
  if (t == NULL) {
    fprintf(stderr, "t003: out of memory\n");
    return 1;
  }
  t->nteam = -1; t->mthread = -1; t->nthread = -1;
  /* map(t[0:1]) maps the pointed-to struct (default tofrom), so the device
   * writes below are visible on the host after the region ends. */
  #pragma omp target teams map(t[0:1])
  {
    if(omp_get_team_num() == 0)
      t->mthread = omp_get_max_threads();
    #pragma omp parallel
    {
      /* only one thread of one team records, to avoid racing writes */
      if(omp_get_team_num() == 0 && omp_get_thread_num() == 0){
        t->nteam = omp_get_num_teams();
        t->nthread = omp_get_num_threads();
      }
    }
  }
  printf("nteam: %" PRId64 " mthread: %" PRId64 " nthread: %" PRId64 "\n",
         t->nteam, t->mthread, t->nthread);
  int ret = 0;
  if(t->nteam <= 0 || t->mthread <= 0 || t->nthread <= 0)
    ret = 1;
  free(t);
  return ret;
}
pruned_landmark_labeling.h
#ifndef PRUNED_LANDMARK_LABELING_H_
#define PRUNED_LANDMARK_LABELING_H_

#include <malloc.h>
#include <stdint.h>
#include <xmmintrin.h>
#include <sys/time.h>
#include <climits>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include <stack>
#include <queue>
#include <set>
#include <algorithm>
#include <fstream>
#include <utility>
#include "omp.h"

// Thread count used by parallel_dfs and the per-thread counter arrays.
#define NumThreads 8
// Maximum DFS path length (number of hops) for the path-counting routines.
#define constrain 7

// Pruned landmark labeling for exact shortest-path distance queries on a
// directed graph.  Builds 2-hop labels (in-labels and out-labels) per vertex
// via pruned BFSs; QueryDistance intersects the two sorted label lists.
// NOTE(review): this header contains out-of-class member definitions and a
// static-member definition (INF8 below), so it can only be included from one
// translation unit without ODR violations — confirm against the build.
class PrunedLandmarkLabeling {
 public:
  // Constructs an index from a graph, given as a list of edges.
  // Vertices should be described by numbers starting from zero.
  // Returns |true| when successful.
  bool ConstructIndex(const std::vector<std::pair<int, int> > &es);
  bool ConstructIndex(std::istream &ifs);
  bool ConstructIndex(const char *filename);

  // Returns distance between vertices |v| and |w| if they are connected.
  // Otherwise, returns |INT_MAX|.
  inline int QueryDistance(int v, int w);

  // Loads an index. Returns |true| when successful.
  bool LoadIndex(std::istream &ifs);
  bool LoadIndex(const char *filename);

  // Stores the index. Returns |true| when successful.
  bool StoreIndex(std::ostream &ofs);
  bool StoreIndex(const char *filename);

  int GetNumVertices() { return num_v_; }
  void Free();
  void PrintStatistics();

  // bfs check: reference BFS used to validate QueryDistance results.
  inline int DistanceCheck(int s, int t);
  // dfs test: count paths from s to t of length <= constrain.
  int dfs(int s, int t, int step, int ele);
  int parallel_dfs(int s, int t, int step, int ele);
  int para_dfs(int s, int t, int step, int ele, long id);

  PrunedLandmarkLabeling()
      : adj(NULL), index_in_(NULL), index_out_(NULL),
        time_load_(0), time_indexing_(0) {}
  virtual ~PrunedLandmarkLabeling() {
    Free();
  }

 private:
  static const uint8_t INF8;  // For unreachable pairs

  // Per-vertex label list: parallel arrays of (landmark id, distance),
  // terminated by a sentinel entry whose spt_v equals num_v_.
  struct index_t {
    uint32_t *spt_v;
    uint8_t  *spt_d;
  } __attribute__((aligned(64)));  // Aligned for cache lines

  // bfs check: flat adjacency list; adj[v].nb is terminated by the
  // out-of-range value num_v_ + 1 instead of storing a length.
  struct neighbor{
    uint32_t *nb;
  } __attribute__((aligned(64)));
  ;
  neighbor *adj;

  index_t *index_in_;   // in-labels  (reachability INTO each vertex)
  index_t *index_out_;  // out-labels (reachability OUT OF each vertex)

  // Wall-clock time in seconds (microsecond resolution).
  double GetCurrentTimeSec() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec + tv.tv_usec * 1e-6;
  }

  // Statistics
  double time_load_, time_indexing_;

 public:
  // dfs test state (public so the test drivers can inspect it)
  int num_v_;                    // number of vertices
  int stack[constrain + 1]; // k = 7  -- current DFS path
  bool* visited;                 // per-vertex DFS visitation flags
  int count;                     // total path count accumulated by dfs()
  int count_sum[NumThreads];     // per-thread partial counts (parallel_dfs)
};

const uint8_t PrunedLandmarkLabeling::INF8 = 100;

bool PrunedLandmarkLabeling
::ConstructIndex(const char *filename) {
  std::ifstream ifs(filename);
  // std::cout << "filename: " << filename << std::endl;
  return ifs && ConstructIndex(ifs);
}

// Reads "v w" edge pairs from |ifs| and builds only the flat adjacency list
// (adj) and the visited array used by the DFS routines.  NOTE(review): the
// call that would build the distance index is commented out below, so this
// overload does NOT produce labels — confirm this is intentional.
bool PrunedLandmarkLabeling
::ConstructIndex(std::istream &ifs) {
  // only use the part "load graph"
  std::vector<std::pair<int, int> > es;
  for (int v, w; ifs >> v >> w; ) {
    // std::cout << "v: " << v << " w: " << w << std::endl;
    es.push_back(std::make_pair(v, w));
  }
  if (ifs.bad()) return false;
  std::cout << es.size() << std::endl;

  Free();
  time_load_ = -GetCurrentTimeSec();
  int &V = num_v_;  // number of vertices, count from 0
  V = 0;
  for (size_t i = 0; i < es.size(); ++i) {
    V = std::max(V, std::max(es[i].first, es[i].second) + 1);
  }// V = the max tag of vertex + 1.

  std::vector<std::vector<int> > adj_in(V);
  std::vector<std::vector<int> > adj_out(V);
  for (size_t i = 0; i < es.size(); ++i) {
    int v = es[i].first, w = es[i].second;
    adj_in[w].push_back(v);
    adj_out[v].push_back(w);
  }
  time_load_ += GetCurrentTimeSec();

  // copy original adj_out into the cache-aligned flat representation;
  // each list is terminated with the sentinel V + 1.
  adj = (neighbor*)memalign(64, V * sizeof(neighbor));
  visited = (bool*)memalign(64, V * sizeof(bool));
  for (int v = 0; v < V; ++v) {
    int k = adj_out[v].size();
    adj[v].nb = (uint32_t*)memalign(64, (k + 1) * sizeof(uint32_t));
    for (int j = 0; j < k; j++){
      adj[v].nb[j] = adj_out[v][j];
    }
    adj[v].nb[k] = V + 1;
    visited[v] = false;
  }
  std::cout << "num_v_: " << num_v_ << " V: " << V << std::endl;
  // ConstructIndex(es);
  return true;
}

// Full index construction: builds the adjacency list, relabels vertices by
// decreasing out-degree, then runs one pruned BFS per root (both directions,
// since the graph is directed) to populate index_in_/index_out_.
bool PrunedLandmarkLabeling
::ConstructIndex(const std::vector<std::pair<int, int> > &es) {
  //
  // Prepare the adjacency list and index space
  //
  Free();
  time_load_ = -GetCurrentTimeSec();
  int &V = num_v_;  // number of vertices, count from 0
  V = 0;
  for (size_t i = 0; i < es.size(); ++i) {
    V = std::max(V, std::max(es[i].first, es[i].second) + 1);
  }// V = the max tag of vertex + 1.

  std::vector<std::vector<int> > adj_in(V);
  std::vector<std::vector<int> > adj_out(V);
  for (size_t i = 0; i < es.size(); ++i) {
    int v = es[i].first, w = es[i].second;
    adj_in[w].push_back(v);
    adj_out[v].push_back(w);
  }
  time_load_ += GetCurrentTimeSec();

  // copy original adj_out (flat, sentinel-terminated, in ORIGINAL labels)
  adj = (neighbor*)memalign(64, V * sizeof(neighbor));
  for (int v = 0; v < V; ++v) {
    int k = adj_out[v].size();
    adj[v].nb = (uint32_t*)memalign(64, (k + 1) * sizeof(uint32_t));
    for (int j = 0; j < k; j++){
      adj[v].nb[j] = adj_out[v][j];
    }
    adj[v].nb[k] = V + 1;
  }

  // void * memalign (size_t boundary, size_t size)
  index_in_ = (index_t*)memalign(64, V * sizeof(index_t));
  index_out_ = (index_t*)memalign(64, V * sizeof(index_t));
  if (index_in_ == NULL || index_out_ == NULL) {
    num_v_ = 0;
    return false;
  }
  for (int v = 0; v < V; ++v) {
    index_in_[v].spt_v = NULL;
    index_in_[v].spt_d = NULL;
    index_out_[v].spt_v = NULL;
    index_out_[v].spt_d = NULL;
  }

  //
  // Order vertices by decreasing order of degree
  //
  time_indexing_ = -GetCurrentTimeSec();
  std::vector<int> inv(V);
  {
    // Order
    // NOTE(review): pair key is float although only the integer degree is
    // stored (the random tie-breaking term is commented out) — confirm the
    // float key is still wanted.
    std::vector<std::pair<float, int> > deg(V);
    for (int v = 0; v < V; ++v) {
      // We add a random value here to diffuse nearby vertices
      // + float(rand()) / RAND_MAX
      deg[v] = std::make_pair(adj_out[v].size(), v);
    }
    std::sort(deg.rbegin(), deg.rend());  // descending by degree
    for (int i = 0; i < V; ++i) inv[i] = deg[i].second;
    // inv[new label] = old label

    // Relabel the vertex IDs
    std::vector<int> rank(V);  // rank[old label] = new label
    for (int i = 0; i < V; ++i){
      rank[deg[i].second] = i;
    }
    std::vector<std::vector<int> > new_adj_in(V);
    std::vector<std::vector<int> > new_adj_out(V);
    for (int v = 0; v < V; ++v) {
      for (size_t i = 0; i < adj_in[v].size(); ++i) {
        new_adj_in[rank[v]].push_back(rank[adj_in[v][i]]);
      }
      for (size_t i = 0; i < adj_out[v].size(); ++i) {
        new_adj_out[rank[v]].push_back(rank[adj_out[v][i]]);
      }
    }
    adj_in.swap(new_adj_in);
    adj_out.swap(new_adj_out);
  }

  //
  // Bit-parallel labeling
  //
  // usd[v]: whether v has already been used as a BFS root (new labels);
  // all V entries start false.
  std::vector<bool> usd(V, false);  // Used as root? (in new label)

  //
  // Pruned labeling
  //
  // pruned BFSs using normal labels for pruning
  {
    // Sentinel (V, INF8) is added to all the vertices
    std::vector<std::vector<std::pair<int, uint8_t>>> tmp_idx_in(V,
        (std::vector<std::pair<int, uint8_t>>(1, std::make_pair(V, INF8))));
    std::vector<std::vector<std::pair<int, uint8_t>>> tmp_idx_out(V,
        (std::vector<std::pair<int, uint8_t>>(1, std::make_pair(V, INF8))));

    std::vector<bool> vis_in(V);
    std::vector<bool> vis_out(V);
    std::vector<int> que_in(V);   // queue
    std::vector<int> que_out(V);  // queue
    std::vector<uint8_t> dst_r_in(V + 1, INF8);   // distance to r
    std::vector<uint8_t> dst_r_out(V + 1, INF8);  // distance to r

    for (int r = 0; r < V; ++r) {
      //if (usd[r]) continue;
      std::vector<std::pair<int, uint8_t>> &tmp_idx_r_in = tmp_idx_in[r];
      std::vector<std::pair<int, uint8_t>> &tmp_idx_r_out = tmp_idx_out[r];
      // Seed dst_r_* with r's current labels so the prune test below is a
      // plain array lookup instead of a label-list intersection.
      for (size_t i = 0; i < tmp_idx_r_in.size(); ++i) {
        dst_r_in[tmp_idx_r_in[i].first] = tmp_idx_r_in[i].second;
      }
      for (size_t i = 0; i < tmp_idx_r_out.size(); ++i) {
        dst_r_out[tmp_idx_r_out[i].first] = tmp_idx_r_out[i].second;
      }

      int que_t0_in = 0, que_t1_in = 0, que_h_in = 0;
      int que_t0_out = 0, que_t1_out = 0, que_h_out = 0;
      // que_t0..que_t1: frontier of the current BFS level;
      // que_t1..que_h: vertices discovered for the next level.
      que_in[que_h_in++] = r;
      que_out[que_h_out++] = r;
      vis_in[r] = true;
      vis_out[r] = true;
      que_t1_in = que_h_in;
      que_t1_out = que_h_out;

      // Two level-synchronous BFSs advance in lockstep: backward (in) and
      // forward (out) from root r, one distance d per iteration.
      for (uint8_t d = 0; que_t0_in < que_h_in || que_t0_out < que_h_out; ++d) {
        if (que_t0_in < que_h_in){
          for (int que_i_in = que_t0_in; que_i_in < que_t1_in; ++que_i_in) {
            int v = que_in[que_i_in];
            std::vector<std::pair<int, uint8_t>> &tmp_idx_v_out = tmp_idx_out[v];

            // Prefetch
            _mm_prefetch(&tmp_idx_v_out[0], _MM_HINT_T0);
            _mm_prefetch(&tmp_idx_r_in[0], _MM_HINT_T0);
            _mm_prefetch(&tmp_idx_v_out[0], _MM_HINT_T0);
            _mm_prefetch(&tmp_idx_r_in[0], _MM_HINT_T0);

            // Prune?
            if (usd[v]){
              // tmp_idx_v_in.back() = std::make_pair(r, d);
              // tmp_idx_r_out.back() = std::make_pair(v, d);
              // tmp_idx_v_in.push_back(std::make_pair(V, INF8));
              // tmp_idx_r_out.push_back(std::make_pair(V, INF8));
              // dst_r[v] = d;
              continue;
            }
            // If an existing label pair already certifies dist(v -> r) <= d,
            // adding (r, d) to v is redundant: prune this subtree.
            for (size_t i = 0; i < tmp_idx_v_out.size(); ++i) {
              int w = tmp_idx_v_out[i].first;
              int td = tmp_idx_v_out[i].second + dst_r_in[w];
              if (td <= d) goto pruned_in;
            }

            // Traverse: record (r, d) on v's out-labels and (v, d) on r's
            // in-labels, overwriting the sentinel and re-appending it.
            tmp_idx_v_out.back() = std::make_pair(r, d);
            tmp_idx_r_in.back() = std::make_pair(v, d);
            tmp_idx_v_out.push_back(std::make_pair(V, INF8));
            tmp_idx_r_in.push_back(std::make_pair(V, INF8));
            dst_r_in[v] = d;
            for (size_t i = 0; i < adj_in[v].size(); ++i) {
              int w = adj_in[v][i];
              if (!vis_in[w]) {
                que_in[que_h_in++] = w;
                vis_in[w] = true;
              }
            }
         pruned_in:
            {}
          }
          que_t0_in = que_t1_in;
          que_t1_in = que_h_in;
        }
        if (que_t0_out < que_h_out){
          for (int que_i_out = que_t0_out; que_i_out < que_t1_out; ++que_i_out) {
            int v = que_out[que_i_out];
            std::vector<std::pair<int, uint8_t>> &tmp_idx_v_in = tmp_idx_in[v];

            // Prefetch
            _mm_prefetch(&tmp_idx_v_in[0], _MM_HINT_T0);
            _mm_prefetch(&tmp_idx_r_out[0], _MM_HINT_T0);
            _mm_prefetch(&tmp_idx_v_in[0], _MM_HINT_T0);
            _mm_prefetch(&tmp_idx_r_out[0], _MM_HINT_T0);

            // Prune?
            if (usd[v]){
              // tmp_idx_v_in.back() = std::make_pair(r, d);
              // tmp_idx_r_out.back() = std::make_pair(v, d);
              // tmp_idx_v_in.push_back(std::make_pair(V, INF8));
              // tmp_idx_r_out.push_back(std::make_pair(V, INF8));
              // dst_r[v] = d;
              continue;
            }
            // Symmetric prune test for the forward direction.
            for (size_t i = 0; i < tmp_idx_v_in.size(); ++i) {
              int w = tmp_idx_v_in[i].first;
              int td = tmp_idx_v_in[i].second + dst_r_out[w];
              if (td <= d) goto pruned_out;
            }

            // Traverse
            tmp_idx_v_in.back() = std::make_pair(r, d);
            tmp_idx_r_out.back() = std::make_pair(v, d);
            tmp_idx_v_in.push_back(std::make_pair(V, INF8));
            tmp_idx_r_out.push_back(std::make_pair(V, INF8));
            dst_r_out[v] = d;
            for (size_t i = 0; i < adj_out[v].size(); ++i) {
              int w = adj_out[v][i];
              if (!vis_out[w]) {
                que_out[que_h_out++] = w;
                vis_out[w] = true;
              }
            }
         pruned_out:
            {}
          }
          que_t0_out = que_t1_out;
          que_t1_out = que_h_out;
        }
      }

      // Reset only the entries this root touched, so the shared scratch
      // arrays are clean for the next root without an O(V) clear.
      for (int i = 0; i < que_h_in; ++i) vis_in[que_in[i]] = false;
      for (int i = 0; i < que_h_out; ++i) vis_out[que_out[i]] = false;
      for (size_t i = 0; i < tmp_idx_r_in.size(); ++i) {
        dst_r_in[tmp_idx_r_in[i].first] = INF8;
      }
      for (size_t i = 0; i < tmp_idx_r_out.size(); ++i) {
        dst_r_out[tmp_idx_r_out[i].first] = INF8;
      }
      usd[r] = true;
    }

    // std::cout << "tmp_idx_in:" << std::endl;
    // for (size_t j = 0; j < tmp_idx_in.size(); j++){
    //   for (size_t i = 0; i < tmp_idx_in[j].size(); i++){
    //     std::cout << inv[j] << ": " << inv[tmp_idx_in[j][i].first] << " " << unsigned(tmp_idx_in[j][i].second) << std::endl;
    //   }
    // }
    // std::cout << "tmp_idx_out:" << std::endl;
    // for (size_t j = 0; j < tmp_idx_out.size(); j++){
    //   for (size_t i = 0; i < tmp_idx_out[j].size(); i++){
    //     std::cout << inv[j] << ": " << inv[tmp_idx_out[j][i].first] << " " << unsigned(tmp_idx_out[j][i].second) << std::endl;
    //   }
    // }

    // Copy the temporary labels (built in NEW labels, indexed by rank) into
    // the final aligned arrays, stored under the ORIGINAL vertex id inv[v].
    // Labels are sorted by landmark id so QueryDistance can merge-intersect.
    for (int v = 0; v < V; ++v) {
      int k1 = tmp_idx_in[v].size();
      index_in_[inv[v]].spt_v = (uint32_t*)memalign(64, k1 * sizeof(uint32_t));
      index_in_[inv[v]].spt_d = (uint8_t *)memalign(64, k1 * sizeof(uint8_t ));
      int k2 = tmp_idx_out[v].size();
      index_out_[inv[v]].spt_v = (uint32_t*)memalign(64, k2 * sizeof(uint32_t));
      index_out_[inv[v]].spt_d = (uint8_t *)memalign(64, k2 * sizeof(uint8_t ));
      if (!index_in_[inv[v]].spt_v || !index_in_[inv[v]].spt_d ||
          !index_out_[inv[v]].spt_v || !index_out_[inv[v]].spt_d) {
        Free();
        return false;
      }
      sort(tmp_idx_in[v].begin(), tmp_idx_in[v].end());
      for (int i = 0; i < k1; ++i){
        index_in_[inv[v]].spt_v[i] = tmp_idx_in[v][i].first;
        index_in_[inv[v]].spt_d[i] = tmp_idx_in[v][i].second;
      }
      sort(tmp_idx_out[v].begin(), tmp_idx_out[v].end());
      for (int i = 0; i < k2; ++i) {
        index_out_[inv[v]].spt_v[i] = tmp_idx_out[v][i].first;
        index_out_[inv[v]].spt_d[i] = tmp_idx_out[v][i].second;
      }
      // NOTE(review): tmp_idx_in[v] is cleared twice and tmp_idx_out[v]
      // twice — harmless (clear is idempotent) but presumably one of each
      // pair was meant for something else.
      tmp_idx_in[v].clear();
      tmp_idx_in[v].clear();
      tmp_idx_out[v].clear();
      tmp_idx_out[v].clear();
    }
  }

  time_indexing_ += GetCurrentTimeSec();
  return true;
}

// Merge-intersects v's sorted out-labels with w's sorted in-labels and
// returns the minimum summed distance, or INT_MAX when no common landmark
// certifies a path.  Both lists end with the sentinel id num_v_.
int PrunedLandmarkLabeling
::QueryDistance(int v, int w) {
  if (v >= num_v_ || w >= num_v_) return v == w ? 0 : INT_MAX;  // INT_MAX = 2147483647

  const index_t &idx_v = index_out_[v];
  const index_t &idx_w = index_in_[w];
  int d = INF8;

  _mm_prefetch(&idx_v.spt_v[0], _MM_HINT_T0);
  _mm_prefetch(&idx_w.spt_v[0], _MM_HINT_T0);
  _mm_prefetch(&idx_v.spt_d[0], _MM_HINT_T0);
  _mm_prefetch(&idx_w.spt_d[0], _MM_HINT_T0);
  // std::cout << "query solving:" << std::endl;
  for (int i1 = 0, i2 = 0; ; ) {
    int v1 = idx_v.spt_v[i1], v2 = idx_w.spt_v[i2];
    // std::cout << v1 << " " << v2 << std::endl;
    if (v1 == v2) {
      if (v1 == num_v_) break;  // Sentinel
      int td = idx_v.spt_d[i1] + idx_w.spt_d[i2];
      if (td < d) d = td;
      ++i1;
      ++i2;
    } else {
      // advance whichever list is behind (merge step)
      i1 += v1 < v2 ? 1 : 0;
      i2 += v1 > v2 ? 1 : 0;
    }
  }

  // Distances at or above INF8 - 2 are treated as "unreachable".
  if (d >= INF8 - 2) d = INT_MAX;
  return d;
}

bool PrunedLandmarkLabeling
::LoadIndex(const char *filename) {
  std::ifstream ifs(filename);
  return ifs && LoadIndex(ifs);
}

// Reads the binary index format written by StoreIndex: vertex count,
// then per vertex the in-label list and out-label list, each as a length
// followed by interleaved (uint32 id, uint8 distance) records.
bool PrunedLandmarkLabeling
::LoadIndex(std::istream &ifs) {
  Free();
  int32_t num_v;
  ifs.read((char*)&num_v, sizeof(num_v));
  num_v_ = num_v;
  if (ifs.bad()) {
    num_v_ = 0;
    return false;
  }
  index_in_ = (index_t*)memalign(64, num_v * sizeof(index_t));
  index_out_ = (index_t*)memalign(64, num_v * sizeof(index_t));
  if (index_in_ == NULL || index_out_ == NULL) {
    num_v_ = 0;
    return false;
  }
  // NULL everything first so a partial failure can be freed safely.
  for (int v = 0; v < num_v_; ++v) {
    index_in_[v].spt_v = NULL;
    index_in_[v].spt_d = NULL;
    index_out_[v].spt_v = NULL;
    index_out_[v].spt_d = NULL;
  }
  for (int v = 0; v < num_v_; ++v) {
    index_t &idx_in = index_in_[v];
    index_t &idx_out = index_out_[v];

    int32_t s1;
    ifs.read((char*)&s1, sizeof(s1));
    if (ifs.bad()) {
      Free();
      return false;
    }
    idx_in.spt_v = (uint32_t*)memalign(64, s1 * sizeof(uint32_t));
    idx_in.spt_d = (uint8_t *)memalign(64, s1 * sizeof(uint8_t ));
    if (!idx_in.spt_v || !idx_in.spt_d) {
      Free();
      return false;
    }
    for (int i = 0; i < s1; ++i) {
      ifs.read((char*)&idx_in.spt_v[i], sizeof(idx_in.spt_v[i]));
      ifs.read((char*)&idx_in.spt_d[i], sizeof(idx_in.spt_d[i]));
    }
    int32_t s2;
    ifs.read((char*)&s2, sizeof(s2));
    if (ifs.bad()) {
      Free();
      return false;
    }
    idx_out.spt_v = (uint32_t*)memalign(64, s2 * sizeof(uint32_t));
    idx_out.spt_d = (uint8_t *)memalign(64, s2 * sizeof(uint8_t ));
    if (!idx_out.spt_v || !idx_out.spt_d) {
      Free();
      return false;
    }
    for (int i = 0; i < s2; ++i) {
      ifs.read((char*)&idx_out.spt_v[i], sizeof(idx_out.spt_v[i]));
      ifs.read((char*)&idx_out.spt_d[i], sizeof(idx_out.spt_d[i]));
    }
  }
  return ifs.good();
}

bool PrunedLandmarkLabeling
::StoreIndex(const char *filename) {
  std::ofstream ofs(filename);
  return ofs && StoreIndex(ofs);
}

// Writes the binary index format consumed by LoadIndex (see above).
// Label-list lengths are recovered by scanning for the sentinel id.
bool PrunedLandmarkLabeling
::StoreIndex(std::ostream &ofs) {
  uint32_t num_v = num_v_;
  ofs.write((const char*)&num_v, sizeof(num_v));
  for (int v = 0; v < num_v_; ++v) {
    index_t &idx_in = index_in_[v];
    index_t &idx_out = index_out_[v];
    int32_t s1;
    for (s1 = 1; idx_in.spt_v[s1 - 1] != num_v; ++s1) continue;  // Find the sentinel
    ofs.write((const char*)&s1, sizeof(s1));
    for (int i = 0; i < s1; ++i) {
      int32_t l = idx_in.spt_v[i];
      int8_t d = idx_in.spt_d[i];
      ofs.write((const char*)&l, sizeof(l));
      ofs.write((const char*)&d, sizeof(d));
    }
    int32_t s2;
    for (s2 = 1; idx_out.spt_v[s2 - 1] != num_v; ++s2) continue;  // Find the sentinel
    ofs.write((const char*)&s2, sizeof(s2));
    for (int i = 0; i < s2; ++i) {
      int32_t l = idx_out.spt_v[i];
      int8_t d = idx_out.spt_d[i];
      ofs.write((const char*)&l, sizeof(l));
      ofs.write((const char*)&d, sizeof(d));
    }
  }
  return ofs.good();
}

// Releases the label arrays.  free(NULL) is a no-op, so this is safe on a
// partially-built index.  NOTE(review): adj and visited are not released
// here — confirm whether that leak is acceptable for this class's lifetime.
void PrunedLandmarkLabeling
::Free() {
  for (int v = 0; v < num_v_; ++v) {
    free(index_in_[v].spt_v);
    free(index_in_[v].spt_d);
    free(index_out_[v].spt_v);
    free(index_out_[v].spt_d);
  }
  free(index_in_);
  free(index_out_);
  index_in_ = NULL;
  index_out_ = NULL;
  num_v_ = 0;
}

// Prints build timings and the average label-list length per vertex.
void PrunedLandmarkLabeling
::PrintStatistics() {
  std::cout << "load time: " << time_load_ << " seconds" << std::endl;
  std::cout << "indexing time: " << time_indexing_ << " seconds" << std::endl;
  double s = 0.0;
  for (int v = 0; v < num_v_; ++v) {
    for (int i = 0; index_in_[v].spt_v[i] != uint32_t(num_v_); ++i) {
      ++s;
    }
    for (int i = 0; index_out_[v].spt_v[i] != uint32_t(num_v_); ++i) {
      ++s;
    }
  }
  s /= num_v_;
  std::cout << "average normal label size: " << s << std::endl;
}

// Sequential DFS counting s->t paths of at most `constrain` hops.
// Uses the member arrays stack/visited; accumulates into `count`.
int PrunedLandmarkLabeling
::dfs(int s, int t, int step, int ele){
  if (step >= constrain) return count;
  // std::cout << "step: " << step << " ele: " << ele << std::endl;
  stack[step] = ele;
  visited[ele] = true;
  for (int i = 0; ; ++i) {
    int w = adj[ele].nb[i];
    // std::cout << "w: " << w << std::endl;
    if (w == num_v_ + 1) break;  // adjacency-list sentinel
    else if (w == t){
      stack[step + 1] = t;
      count++;
    }
    else if (!visited[w]) {
      dfs(s, t, step + 1, w);
    }
  }
  visited[ele] = false;
  return count;
}

// Parallel variant of dfs(): fans the root's neighbors out over an OpenMP
// team, each thread recursing via para_dfs() with a private stack copy.
// NOTE(review): firstprivate(visited) copies only the POINTER, so all
// threads still share one visited array — this looks like a data race;
// confirm against the intended semantics.
int PrunedLandmarkLabeling
::parallel_dfs(int s, int t, int step, int ele){
  omp_set_num_threads(8);
  if (step >= constrain) return count;
  // std::cout << "step: " << step << " ele: " << ele << std::endl;
  for (int i = 0; i < NumThreads; i++){
    count_sum[i] = 0;
  }
  stack[step] = ele;
  visited[ele] = true;
  #pragma omp parallel firstprivate(stack, visited)
  {
    long id = omp_get_thread_num();
    // count the root's neighbors (list is sentinel-terminated, no length)
    int j;
    for (j = 0; ; ++j) {
      int w = adj[ele].nb[j];
      if (w == num_v_ + 1) break;
    }
    #pragma omp for
    for (int i = 0; i < j ; ++i) {
      int w = adj[ele].nb[i];
      // std::cout << "w: " << w << std::endl;
      if (w == t){
        stack[step + 1] = t;
        count_sum[id]++;
      }
      else if (!visited[w]) {
        para_dfs(s, t, step + 1, w, id);
      }
    }
  }
  visited[ele] = false;
  // reduce the per-thread partial counts
  for (int i = 0; i < NumThreads; i++){
    count += count_sum[i];
  }
  return count;
}

// Per-thread DFS worker for parallel_dfs: like dfs() but accumulates into
// count_sum[id].  Shares the member stack/visited arrays (see race note on
// parallel_dfs above).
int PrunedLandmarkLabeling
::para_dfs(int s, int t, int step, int ele, long id){
  if (step >= constrain) return count;
  // std::cout << "step: " << step << " ele: " << ele << std::endl;
  stack[step] = ele;
  visited[ele] = true;
  for (int i = 0; ; ++i) {
    int w = adj[ele].nb[i];
    // std::cout << "w: " << w << std::endl;
    if (w == num_v_ + 1) break;
    else if (w == t){
      stack[step + 1] = t;
      count_sum[id]++;
    }
    else if (!visited[w]) {
      para_dfs(s, t, step + 1, w, id);
    }
  }
  visited[ele] = false;
  return count;
}

// Reference level-synchronous BFS from s, returning the hop distance to t
// (capped at INF8 levels) or INT_MAX; used to cross-check QueryDistance.
// NOTE(review): vis[s] is never set, so s can be re-enqueued via a cycle —
// harmless for correctness of the distance but worth confirming.
int PrunedLandmarkLabeling
::DistanceCheck(int s, int t){
  if (s >= num_v_ || t >= num_v_) return s == t ? 0 : INT_MAX;  // INT_MAX = 2147483647
  std::vector<int> que(num_v_);  // queue
  std::vector<bool> vis(num_v_);
  int que_t0 = 0, que_t1 = 0, que_h = 0;
  // que_t0..que_t1: current level; que_t1..que_h: next level.
  que[que_h++] = s;
  que_t1 = que_h;
  for (uint8_t d = 0; que_t0 < que_h && d <= INF8; ++d) {
    for (int que_i = que_t0; que_i < que_t1; ++que_i) {
      int v = que[que_i];
      if (v == t){
        return d;
      }else{
        for (int i = 0; ; ++i) {
          int w = adj[v].nb[i];
          if (w == num_v_ + 1) break;
          if (!vis[w]) {
            que[que_h++] = w;
            vis[w] = true;
          }
        }
      }
    }
    que_t0 = que_t1;
    que_t1 = que_h;
  }
  return INT_MAX;
}

#endif  // PRUNED_LANDMARK_LABELING_H_
GB_binop__rminus_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rminus_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__rminus_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__rminus_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__rminus_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_fp64) // A*D function (colscale): GB (_AxD__rminus_fp64) // D*A function (rowscale): GB (_DxB__rminus_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__rminus_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__rminus_fp64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_fp64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_fp64) // C=scalar+B GB (_bind1st__rminus_fp64) // C=scalar+B' GB (_bind1st_tran__rminus_fp64) // C=A+scalar GB (_bind2nd__rminus_fp64) // C=A'+scalar GB (_bind2nd_tran__rminus_fp64) // C type: double // A type: double // A pattern? 0 // B type: double // B pattern? 
0 // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (y - x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_FP64 || GxB_NO_RMINUS_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rminus_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rminus_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rminus_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rminus_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rminus_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; 
#else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rminus_fp64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rminus_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; double alpha_scalar ; double beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((double *) alpha_scalar_in)) ; beta_scalar = (*((double *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rminus_fp64) ( GrB_Matrix C, const int C_sparsity, 
const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rminus_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rminus_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rminus_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rminus_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) 
for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rminus_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB (_bind1st_tran__rminus_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB (_bind2nd_tran__rminus_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unop__identity_bool_uint32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_bool_uint32 // op(A') function: GB_unop_tran__identity_bool_uint32 // C type: bool // A type: uint32_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ uint32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ bool z = (bool) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ bool z = (bool) aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_bool_uint32 ( bool *Cx, // Cx and Ax may be aliased const uint32_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else 
int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (uint32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t aij = Ax [p] ; bool z = (bool) aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint32_t aij = Ax [p] ; bool z = (bool) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_bool_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
dynprog.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is int, default size is 50. */ #include "dynprog.h" /* Array initialization. */ static void init_array(int length, DATA_TYPE POLYBENCH_2D(c,LENGTH,LENGTH,length,length), DATA_TYPE POLYBENCH_2D(W,LENGTH,LENGTH,length,length)) { int i, j; for (i = 0; i < length; i++) for (j = 0; j < length; j++) { c[i][j] = i*j % 2; W[i][j] = ((DATA_TYPE) i-j) / length; } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(DATA_TYPE out) { fprintf (stderr, DATA_PRINTF_MODIFIER, out); fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_dynprog(int tsteps, int length, DATA_TYPE POLYBENCH_2D(c,LENGTH,LENGTH,length,length), DATA_TYPE POLYBENCH_2D(W,LENGTH,LENGTH,length,length), DATA_TYPE POLYBENCH_3D(sum_c,LENGTH,LENGTH,LENGTH,length,length,length), DATA_TYPE *out) { int iter, i, j, k; DATA_TYPE out_l = 0; #pragma scop #pragma omp parallel { #pragma omp master { for (iter = 0; iter < _PB_TSTEPS; iter++) { #pragma omp for private (j) for (i = 0; i <= _PB_LENGTH - 1; i++) for (j = 0; j <= _PB_LENGTH - 1; j++) c[i][j] = 0; #pragma omp for private (j, k) for (i = 0; i <= _PB_LENGTH - 2; i++) { for (j = i + 1; j <= _PB_LENGTH - 1; j++) { sum_c[i][j][i] = 0; for (k = i + 1; k <= j-1; k++) sum_c[i][j][k] = sum_c[i][j][k - 1] + c[i][k] + c[k][j]; c[i][j] = sum_c[i][j][j-1] + W[i][j]; } } out_l += c[0][_PB_LENGTH - 1]; } } } #pragma endscop *out = out_l; } int main(int argc, char** argv) { /* Retrieve problem size. 
*/ int length = LENGTH; int tsteps = TSTEPS; /* Variable declaration/allocation. */ DATA_TYPE out; POLYBENCH_3D_ARRAY_DECL(sum_c,DATA_TYPE,LENGTH,LENGTH,LENGTH,length,length,length); POLYBENCH_2D_ARRAY_DECL(c,DATA_TYPE,LENGTH,LENGTH,length,length); POLYBENCH_2D_ARRAY_DECL(W,DATA_TYPE,LENGTH,LENGTH,length,length); /* Initialize array(s). */ init_array (length, POLYBENCH_ARRAY(c), POLYBENCH_ARRAY(W)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_dynprog (tsteps, length, POLYBENCH_ARRAY(c), POLYBENCH_ARRAY(W), POLYBENCH_ARRAY(sum_c), &out); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(out)); /* Be clean. */ POLYBENCH_FREE_ARRAY(sum_c); POLYBENCH_FREE_ARRAY(c); POLYBENCH_FREE_ARRAY(W); return 0; }
centralized_sensereversal.c
/*
 * CENTRALIZED SENSE REVERSAL BARRIER: OPENMP
 *
 * Each thread toggles a private "sense" flag on every barrier episode; the
 * last thread to arrive resets the arrival count and flips the shared
 * globalSense, releasing all spinning threads at once.
 *
 * To show correct functionality of barrier: Uncomment printf in main
 * To compile: gcc -o centralized_sensereversal centralized_sensereversal.c -lm -fopenmp
 * To run: ./centralized_sensereversal [num_threads num_barriers]
 */
#include <omp.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <sys/time.h>

/* Shared release flag.  volatile so the compiler re-loads it on every spin
 * iteration instead of hoisting the read out of the loop (a plain bool could
 * spin forever at -O2).  NOTE: C11 atomic_bool would be the strictly
 * conforming choice; volatile + omp flush is the conventional OpenMP idiom. */
volatile bool globalSense;
bool *localSense;          /* per-thread private sense flags, indexed by thread id */
volatile int startcount;   /* threads still to arrive in the current episode */
int P, N;                  /* P = thread count, N = timing-loop iterations */

int FetchAndDecrementCount();

/* Allocate and initialize the barrier state for P threads.
 * All private senses and the global sense start out `true`. */
void SenseReversalBarrier_Init()
{
	startcount = P;
	localSense = (bool*) malloc(sizeof(bool)*(P));
	if (localSense == NULL) {          /* fail fast on OOM (was unchecked) */
		fprintf(stderr, "SenseReversalBarrier_Init: out of memory\n");
		exit(EXIT_FAILURE);
	}
	int i;
	for (i = 0; i < P; ++i)
		localSense[i] = true;
	globalSense = true;
}

/* Block until all P threads have called this function (one barrier episode). */
void SenseReversalBarrier(int thread_num)
{
	localSense[thread_num] = !localSense[thread_num]; // Toggle private sense variable
	if (FetchAndDecrementCount() == 1)
	{
		/* Last arriver: reset the count for the NEXT episode before the
		 * release, and flush so released threads see the reset. */
		startcount = P;
		#pragma omp flush
		globalSense = !globalSense;    /* release all spinners */
	}
	else
	{
		while (globalSense != localSense[thread_num])
		{
			#pragma omp flush          /* force a re-read of globalSense */
		}
	}
}

/* Atomically fetch the current arrival count and decrement it.
 * Returns the value observed BEFORE the decrement (so 1 means "last"). */
int FetchAndDecrementCount()
{
	int myCount;
	#pragma omp critical
	{
		myCount = startcount;
		startcount--;
	}
	return myCount;
}

int main(int argc, char **argv)
{
	int thread_num = -1;
	if (argc==3){
		if (sscanf (argv[1], "%d", &P)!=1) printf ("P - not an integer\n");
		if (sscanf (argv[2], "%d", &N)!=1) printf ("N - not an integer\n");
	}
	else {P = 4; N = 2;}
	struct timeval tv1, tv2;
	double total_time;

	SenseReversalBarrier_Init();
	gettimeofday(&tv1, NULL);
	#pragma omp parallel num_threads(P) shared(N) firstprivate(thread_num)
	{
		int i;
		thread_num = omp_get_thread_num();
		/* Five barrier episodes per loop iteration. */
		for (i = 0; i < N; ++i) {
			// printf("\n========Thread %d entered barrier %d=======",thread_num, i);
			SenseReversalBarrier(thread_num);
			// printf("\n========Thread %d entered barrier %d=======",thread_num, i);
			SenseReversalBarrier(thread_num);
			// printf("\n========Thread %d entered barrier %d=======",thread_num, i);
			SenseReversalBarrier(thread_num);
			// printf("\n========Thread %d entered barrier %d=======",thread_num, i);
			SenseReversalBarrier(thread_num);
			// printf("\n========Thread %d entered barrier %d=======",thread_num, i);
			SenseReversalBarrier(thread_num);
		}
	}
	gettimeofday(&tv2, NULL);
	total_time = (double) (tv2.tv_usec - tv1.tv_usec)
	           + (double) (tv2.tv_sec - tv1.tv_sec)*1000000;
	printf("\nSUMMARY:\nTotal run-time for %d "
	       "loops with 5 barriers per loop: %fs\n"
	       "The average time per barrier: %fus\n",
	       N, total_time/1000000, (double)(total_time/(N*5)));

	free(localSense);   /* was leaked in the original */
	localSense = NULL;
	return 0;
}
GB_binop__iseq_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__iseq_uint16 // A.*B function (eWiseMult): GB_AemultB__iseq_uint16 // A*D function (colscale): GB_AxD__iseq_uint16 // D*A function (rowscale): GB_DxB__iseq_uint16 // C+=B function (dense accum): GB_Cdense_accumB__iseq_uint16 // C+=b function (dense accum): GB_Cdense_accumb__iseq_uint16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__iseq_uint16 // C=scalar+B GB_bind1st__iseq_uint16 // C=scalar+B' GB_bind1st_tran__iseq_uint16 // C=A+scalar GB_bind2nd__iseq_uint16 // C=A'+scalar GB_bind2nd_tran__iseq_uint16 // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax 
[pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x == y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISEQ || GxB_NO_UINT16 || GxB_NO_ISEQ_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__iseq_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__iseq_uint16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__iseq_uint16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__iseq_uint16 ( GrB_Matrix C, const GrB_Matrix 
A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__iseq_uint16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__iseq_uint16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__iseq_uint16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct 
*GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__iseq_uint16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__iseq_uint16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) 
\ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB_bind1st_tran__iseq_uint16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB_bind2nd_tran__iseq_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
func_1v.c
void func_1v(float* in, float* out, unsigned n){ unsigned i; #pragma omp target teams distribute parallel for map(to: in[0:n]) map(from: out[0:n]) for(i=0; i<n; ++i){ out[i]=2*in[i]; } }
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/memory_.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resample.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImage() returns the second image composited onto the first % at the specified offset, using the specified composite method. 
% % The format of the CompositeImage method is: % % MagickBooleanType CompositeImage(Image *image, % const Image *source_image,const CompositeOperator compose, % const MagickBooleanType clip_to_self,const ssize_t x_offset, % const ssize_t y_offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the canvas image, modified by he composition % % o source_image: the source image. % % o compose: This operator affects how the composite is applied to % the image. The operators and how they are utilized are listed here % http://www.w3.org/TR/SVG12/#compositing. % % o clip_to_self: set to MagickTrue to limit composition to area composed. % % o x_offset: the column offset of the composited image. % % o y_offset: the row offset of the composited image. % % Extra Controls from Image meta-data in 'image' (artifacts) % % o "compose:args" % A string containing extra numerical arguments for specific compose % methods, generally expressed as a 'geometry' or a comma separated list % of numbers. % % Compose methods needing such arguments include "BlendCompositeOp" and % "DisplaceCompositeOp". % % o exception: return any errors or warnings in this structure. % */ /* Composition based on the SVG specification: A Composition is defined by... Color Function : f(Sc,Dc) where Sc and Dc are the normizalized colors Blending areas : X = 1 for area of overlap, ie: f(Sc,Dc) Y = 1 for source preserved Z = 1 for canvas preserved Conversion to transparency (then optimized) Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa) Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa) Where... Sca = Sc*Sa normalized Source color divided by Source alpha Dca = Dc*Da normalized Dest color divided by Dest alpha Dc' = Dca'/Da' the desired color value for this channel. Da' in in the follow formula as 'gamma' The resulting alpla value. Most functions use a blending mode of over (X=1,Y=1,Z=1) this results in the following optimizations... 
     gamma = Sa+Da-Sa*Da;
     gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
     opacity = QuantumScale*alpha*beta;  // over blend, optimized 1-Gamma

   The above SVG definitions also dictate that Mathematical Composition
   methods should use a 'Over' blending mode for Alpha Channel.
   It however was not applied for composition modes of 'Plus', 'Minus',
   the modulus versions of 'Add' and 'Subtract'.

   Mathematical operator changes to be applied from IM v6.7...

    1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
       'ModulusAdd' and 'ModulusSubtract' for clarity.

    2) All mathematical compositions work as per the SVG specification
       with regard to blending.  This now includes 'ModulusAdd' and
       'ModulusSubtract'.

    3) When the special channel flag 'sync' (synchronize channel updates)
       is turned off (enabled by default) then mathematical compositions are
       only performed on the channels specified, and are applied
       independently of each other.  In other words the mathematics is
       performed as 'pure' mathematical operations, rather than as image
       operations.
*/

/*
  HCLComposite(): convert a hue/chroma/luma triple (all normalized to [0,1)
  for hue, [0,1] for chroma/luma) back to RGB, scaled to QuantumRange.
*/
static void HCLComposite(const MagickRealType hue,const MagickRealType chroma,
  const MagickRealType luma,MagickRealType *red,MagickRealType *green,
  MagickRealType *blue)
{
  MagickRealType
    b,
    c,
    g,
    h,
    m,
    r,
    x;

  /*
    Convert HCL to RGB colorspace.
  */
  assert(red != (MagickRealType *) NULL);
  assert(green != (MagickRealType *) NULL);
  assert(blue != (MagickRealType *) NULL);
  /* h in [0,6): one unit per 60-degree hue sector */
  h=6.0*hue;
  c=chroma;
  /* x is the second-largest RGB component for this sector */
  x=c*(1.0-fabs(fmod(h,2.0)-1.0));
  r=0.0;
  g=0.0;
  b=0.0;
  if ((0.0 <= h) && (h < 1.0))
    {
      r=c;
      g=x;
    }
  else
    if ((1.0 <= h) && (h < 2.0))
      {
        r=x;
        g=c;
      }
    else
      if ((2.0 <= h) && (h < 3.0))
        {
          g=c;
          b=x;
        }
      else
        if ((3.0 <= h) && (h < 4.0))
          {
            g=x;
            b=c;
          }
        else
          if ((4.0 <= h) && (h < 5.0))
            {
              r=x;
              b=c;
            }
          else
            if ((5.0 <= h) && (h < 6.0))
              {
                r=c;
                b=x;
              }
  /* add the luma offset using Rec.601-style weights (0.298839/0.586811/0.114350) */
  m=luma-(0.298839*r+0.586811*g+0.114350*b);
  *red=QuantumRange*(r+m);
  *green=QuantumRange*(g+m);
  *blue=QuantumRange*(b+m);
}

/*
  CompositeHCL(): forward conversion, RGB (Quantum-scaled) to hue/chroma/luma;
  inverse of HCLComposite() above.
*/
static void CompositeHCL(const MagickRealType red,const MagickRealType green,
  const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma,
  MagickRealType *luma)
{
  MagickRealType
    b,
    c,
    g,
    h,
    max,
    r;

  /*
    Convert RGB to HCL colorspace.
  */
  assert(hue != (MagickRealType *) NULL);
  assert(chroma != (MagickRealType *) NULL);
  assert(luma != (MagickRealType *) NULL);
  r=red;
  g=green;
  b=blue;
  max=MagickMax(r,MagickMax(g,b));
  /* chroma is the spread between the largest and smallest components */
  c=max-(MagickRealType) MagickMin(r,MagickMin(g,b));
  h=0.0;
  if (c == 0)
    h=0.0;
  else
    if (red == max)
      h=fmod((g-b)/c+6.0,6.0);
    else
      if (green == max)
        h=((b-r)/c)+2.0;
      else
        if (blue == max)
          h=((r-g)/c)+4.0;
  *hue=(h/6.0);
  *chroma=QuantumScale*c;
  *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
}

/*
  CompositeOverImage(): fast path for the (Src)Over operator; composites
  source_image onto image at (x_offset,y_offset).  Body continues below.
*/
static MagickBooleanType CompositeOverImage(Image *image,
  const Image *source_image,const MagickBooleanType clip_to_self,
  const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *image_view,
    *source_view;

  const char
    *value;

  MagickBooleanType
    clamp,
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Composite image.
  */
  status=MagickTrue;
  progress=0;
  clamp=MagickTrue;
  value=GetImageArtifact(image,"compose:clamp");
  if (value != (const char *) NULL)
    clamp=IsStringTrue(value);
  /* NOTE(review): status/progress are initialized twice; redundant but harmless */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(source_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *pixels;

    PixelInfo
      canvas_pixel,
      source_pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    if (clip_to_self != MagickFalse)
      {
        if (y < y_offset)
          continue;
        if ((y-y_offset) >= (ssize_t) source_image->rows)
          continue;
      }
    /*
      If pixels is NULL, y is outside overlay region.
    */
    pixels=(Quantum *) NULL;
    p=(Quantum *) NULL;
    if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
      {
        p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
          source_image->columns,1,exception);
        if (p == (const Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixels=p;
        /* pre-bias p for negative x offsets; the wrap check below relies on pixels */
        if (x_offset < 0)
          p-=x_offset*GetPixelChannels(source_image);
      }
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&canvas_pixel);
    GetPixelInfo(source_image,&source_pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      MagickRealType
        alpha,
        Da,
        Dc,
        Dca,
        Sa,
        Sc,
        Sca;

      register ssize_t
        i;

      size_t
        channels;

      if (clip_to_self != MagickFalse)
        {
          if (x < x_offset)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          if ((x-x_offset) >= (ssize_t) source_image->columns)
            break;
        }
      if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
          ((x-x_offset) >= (ssize_t) source_image->columns))
        {
          Quantum
            source[MaxPixelChannels];

          /*
            Virtual composite:
              Sc: source color.
              Dc: canvas color.
          */
          (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
            exception);
          if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(image);
              continue;
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            MagickRealType
              pixel;

            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait source_traits=GetPixelChannelTraits(source_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (source_traits == UndefinedPixelTrait))
              continue;
            /* outside the overlay: canvas keeps its color, alpha forced clear */
            if (channel == AlphaPixelChannel)
              pixel=(MagickRealType) TransparentAlpha;
            else
              pixel=(MagickRealType) q[i];
            q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
              ClampToQuantum(pixel);
          }
          q+=GetPixelChannels(image);
          continue;
        }
      /*
        Authentic composite:
          Sa:  normalized source alpha.
          Da:  normalized canvas alpha.
      */
      Sa=QuantumScale*GetPixelAlpha(source_image,p);
      Da=QuantumScale*GetPixelAlpha(image,q);
      /* SVG 'over' alpha: Sa+Da-Sa*Da */
      alpha=Sa+Da-Sa*Da;
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          p+=GetPixelChannels(source_image);
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        MagickRealType
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((source_traits == UndefinedPixelTrait) &&
            (channel != AlphaPixelChannel))
          continue;
        if (channel == AlphaPixelChannel)
          {
            /*
              Set alpha channel.
            */
            pixel=QuantumRange*alpha;
            q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
              ClampToQuantum(pixel);
            continue;
          }
        /*
          Sc: source color.
          Dc: canvas color.
        */
        Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
        Dc=(MagickRealType) q[i];
        if ((traits & CopyPixelTrait) != 0)
          {
            /*
              Copy channel.
            */
            q[i]=Sc;
            continue;
          }
        /*
          Porter-Duff compositions:
            Sca: source normalized color multiplied by alpha.
            Dca: normalized canvas color multiplied by alpha.
        */
        Sca=QuantumScale*Sa*Sc;
        Dca=QuantumScale*Da*Dc;
        /* 1/alpha, guarded against division by ~0 */
        gamma=PerceptibleReciprocal(alpha);
        pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
        q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
      }
      p+=GetPixelChannels(source_image);
      channels=GetPixelChannels(source_image);
      /* wrap p back to the row start once past the overlay's last column */
      if (p >= (pixels+channels*source_image->columns))
        p=pixels;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CompositeImage)
#endif
        proceed=SetImageProgress(image,CompositeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

MagickExport MagickBooleanType CompositeImage(Image *image,
  const Image *composite,const CompositeOperator compose,
  const MagickBooleanType clip_to_self,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *source_view,
    *image_view;

  const char
    *value;

  GeometryInfo
    geometry_info;

  Image
    *canvas_image,
    *source_image;

  MagickBooleanType
    clamp,
    status;

  MagickOffsetType
    progress;

  MagickRealType
    amount,
    canvas_dissolve,
    midpoint,
    percent_luma,
    percent_chroma,
    source_dissolve,
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(composite != (Image *) NULL);
  assert(composite->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  source_image=CloneImage(composite,0,0,MagickTrue,exception);
  if (source_image == (const Image *) NULL)
    return(MagickFalse);
  if
(IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); (void) SetImageColorspace(source_image,image->colorspace,exception); if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp)) { status=CompositeOverImage(image,source_image,clip_to_self,x_offset, y_offset,exception); source_image=DestroyImage(source_image); return(status); } amount=0.5; canvas_image=(Image *) NULL; canvas_dissolve=1.0; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsStringTrue(value); SetGeometryInfo(&geometry_info); percent_luma=100.0; percent_chroma=100.0; source_dissolve=1.0; threshold=0.05f; switch (compose) { case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *p; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source_image->columns; x++) { register ssize_t i; if (GetPixelReadMask(source_image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = 
GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if (traits == UndefinedPixelTrait) continue; if (source_traits != UndefinedPixelTrait) SetPixelChannel(image,channel,p[i],q); else if (channel == AlphaPixelChannel) SetPixelChannel(image,channel,OpaqueAlpha,q); } p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag, (MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case IntensityCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *p; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } 
for (x=0; x < (ssize_t) source_image->columns; x++) { if (GetPixelReadMask(source_image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } SetPixelAlpha(image,clamp != MagickFalse ? ClampPixel(GetPixelIntensity(source_image,p)) : ClampToQuantum(GetPixelIntensity(source_image,p)),q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag, (MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case CopyAlphaCompositeOp: case ChangeMaskCompositeOp: { /* Modify canvas outside the overlaid region and require an alpha channel to exist, to add transparency. */ if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case BlurCompositeOp: { CacheView *canvas_view; MagickRealType angle_range, angle_start, height, width; PixelInfo pixel; ResampleFilter *resample_filter; SegmentInfo blur; /* Blur Image by resampling. Blur Image dictated by an overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } /* Gather the maximum blur sigma values from user. 
*/ flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (const char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & WidthValue) == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "InvalidSetting","'%s' '%s'","compose:args",value); source_image=DestroyImage(source_image); canvas_image=DestroyImage(canvas_image); return(MagickFalse); } /* Users input sigma now needs to be converted to the EWA ellipse size. The filter defaults to a sigma of 0.5 so to make this match the users input the ellipse size needs to be doubled. */ width=height=geometry_info.rho*2.0; if ((flags & HeightValue) != 0 ) height=geometry_info.sigma*2.0; /* Default the unrotated ellipse width and height axis vectors. */ blur.x1=width; blur.x2=0.0; blur.y1=0.0; blur.y2=height; /* rotate vectors if a rotation angle is given */ if ((flags & XValue) != 0 ) { MagickRealType angle; angle=DegreesToRadians(geometry_info.xi); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } /* Otherwise lets set a angle range and calculate in the loop */ angle_start=0.0; angle_range=0.0; if ((flags & YValue) != 0 ) { angle_start=DegreesToRadians(geometry_info.xi); angle_range=DegreesToRadians(geometry_info.psi)-angle_start; } /* Set up a gaussian cylindrical filter for EWA Bluring. As the minimum ellipse radius of support*1.0 the EWA algorithm can only produce a minimum blur of 0.5 for Gaussian (support=2.0) This means that even 'No Blur' will be still a little blurry! The solution (as well as the problem of preventing any user expert filter settings, is to set our own user settings, then restore them afterwards. 
*/ resample_filter=AcquireResampleFilter(image,exception); SetResampleFilter(resample_filter,GaussianFilter); /* do the variable blurring of each pixel in image */ GetPixelInfo(image,&pixel); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } if (fabs((double) angle_range) > MagickEpsilon) { MagickRealType angle; angle=angle_start+angle_range*QuantumScale* GetPixelBlue(source_image,p); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } #if 0 if ( x == 10 && y == 60 ) { (void) fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n",blur.x1, blur.x2,blur.y1, blur.y2); (void) fprintf(stderr, "scaled by=%lf,%lf\n",QuantumScale* GetPixelRed(p),QuantumScale*GetPixelGreen(p)); #endif ScaleResampleFilter(resample_filter, blur.x1*QuantumScale*GetPixelRed(source_image,p), blur.y1*QuantumScale*GetPixelGreen(source_image,p), blur.x2*QuantumScale*GetPixelRed(source_image,p), blur.y2*QuantumScale*GetPixelGreen(source_image,p) ); (void) ResamplePixelColor(resample_filter,(double) x_offset+x, (double) y_offset+y,&pixel,exception); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if 
(sync == MagickFalse) break; } resample_filter=DestroyResampleFilter(resample_filter); source_view=DestroyCacheView(source_view); canvas_view=DestroyCacheView(canvas_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DisplaceCompositeOp: case DistortCompositeOp: { CacheView *canvas_view; MagickRealType horizontal_scale, vertical_scale; PixelInfo pixel; PointInfo center, offset; /* Displace/Distort based on overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,center.x,center.y]] */ canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & (WidthValue | HeightValue)) == 0 ) { if ((flags & AspectValue) == 0) { horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0; vertical_scale=(MagickRealType) (source_image->rows-1)/2.0; } else { horizontal_scale=(MagickRealType) (image->columns-1)/2.0; vertical_scale=(MagickRealType) (image->rows-1)/2.0; } } else { horizontal_scale=geometry_info.rho; vertical_scale=geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale*=(source_image->columns-1)/200.0; vertical_scale*=(source_image->rows-1)/200.0; } else { horizontal_scale*=(image->columns-1)/200.0; vertical_scale*=(image->rows-1)/200.0; } } if ((flags & HeightValue) == 0) vertical_scale=horizontal_scale; } /* Determine fixed center point for absolute distortion map Absolute distort == Displace offset relative to a fixed absolute point Select that point according to +X+Y user inputs. default = center of overlay image arg flag '!' 
= locations/percentage relative to background image */ center.x=(MagickRealType) x_offset; center.y=(MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) != 0) center.x=(MagickRealType) ((image->columns-1)/2.0); else center.x=(MagickRealType) (x_offset+(source_image->columns-1)/ 2.0); else if ((flags & AspectValue) != 0) center.x=geometry_info.xi; else center.x=(MagickRealType) (x_offset+geometry_info.xi); if ((flags & YValue) == 0) if ((flags & AspectValue) != 0) center.y=(MagickRealType) ((image->rows-1)/2.0); else center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0); else if ((flags & AspectValue) != 0) center.y=geometry_info.psi; else center.y=(MagickRealType) (y_offset+geometry_info.psi); } /* Shift the pixel offset point as defined by the provided, displacement/distortion map. -- Like a lens... */ GetPixelInfo(image,&pixel); image_view=AcquireVirtualCacheView(image,exception); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } /* Displace the offset. */ offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ? 
x : 0); offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ? y : 0); (void) InterpolatePixelInfo(image,image_view, UndefinedInterpolatePixel,(double) offset.x,(double) offset.y, &pixel,exception); /* Mask with the 'invalid pixel mask' in alpha channel. */ pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)* (QuantumScale*GetPixelAlpha(source_image,p)); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } canvas_view=DestroyCacheView(canvas_view); source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DissolveCompositeOp: { /* Geometry arguments to dissolve factors. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0; if ((source_dissolve-MagickEpsilon) < 0.0) source_dissolve=0.0; if ((source_dissolve+MagickEpsilon) > 1.0) { canvas_dissolve=2.0-source_dissolve; source_dissolve=1.0; } if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; if ((canvas_dissolve-MagickEpsilon) < 0.0) canvas_dissolve=0.0; } break; } case BlendCompositeOp: { value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0-source_dissolve; if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; } break; } case MathematicsCompositeOp: { /* Just collect the values from "compose:args", setting. Unused values are set to zero automagically. 
Arguments are normally a comma separated list, so this probably should be changed to some 'general comma list' parser, (with a minimum number of values) */ SetGeometryInfo(&geometry_info); value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) (void) ParseGeometry(value,&geometry_info); break; } case ModulateCompositeOp: { /* Determine the luma and chroma scale. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); percent_luma=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma=geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* Determine the amount and threshold. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); amount=geometry_info.rho; threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold=0.05f; } threshold*=QuantumRange; break; } default: break; } /* Composite image. */ status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *pixels; MagickRealType blue, chroma, green, hue, luma, red; PixelInfo canvas_pixel, source_pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. 
*/ pixels=(Quantum *) NULL; p=(Quantum *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset*GetPixelChannels(source_image); } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } hue=0.0; chroma=0.0; luma=0.0; GetPixelInfo(image,&canvas_pixel); GetPixelInfo(source_image,&source_pixel); for (x=0; x < (ssize_t) image->columns; x++) { double gamma; MagickRealType alpha, Da, Dc, Dca, Sa, Sc, Sca; register ssize_t i; size_t channels; if (clip_to_self != MagickFalse) { if (x < x_offset) { q+=GetPixelChannels(image); continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } if ((pixels == (Quantum *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { Quantum source[MaxPixelChannels]; /* Virtual composite: Sc: source color. Dc: canvas color. 
*/ (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source, exception); if (GetPixelWriteMask(image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if ((traits == UndefinedPixelTrait) || (source_traits == UndefinedPixelTrait)) continue; switch (compose) { case AlphaCompositeOp: case ChangeMaskCompositeOp: case CopyAlphaCompositeOp: case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case OutCompositeOp: case SrcInCompositeOp: case SrcOutCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) q[i]; break; } case ClearCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=0.0; break; } case BlendCompositeOp: case DissolveCompositeOp: { if (channel == AlphaPixelChannel) pixel=canvas_dissolve*GetPixelAlpha(source_image,source); else pixel=(MagickRealType) source[channel]; break; } default: { pixel=(MagickRealType) source[channel]; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } q+=GetPixelChannels(image); continue; } /* Authentic composite: Sa: normalized source alpha. Da: normalized canvas alpha. 
*/ Sa=QuantumScale*GetPixelAlpha(source_image,p); Da=QuantumScale*GetPixelAlpha(image,q); switch (compose) { case BumpmapCompositeOp: { alpha=GetPixelIntensity(source_image,p)*Sa; break; } case ColorBurnCompositeOp: case ColorDodgeCompositeOp: case DarkenCompositeOp: case DifferenceCompositeOp: case DivideDstCompositeOp: case DivideSrcCompositeOp: case ExclusionCompositeOp: case HardLightCompositeOp: case HardMixCompositeOp: case LinearBurnCompositeOp: case LinearDodgeCompositeOp: case LinearLightCompositeOp: case LightenCompositeOp: case MathematicsCompositeOp: case MinusDstCompositeOp: case MinusSrcCompositeOp: case ModulusAddCompositeOp: case ModulusSubtractCompositeOp: case MultiplyCompositeOp: case OverlayCompositeOp: case PegtopLightCompositeOp: case PinLightCompositeOp: case ScreenCompositeOp: case SoftLightCompositeOp: case VividLightCompositeOp: { alpha=RoundToUnity(Sa+Da-Sa*Da); break; } case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case SrcInCompositeOp: { alpha=Sa*Da; break; } case DissolveCompositeOp: { alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+ canvas_dissolve*Da; break; } case DstOverCompositeOp: case OverCompositeOp: case SrcOverCompositeOp: { alpha=Sa+Da-Sa*Da; break; } case DstOutCompositeOp: { alpha=Da*(1.0-Sa); break; } case OutCompositeOp: case SrcOutCompositeOp: { alpha=Sa*(1.0-Da); break; } case BlendCompositeOp: case PlusCompositeOp: { alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da); break; } case XorCompositeOp: { alpha=Sa+Da-2.0*Sa*Da; break; } default: { alpha=1.0; break; } } if (GetPixelWriteMask(image,q) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } switch (compose) { case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case ModulateCompositeOp: case SaturateCompositeOp: { GetPixelInfoPixel(source_image,p,&source_pixel); GetPixelInfoPixel(image,q,&canvas_pixel); break; } default: break; } for (i=0; i < 
(ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel, sans; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image,channel); if (traits == UndefinedPixelTrait) continue; if ((source_traits == UndefinedPixelTrait) && (channel != AlphaPixelChannel)) continue; if (channel == AlphaPixelChannel) { /* Set alpha channel. */ switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case CopyBlackCompositeOp: case CopyBlueCompositeOp: case CopyCyanCompositeOp: case CopyGreenCompositeOp: case CopyMagentaCompositeOp: case CopyRedCompositeOp: case CopyYellowCompositeOp: case SrcAtopCompositeOp: case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Da; break; } case ChangeMaskCompositeOp: { MagickBooleanType equivalent; if (Da < 0.5) { pixel=(MagickRealType) TransparentAlpha; break; } equivalent=IsFuzzyEquivalencePixel(source_image,p,image,q); if (equivalent != MagickFalse) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) OpaqueAlpha; break; } case ClearCompositeOp: { pixel=(MagickRealType) TransparentAlpha; break; } case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Da; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Sa; break; } if (Sa < Da) { pixel=QuantumRange*Da; break; } pixel=QuantumRange*Sa; break; } case CopyAlphaCompositeOp: { if (source_image->alpha_trait == UndefinedPixelTrait) pixel=GetPixelIntensity(source_image,p); else pixel=QuantumRange*Sa; break; } case CopyCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: case DstAtopCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sa; break; } case DarkenIntensityCompositeOp: { 
pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case LightenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case ModulateCompositeOp: { pixel=QuantumRange*Da; break; } default: { pixel=QuantumRange*alpha; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); continue; } /* Sc: source color. Dc: canvas color. */ Sc=(MagickRealType) GetPixelChannel(source_image,channel,p); Dc=(MagickRealType) q[i]; if ((traits & CopyPixelTrait) != 0) { /* Copy channel. */ q[i]=Sc; continue; } /* Porter-Duff compositions: Sca: source normalized color multiplied by alpha. Dca: normalized canvas color multiplied by alpha. */ Sca=QuantumScale*Sa*Sc; Dca=QuantumScale*Da*Dc; switch (compose) { case DarkenCompositeOp: case LightenCompositeOp: case ModulusSubtractCompositeOp: { gamma=PerceptibleReciprocal(1.0-alpha); break; } default: { gamma=PerceptibleReciprocal(alpha); break; } } pixel=Dc; switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case SrcAtopCompositeOp: { pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa)); break; } case BlendCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc); break; } case BlurCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sca; break; } case DisplaceCompositeOp: case DistortCompositeOp: { pixel=Sc; break; } case BumpmapCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc; break; } case ChangeMaskCompositeOp: { pixel=Dc; break; } case ClearCompositeOp: { pixel=0.0; break; } case ColorBurnCompositeOp: { if ((Sca == 0.0) && (Dca == Da)) { pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa)); break; } if (Sca == 0.0) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)); break; } 
pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-Dca/Da)*Sa/ Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ColorDodgeCompositeOp: { if ((Sca*Da+Dca*Sa) >= Sa*Da) pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); else pixel=QuantumRange*gamma*(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca* (1.0-Sa)); break; } case ColorizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &sans,&sans,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &hue,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case CopyAlphaCompositeOp: { pixel=Dc; break; } case CopyBlackCompositeOp: { if (channel == BlackPixelChannel) pixel=(MagickRealType) (QuantumRange- GetPixelBlack(source_image,p)); break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { if (channel == BluePixelChannel) pixel=(MagickRealType) GetPixelBlue(source_image,p); break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { if (channel == GreenPixelChannel) pixel=(MagickRealType) GetPixelGreen(source_image,p); break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { if (channel == RedPixelChannel) pixel=(MagickRealType) GetPixelRed(source_image,p); break; } case DarkenCompositeOp: { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ if ((Sca*Da) < (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case DarkenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? 
Sc : Dc; break; } case DifferenceCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa)); break; } case DissolveCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa* canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc); break; } case DivideDstCompositeOp: { if ((fabs((double) Sca) < MagickEpsilon) && (fabs((double) Dca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (fabs((double) Dca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case DivideSrcCompositeOp: { if ((fabs((double) Dca) < MagickEpsilon) && (fabs((double) Sca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } if (fabs((double) Sca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } pixel=QuantumRange*gamma*(Dca*Sa*Sa/Sca+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } case DstAtopCompositeOp: { pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da)); break; } case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Dca; break; } case DstInCompositeOp: { pixel=QuantumRange*(Dca*Sa); break; } case DstOutCompositeOp: { pixel=QuantumRange*(Dca*(1.0-Sa)); break; } case DstOverCompositeOp: { pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da)); break; } case ExclusionCompositeOp: { pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case HardLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0- Sa)); break; } pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case HardMixCompositeOp: { pixel=gamma*(((Sca+Dca) < 1.0) ? 
0.0 : QuantumRange); break; } case HueCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &hue,&sans,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case InCompositeOp: case SrcInCompositeOp: { pixel=QuantumRange*(Sca*Da); break; } case LinearBurnCompositeOp: { /* LinearBurn: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Sc + Dc - 1 */ pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da); break; } case LinearDodgeCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc); break; } case LinearLightCompositeOp: { /* LinearLight: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Dc + 2*Sc - 1 */ pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca); break; } case LightenCompositeOp: { if ((Sca*Da) > (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case LightenIntensityCompositeOp: { /* Lighten is equivalent to a 'Maximum' method OR a greyscale version of a binary 'And' OR the 'Union' of pixel sets. */ pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? 
Sc : Dc; break; } case LuminizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&sans,&luma); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case MathematicsCompositeOp: { /* 'Mathematics' a free form user control mathematical composition is defined as... f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D Where the arguments A,B,C,D are (currently) passed to composite as a command separated 'geometry' string in "compose:args" image artifact. A = a->rho, B = a->sigma, C = a->xi, D = a->psi Applying the SVG transparency formula (see above), we get... Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa) Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) + Dca*(1.0-Sa) */ pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+ geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+ geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case MinusDstCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa); break; } case MinusSrcCompositeOp: { /* Minus source from canvas. 
f(Sc,Dc) = Sc - Dc */ pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da); break; } case ModulateCompositeOp: { ssize_t offset; if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint); if (offset == 0) { pixel=Dc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); luma+=(0.01*percent_luma*offset)/midpoint; chroma*=0.01*percent_chroma; HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ModulusAddCompositeOp: { pixel=Sc+Dc; while (pixel > QuantumRange) pixel-=QuantumRange; while (pixel < 0.0) pixel+=QuantumRange; pixel=(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa)); break; } case ModulusSubtractCompositeOp: { pixel=Sc-Dc; while (pixel > QuantumRange) pixel-=QuantumRange; while (pixel < 0.0) pixel+=QuantumRange; pixel=(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa)); break; } case MultiplyCompositeOp: { pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case OutCompositeOp: case SrcOutCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)); break; } case OverCompositeOp: case SrcOverCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa)); break; } case OverlayCompositeOp: { if ((2.0*Dca) < Da) { pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0- Da)); break; } pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+ Sca*(1.0-Da)); break; } case PegtopLightCompositeOp: { /* PegTop: A Soft-Light alternative: A continuous version of the Softlight function, producing very similar results. f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm. 
*/ if (fabs((double) Da) < MagickEpsilon) { pixel=QuantumRange*gamma*(Sca); break; } pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0- Da)+Dca*(1.0-Sa)); break; } case PinLightCompositeOp: { /* PinLight: A Photoshop 7 composition method http://www.simplefilter.de/en/basics/mixmods.html f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc */ if ((Dca*Sa) < (Da*(2.0*Sca-Sa))) { pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa)); break; } if ((Dca*Sa) > (2.0*Sca*Da)) { pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca); break; } case PlusCompositeOp: { pixel=QuantumRange*(Sca+Dca); break; } case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ScreenCompositeOp: { /* Screen: a negated multiply: f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc) */ pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca); break; } case SoftLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-(Dca/Da)))+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da)) { pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*(Dca/Da)* (4.0*(Dca/Da)+1.0)*((Dca/Da)-1.0)+7.0*(Dca/Da))+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow((Dca/Da),0.5)- (Dca/Da))+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ThresholdCompositeOp: { MagickRealType delta; delta=Sc-Dc; if ((MagickRealType) 
fabs((double) (2.0*delta)) < threshold) { pixel=gamma*Dc; break; } pixel=gamma*(Dc+delta*amount); break; } case VividLightCompositeOp: { /* VividLight: A Photoshop 7 composition method. See http://www.simplefilter.de/en/basics/mixmods.html. f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc)) */ if ((fabs((double) Sa) < MagickEpsilon) || (fabs((double) (Sca-Sa)) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if ((2.0*Sca) <= Sa) { pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)/(2.0*Sca))+Sca* (1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa*Sa/(2.0*(Sa-Sca))+Sca*(1.0-Da)+Dca* (1.0-Sa)); break; } case XorCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } default: { pixel=Sc; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } p+=GetPixelChannels(source_image); channels=GetPixelChannels(source_image); if (p >= (pixels+channels*source_image->columns)) p=pixels; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); if (canvas_image != (Image * ) NULL) canvas_image=DestroyImage(canvas_image); else source_image=DestroyImage(source_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e x t u r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TextureImage() repeatedly tiles the texture image across and down the image % canvas. 
%
%  The format of the TextureImage method is:
%
%      MagickBooleanType TextureImage(Image *image,const Image *texture,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o texture_image: This image is the texture to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
  ExceptionInfo *exception)
{
#define TextureImageTag  "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  /*
    The image must be DirectClass so individual pixels can be overwritten.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Clone the texture so its colorspace and virtual pixel method can be
    adjusted for tiling without modifying the caller's image.
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace,exception);
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
    exception);
  status=MagickTrue;
  /*
    Two code paths: when the compose method requires real compositing (i.e.
    anything other than a plain opaque copy), tile via CompositeImage();
    otherwise fall through to the optimized direct pixel-copy path below.
  */
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) ||
       (image->alpha_trait != UndefinedPixelTrait) ||
       (texture_image->alpha_trait != UndefinedPixelTrait)))
    {
      /*
        Tile texture onto the image background, one composite per tile.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t)
          texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          thread_status=CompositeImage(image,texture_image,image->compose,
            MagickTrue,x+texture_image->tile_offset.x,y+
            texture_image->tile_offset.y,exception);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType)
              y,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized): copy texture pixels
    row by row directly through cache views, no per-tile compositing.
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(texture_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *p,
      *pixels;

    register ssize_t
      x;

    register Quantum
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /*
      The modulo wraps the source row so the texture repeats vertically; the
      TileVirtualPixelMethod set above handles horizontal wrap-around.
    */
    pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
      (y+texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t)
      texture_image->columns)
    {
      register ssize_t
        j;

      p=pixels;
      width=texture_image->columns;
      /*
        Clip the final tile so it does not run past the image's right edge.
      */
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;
      for (j=0; j < (ssize_t) width; j++)
      {
        register ssize_t
          i;

        /*
          Masked destination pixels are skipped but both pointers still
          advance to stay in lock-step.
        */
        if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
          {
            p+=GetPixelChannels(texture_image);
            q+=GetPixelChannels(image);
            continue;
          }
        for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(texture_image,i);
          PixelTrait traits = GetPixelChannelTraits(image,channel);
          PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (texture_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(image,channel,p[i],q);
        }
        p+=GetPixelChannels(texture_image);
        q+=GetPixelChannels(image);
      }
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
LinearNearestNeighbors.h
// // Copyright (c) 2009, Markus Rickert // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // #ifndef RL_MATH_LINEARNEARESTNEIGHBORS_H #define RL_MATH_LINEARNEARESTNEIGHBORS_H #include <algorithm> #include <vector> namespace rl { namespace math { /** * Linear nearest neighbor search. 
 *
 * Brute-force nearest-neighbor search over a flat container: every query
 * computes the metric distance to all stored values. Exact, with O(n)
 * query time; suitable as a baseline or for small data sets.
 */
template<typename MetricT, typename ContainerT = ::std::vector<typename MetricT::Value>>
class LinearNearestNeighbors
{
public:
	typedef typename ContainerT::const_iterator const_iterator;
	
	typedef const typename MetricT::Value& const_reference;
	
	typedef typename ContainerT::difference_type difference_type;
	
	typedef typename ContainerT::iterator iterator;
	
	typedef typename MetricT::Value& reference;
	
	typedef typename ContainerT::size_type size_type;
	
	typedef typename MetricT::Value value_type;
	
	typedef ContainerT Container;
	
	typedef typename MetricT::Distance Distance;
	
	typedef MetricT Metric;
	
	typedef typename MetricT::Value Value;
	
	// A search result: the distance to the query paired with the stored value.
	typedef ::std::pair<Distance, Value> Neighbor;
	
	// Construct an empty index with a copy of the given metric.
	explicit LinearNearestNeighbors(const Metric& metric) :
		container(),
		metric(metric)
	{
	}
	
	// Construct an empty index, moving in (or default-constructing) the metric.
	explicit LinearNearestNeighbors(Metric&& metric = Metric()) :
		container(),
		metric(metric)
	{
	}
	
	// Construct from a range of values with a copy of the given metric.
	template<typename InputIterator>
	LinearNearestNeighbors(InputIterator first, InputIterator last, const Metric& metric) :
		container(first, last),
		metric(metric)
	{
	}
	
	// Construct from a range of values, moving in the metric.
	template<typename InputIterator>
	LinearNearestNeighbors(InputIterator first, InputIterator last, Metric&& metric = Metric()) :
		container(first, last),
		metric(::std::move(metric))
	{
	}
	
	~LinearNearestNeighbors()
	{
	}
	
	// Bounds-checked element access (throws std::out_of_range like the container).
	Value& at(const ::std::size_t& i)
	{
		return this->container.at(i);
	}
	
	const Value& at(const ::std::size_t& i) const
	{
		return this->container.at(i);
	}
	
	Value& back()
	{
		return this->container.back();
	}
	
	const Value& back() const
	{
		return this->container.back();
	}
	
	iterator begin()
	{
		return this->container.begin();
	}
	
	const_iterator begin() const
	{
		return this->container.begin();
	}
	
	::std::size_t capacity() const
	{
		return this->container.capacity();
	}
	
	const_iterator cbegin() const
	{
		return this->container.cbegin();
	}
	
	const_iterator cend() const
	{
		return this->container.cend();
	}
	
	void clear()
	{
		this->container.clear();
	}
	
	bool empty() const
	{
		return this->container.empty();
	}
	
	iterator end()
	{
		return this->container.end();
	}
	
	const_iterator end() const
	{
		return this->container.end();
	}
	
	void erase(const_iterator pos)
	{
		this->container.erase(pos);
	}
	
	Value& front()
	{
		return this->container.front();
	}
	
	const Value& front() const
	{
		return this->container.front();
	}
	
	// Append a range of values to the index.
	template<typename InputIterator>
	void insert(InputIterator first, InputIterator last)
	{
		this->container.insert(this->container.end(), first, last);
	}
	
	::std::size_t max_size() const
	{
		return this->container.max_size();
	}
	
	// Return the k nearest neighbors of query, by default in ascending
	// distance order. NOTE(review): k == 0 appears to hit an empty-heap
	// front() inside search() -- confirm callers always pass k >= 1.
	::std::vector<Neighbor> nearest(const Value& query, const ::std::size_t& k, const bool& sorted = true) const
	{
		return this->search(query, &k, nullptr, sorted);
	}
	
	// Unchecked element access.
	Value& operator[](const ::std::size_t& i)
	{
		return this->container[i];
	}
	
	const Value& operator[](const ::std::size_t& i) const
	{
		return this->container[i];
	}
	
	// Add a single value to the index.
	void push(const Value& value)
	{
		this->container.push_back(value);
	}
	
	// Return all neighbors with distance strictly less than radius,
	// by default in ascending distance order.
	::std::vector<Neighbor> radius(const Value& query, const Distance& radius, const bool& sorted = true) const
	{
		return this->search(query, nullptr, &radius, sorted);
	}
	
	void reserve(const ::std::size_t& capacity)
	{
		this->container.reserve(capacity);
	}
	
	::std::size_t size() const
	{
		return this->container.size();
	}
	
	void swap(LinearNearestNeighbors& other)
	{
		using ::std::swap;
		swap(this->container, other.container);
		swap(this->metric, other.metric);
	}
	
	friend void swap(LinearNearestNeighbors& lhs, LinearNearestNeighbors& rhs)
	{
		lhs.swap(rhs);
	}
	
protected:
	
private:
	// Orders neighbors by distance; used to maintain a max-heap whose root
	// (front) is the current worst (largest-distance) candidate.
	struct NeighborCompare
	{
		bool operator()(const Neighbor& lhs, const Neighbor& rhs) const
		{
			return lhs.first < rhs.first;
		}
	};
	
	// Shared implementation for nearest() (k != nullptr) and radius()
	// (radius != nullptr): compute all distances, then keep candidates in a
	// max-heap so the worst of the current k can be evicted in O(log k).
	::std::vector<Neighbor> search(const Value& query, const ::std::size_t* k, const Distance* radius, const bool& sorted) const
	{
		::std::vector<Neighbor> neighbors;
		neighbors.reserve(nullptr != k ? *k : this->size());
		
		::std::vector<Distance> distances(this->container.size());
		
		// OpenMP before 3.0 (200805) only accepts signed loop variables,
		// hence the ptrdiff_t fallback for old runtimes.
#pragma omp parallel for
#if defined(_OPENMP) && _OPENMP < 200805
		for (::std::ptrdiff_t i = 0; i < this->container.size(); ++i)
#else
		for (::std::size_t i = 0; i < this->container.size(); ++i)
#endif
		{
			distances[i] = this->metric(query, this->container[i]);
		}
		
		for (::std::size_t i = 0; i < this->container.size(); ++i)
		{
			// Candidate qualifies if there is no k-limit, the heap is not
			// yet full, or it beats the current worst neighbor (heap root).
			if (nullptr == k || neighbors.size() < *k || distances[i] < neighbors.front().first)
			{
				if (nullptr == radius || distances[i] < *radius)
				{
					// Heap full: evict the worst before inserting.
					if (nullptr != k && *k == neighbors.size())
					{
						::std::pop_heap(neighbors.begin(), neighbors.end(), NeighborCompare());
						neighbors.pop_back();
					}
					
					// MSVC before VS2013 (1800) lacks usable emplace_back.
#if defined(_MSC_VER) && _MSC_VER < 1800
					neighbors.push_back(::std::make_pair(distances[i], this->container[i]));
#else
					neighbors.emplace_back(distances[i], this->container[i]);
#endif
					::std::push_heap(neighbors.begin(), neighbors.end(), NeighborCompare());
				}
			}
		}
		
		if (sorted)
		{
			::std::sort_heap(neighbors.begin(), neighbors.end(), NeighborCompare());
		}
		
		return neighbors;
	}
	
	// Stored values searched linearly on every query.
	Container container;
	
	// Distance functor; metric(query, value) yields a Distance.
	Metric metric;
};
}
}

#endif // RL_MATH_LINEARNEARESTNEIGHBORS_H
agilekeychain_fmt_plug.c
/* 1Password Agile Keychain cracker patch for JtR. Hacked together during * July of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>. * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * This software is based on "agilekeychain" project but no actual code is * borrowed from it. * * "agilekeychain" project is at https://bitbucket.org/gwik/agilekeychain */ #if FMT_EXTERNS_H extern struct fmt_main fmt_agile_keychain; #elif FMT_REGISTERS_H john_register_one(&fmt_agile_keychain); #else #include <string.h> #include <errno.h> #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1 // tuned on core i7 #endif #endif #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "johnswap.h" #include "options.h" #include "pbkdf2_hmac_sha1.h" #include "aes.h" #include "jumbo.h" #include "memdbg.h" #define FORMAT_LABEL "agilekeychain" #define FORMAT_NAME "1Password Agile Keychain" #define FORMAT_TAG "$agilekeychain$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #ifdef SIMD_COEF_32 #define ALGORITHM_NAME "PBKDF2-SHA1 AES " SHA1_ALGORITHM_NAME #else #define ALGORITHM_NAME "PBKDF2-SHA1 AES 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define SALT_ALIGN sizeof(int) #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(struct custom_salt) #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #define SALTLEN 8 #define IVLEN 8 #define CTLEN 1040 static struct fmt_tests agile_keychain_tests[] = { 
{"$agilekeychain$2*1000*8*7146eaa1cca395e5*1040*e7eb81496717d35f12b83024bb055dec00ea82843886cbb8d0d77302a85d89b1d2c0b5b8275dca44c168cba310344be6eea3a79d559d0846a9501f4a012d32b655047673ef66215fc2eb4e944a9856130ee7cd44523017bbbe2957e6a81d1fd128434e7b83b49b8a014a3e413a1d76b109746468070f03f19d361a21c712ef88e05b04f8359f6dd96c1c4487ea2c9df22ea9029e9bc8406d37850a5ead03062283a42218c134d05ba40cddfe46799c931291ec238ee4c11dc71d2b7e018617d4a2bf95a0c3c1f98ea14f886d94ee2a65871418c7c237f1fe52d3e176f8ddab6dfd4bc039b6af36ab1bc9981689c391e71703e31979f732110b84d5fccccf59c918dfcf848fcd80c6da62ced6e231497b9cbef22d5edca439888556bae5e7b05571ac34ea54fafc03fb93e4bc17264e50a1d04b688fcc8bc715dd237086c2537c32de34bbb8a29de0208800af2a9b561551ae6561099beb61045f22dbe871fab5350e40577dd58b4c8fb1232f3f85b8d2e028e5535fd131988a5df4c0408929b8eac6d751dcc698aa1d79603251d90a216ae5e28bffc0610f61fefe0a23148dcc65ab88b117dd3b8d311157424867eb0261b8b8c5b11def85d434dd4c6dc7036822a279a77ec640b28da164bea7abf8b634ba0e4a13d9a31fdcfebbdbe53adcdf2564d656e64923f76bc2619428abdb0056ce20f47f3ece7d4d11dc55d2969684ca336725561cb27ce0504d57c88a2782daccefb7862b385d494ce70fef93d68e673b12a68ba5b8c93702be832d588ac935dbf0a7b332e42d1b6da5f87aed03498a37bb41fc78fcdbe8fe1f999fe756edf3a375beb54dd508ec45af07985f1430a105e552d9817106ae12d09906c4c28af575d270308a950d05c07da348f59571184088d46bbef3e7a2ad03713e90b435547b23f340f0f5d00149838d9919d40dac9b337920c7e577647fe4e2811f05b8e888e3211d9987cf922883aa6e53a756e579f7dff91c297fcc5cda7d10344545f64099cfd2f8fd59ee5c580ca97cf8b17e0222b764df25a2a52b81ee9db41b3c296fcea1203b367e55d321c3504aeda8913b0cae106ccf736991030088d581468264b8486968e868a44172ad904d97e3e52e8370aaf52732e6ee6cc46eb33a901afc6b7c687b8f6ce0b2b4cdfe19c7139615195a052051becf39383ab83699a383a26f8a36c78887fe27ea7588c0ea21a27357ff9923a3d23ca2fb04ad671b63f8a8ec9b7fc969d3bece0f5ff19a40bc327b9905a6de2193ffe3aa1997e9266205d083776e3b94869164abcdb88d64b8ee5465f7165b75e1632abd364a24bb1426889955b8f0354f75c6fb40e254f7de53d8ef7fee9644bf2ebccd934a72bb1c
c9c19d354d66996acbddd60d1241657359d9074a4b313b21af2ee4f10cf20f4122a5fad4ee4f37a682ffb7234bea61985d1ad130bfb9f4714461fb574dbf851c*1000*8*c05f3bc3e7f3cad7*1040*f3e3d091b64da1529b04b2795898b717faad59f7dae4bda25e6e267c28a56a7702e51991b2a3fb034cdda2d9bfd531dfd2c3af00f39fdfe8bcbdde02ab790415bcf071d133b15f647f55ff512730ae4914ce20b72184c827f6350ac768b00c9eab0e3322e084bb3e9e9439a10030950f5504dcc4f7ba614b27fde99bd0d743a58341e90ec313395486eb8068df205b7bdf25134ed97dd2e2883d7eb3e63b659602ada765084a69d7ed8fc55b60aa67718cc9e5bf31ab8f3029b32a4b001071848d2b76b5f4b921d2169ca287e9e78ecd904d040c817c7c7cde4ba8510b462e139c16519962ca0adb7d5f89d431cd4541a9a7aaec8d799697f4d3947d87884bed32ada13db725c72ab6450ac8fe989a94917cca784bcf6ffbe756f19d4e8897e0f80d8c318e13e5b30fc356646aaf038a952b0781f12dfef1f4bd6922ae05a573eeff4dbb064cfbb0fd62962a6a53a8de308da2b8e83baebfe261cb127f874a5eff3f05cda123ab2ba559cf444ce33b6845f4c902733b8982044151a8aa1859769082ade5928f2d4f616ce972ae8dde1f2be37d496ad16057008dfe678c75cbdc53db25ed311edbcf8b2a73bcd2809f6bd1d389aaeed82a75fa15676d08aa5390efdc189c180be6a52ec5a7371304d26e477039197671377d1ea3d6ee41e68a42348a4fe9a1d2400eaeba8ed0a7419b9694d780456d96378c00318a5be0f41afa887476b3bebb7cf30d61ca8fc77de35671a3053a517aa39444e01e1752da3146dc97eec5849d6f025c3d4bc6e0499b901f629d8a081ad35ed33602cbef5e9a68f090170fcc1f285eb094e3dc619740a067fd2aeeb20abbb17926c3ad097f3f0bad4de540d1829a985cd7e700100622ec47da046071c11a1597e5f093268b4ed79ffcf2450b9ba2b649b932fbce912bdb4da010581bd9c731be792c8f75177f6c8c4e1756d63a1491a8aae4bb11beeca118e7d08073b500dd82b81e4bdbeb15625afca8f1c8e06b2360da972587516ef62e91d1d9aad90e62226d53363bff318f5af21f69c234731ac22b09506a1b807d2366e88905668d960c7963daa93046e9a56db1d7a437e9a37aa7a2945197265478b264ec14d383030ef73504fd26d4be9e72ebddb14a00bf6bd66a3adaa1d17cada378a2b0bc852f961af52333f7966f8a60738dfd47e79ce537082f187117ffd31f54f53356b671154dfa245671c4cd054c1a8d303a202fccfae6d3f9e3646838cef38703b5e660b5ce7679f5898d801908f90092dbec335c98e4002041287fe9bfa7d7828a29ab24
0ec2cedc9fa12cfd7c3ef7b61dad4fbf2ef9c0a904dbde1b3792fb5178607608dc9fc2fbc85addf89fa3df94317e729810b508356b5bb176cdb022afb0ec5eeff4d5081b66733d1be1b54cc4f080bfc33187663b5ab185472b35dc8812e201472e6af376c43ee23aa2db6cd04bddd79b99b0c28c48a5ae", "openwall"}, {"$agilekeychain$1*1000*8*54434b3047723444*1040*316539685a36617546544a61466e35743970356559624464304467394a4a41615459594a6b66454c5462417a7a694b5751474e4748595036344f3945374b414b676b6b7278673658794e63734a316c48656b496a3156346a544c6861797537347032466b4d6b416d31704a6b5063547a44703152544f72696e6e38347732597672774f6476414c70346462595a7678656b6e5958716b7a61746d5874514e575965564735627a437578584e4a573050567939413073306c377a4d726e6d576a6655424455394f4934696c48454f4d536e635567393950686d4171364f76747749446130454c6d74783069704d30456d45374f56736e486a5534667877327a526e52596e55454452393544437042646e6739355938714836584968664c4d7a726a4f63544c6858385141464c71565463664270493761664d633055447879613169456a72664479346438305641417054754775477a475266766c4774543668673848624d31636c37624e73743549634457655375507138535139396c4c39364c4f6f757a43305535586161364b47676a61713971394459526a78744e547459797a6a57715a3575534364487a4430306d4e4e39483277674c733238726463616d4f5146467957374234727252774b6d6161664b6d67414d5854496444665848684c376c6c776d47477a4b57566d5a3646346e775441446f3659745038646d336b6370494d50676742797a41325630716e794833793237494152496477556e4d6c4751497367346672635364486e6e71504f6e6264575953584462586c6e573947347a567163535333366e3253504d65656b45483841544f6952384d6170724471706c4a307863713653707265624f544a4d5139377562454a334b776e4879746a37704e37694557484d69696d436f484973613443754d484b4f51484833545a364654694a6d31783061665536796c444f7257666964397243444f684d305a324c6b75693953716664354b435963703559354978757a64354a755158394136663744435a674e4c73484a7935737a707739724c783077316631637349757a6d696252576244396a537730593143633348385a775734534b646569684f634f4c35323364734b7179625750364b76344a4a56626c4f727069366f575a386432745375684c464e42643173445a6a50745743696e66
6a4458325058644d57654c596d326f5763516a7951524a566372354d4d58435877765172596b734c59354476455156746d75504830444a4e47624e31524f4d544b4a6b4d675835305a7a56736758794c475057714e78496452725269484c75424f4d6d793550677277727453597045566e304c5642764c5a6732504c7a4e71584c4c67634979637369554a3446497655795a78583547306b365a4e337477786c7961796b4d787463796971596f516fcb3584235d7ecde5f8b7bc2b8f1e9e2e*46c3b75f6e4cf139e92f683f32107271", "123"}, {"$agilekeychain$1*1000*8*7a697868444e7458*1040*773954704874444d4d523043546b44375135544f74675a754532624a45794848305949436e4e724d336c524c39316247426a7843317131614152736d50724c6474586a4d4d445954786c31376d363155437130777a414d36586c7045555457424a5a436a657541456742417961654472745a73576e4b7a7a344d547043567846526655524b4339573631756f3850465a3878306b7176644c4253787071764c58376e716a50674f526d4a4e4b546e3359575175614b304a3964756f756935675a77544f4e6770654855776f79553465786e41364d6376496b7651624762424d62756746796a6753514c37793069783869683773454c533559365946584f545246616d48495730464e634d42466e51367856797a4368517335674a755972434b545944633270764e54775879563542776675386b6e4462506b743138694a756d63447134745361526a32373167366e787375514e346a73574e77796b4b49376d3677653448754c364b5a41514633626e71786130634458544e484a436551386e7679304b786d73346f774a383268665167596b466e39317a307269714434546d4d6173416e344b6a74455a584846526a6659746742504262495958386336755241386c496633417666696d7a5036425745757461736b684574794a5230436d50466d4b536375764674674562315679766a43453077356e614b476d345849395a726b7037626153496b6a66634f355261795157645941487731516f564c6764516d4e3074394b3839526341626f6b6b38324465497068624553646f4177786e6f68347779523338394f4e6561315271635236374d424d695978304b336b4a6966776e74614f4b43483237434b596a6630774e79394a4b7153714a48616b4b364455596a454b31433767786a72303450706d44666373574c5a61324f335852474b756c456b76483349754e3156654f417342324d6f75346d4b78774e43424863566e344c4c6c6c6d4e446b617550415a6f3337764f55484b4156344d4769336267344f4737794c354c5567636a565a6b7369616730383377744d695134
31333032305a4a3747794944714d67396a5651444132424e79507a34726d346c333552757a764b6c543073437562534376714f346a5939784a546f683358517348623378716677313231383261685357743236455a6a6b6674365870554642386436574c374430635177347278736a744a6e463530756365684c7779497557366550356936514e704e4863353863437165397163496146794a726555714c623438543235396371416154326c66375276746e3550727453306b7042335961364239586c3359384b464865564e677636537234414e4d6c55583867456376686e43646e6e776a6f656d7152613453725148503462744b4a334565714f6e624a774a65623258552fff2bf0505a0bc88b9cbc9073a74586*a6f6556c971bd3ad40b52751ba025713", ""}, {"$agilekeychain$1*1000*8*7a65613743636950*1040*524a397449393859696b4a576e437763716a574947544a6d306e32474442343355764a7a6948517a45686d7569636631514745347448424e4e6b32564239656a55596f724671547638736d4e66783949504b6f38746b6f49426d4d6b794c7a6d3077327639365a4b515934357774664a477247366b5539486135495863766845714146317458356b725a6a50376f726e55734b3136533756706a4b42516165656a50336e4558616450794f59506f4771347268454730784555485a4f5a4772526a76354f45417470616258375a386436474b366f7653583257335939516d4f5364446a414b674e467a31374f716d73516b3362795776305a414a314f63324d616a6c6472413939443879414c523733794c47467654734d7a6a4734733461674353357a4456527841486233646d446e797448696837377364784344704831784f6a5975666168626b5534796678576c59584d4b3448704a784a4f675a6d7672636b5a4b567071445a345a376648624b55414b7262694972384531336c7a6875725a6f44627571775361774b66417743336230614e4166564954334a6c3477666b4254374f747565394b32667266566d3263416a656c79416c45724b3035504a4e42307a33303632483466664272705765415a4f3552416a36544e5a54415a5976666a4b53675a68493071394a6563426964544a4f564d304a773976394944444339516e564a78587539366974586c4f6132717937354c554b65384b7638585132596832417a5271314e4b5653766d4d50506d3554463762763961554e45695a51436e79504f6e7146617a755231373574455365305446624c636450424a43526a49384b32365967496a734c324e525574526e36714c533065694f536c6c37795a456945476d4a6e327262646942416c485046616e384e4d7869427571777355714e76383052675
37752726245696c734d68664b53793836684b39445a716b47546d4b59747176474c6b6a6d52513368796b367a356449706c64385541614236546e426a6b4f64766d33493972763941765a71776345686b734c594a7254446c796f46444b6d557441305a636b414e437245587a63487a30304c50564e4e73694d634d5a6f4f74414534424f53685879374e62545734487a555054774a7056686f6a7453666a664e696d354548345631374c61396862586659666332304e465a5678656a304b4d59586d586547634d67474c6d31794a4b546473474c755a697579625779503259726d6d5248544f6f704b575046556e3438415a48474168396d787136327230367248774e73493439693049794b3765314b4f74547265556c564b6e6d594a5959355a7476334b546f75375a6a676c755a557a39744b54747745583948314a37366e6c6d5a53345079555856696438336876596141617a394438711ee66b990b013609582733309b01df00*444f4656a5ec58e8a75204fb25fd5ae5", "PASSWORD"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked; static struct custom_salt { unsigned int nkeys; unsigned int iterations[2]; unsigned int saltlen[2]; unsigned char salt[2][SALTLEN]; unsigned int ctlen[2]; unsigned char ct[2][CTLEN]; } *cur_salt; static void init(struct fmt_main *self) { #if defined (_OPENMP) int omp_t = 1; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_align(sizeof(*saved_key), self->params.max_keys_per_crypt, MEM_ALIGN_WORD); cracked = mem_calloc_align(sizeof(*cracked), self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr; int ctlen; int saltlen; char *p; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; if ((p = strtokm(ctcopy, "*")) == NULL) /* nkeys */ goto err; if (!isdec(p)) goto err; if (atoi(p) > 2) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iterations */ goto err; if (!isdec(p)) goto err; if ((p 
= strtokm(NULL, "*")) == NULL)	/* salt length */
		goto err;
	if (!isdec(p))
		goto err;
	saltlen = atoi(p);
	if(saltlen > SALTLEN)	/* salt must fit the fixed 8-byte buffer */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
		goto err;
	if(strlen(p) != saltlen * 2)	/* hex encoding: two digits per byte */
		goto err;
	if(!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* ct length */
		goto err;
	if (!isdec(p))
		goto err;
	ctlen = atoi(p);
	if (ctlen > CTLEN)	/* ciphertext must fit the fixed 1040-byte buffer */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* ciphertext */
		goto err;
	if(strlen(p) != ctlen * 2)
		goto err;
	if(!ishexlc(p))
		goto err;

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/* Parse an already-validated ciphertext into a custom_salt structure.
 * Returns a pointer to static storage that is copied by the caller.
 * NOTE(review): only the first keychain entry (index 0) is decoded even
 * when nkeys is 2 -- presumably cracking the first key is sufficient;
 * confirm against the on-disk keychain format. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$agilekeychain$" */
	p = strtokm(ctcopy, "*");
	cs.nkeys = atoi(p);
	p = strtokm(NULL, "*");
	cs.iterations[0] = atoi(p);
	p = strtokm(NULL, "*");
	cs.saltlen[0] = atoi(p);
	p = strtokm(NULL, "*");
	/* decode hex-encoded salt bytes */
	for (i = 0; i < cs.saltlen[0]; i++)
		cs.salt[0][i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.ctlen[0] = atoi(p);
	p = strtokm(NULL, "*");
	/* decode hex-encoded ciphertext blob */
	for (i = 0; i < cs.ctlen[0]; i++)
		cs.ct[0][i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}

/* Install the active salt for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Attempt to decrypt the keychain blob with the PBKDF2-derived key and
 * report whether the result looks like a valid decryption (good PKCS
 * padding plus a plausible embedded key size).  Returns 0 on probable
 * success, -1 otherwise.
 *
 * NOTE(review): all offsets are computed from the fixed CTLEN (1040), not
 * the per-hash ctlen; valid() accepts ctlen < CTLEN, so confirm that
 * shorter blobs cannot occur in practice before relying on this. */
static int akcdecrypt(unsigned char *derived_key, unsigned char *data)
{
	unsigned char out[CTLEN];
	int n, key_size;
	AES_KEY akey;
	unsigned char iv[16];

	/* CBC: the IV of the last block is the preceding ciphertext block */
	memcpy(iv, data + CTLEN - 32, 16);
	if (AES_set_decrypt_key(derived_key, 128, &akey) < 0)
		fprintf(stderr, "AES_set_decrypt_key failed in crypt!\n");
	/* decrypting only the final 16-byte block suffices to test padding */
	AES_cbc_encrypt(data + CTLEN - 16, out + CTLEN - 16, 16, &akey, iv, AES_DECRYPT);
	n = check_pkcs_pad(out, CTLEN, 16);
	if (n < 0)	/* bad PKCS padding -> wrong password */
		return -1;
	key_size = n / 8;
	if (key_size != 128 && key_size != 192 && key_size != 256) // "invalid key size"
		return -1;
	return 0;
}

static int
crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	/* Standard JtR crypt loop: with OpenMP the block below is a parallel
	 * loop body; without it, the block runs exactly once, which covers
	 * count because min/max keys per crypt are sized accordingly. */
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SIMD_COEF_32
		/* SIMD path: derive MAX_KEYS_PER_CRYPT keys in one PBKDF2 pass */
		unsigned char master[MAX_KEYS_PER_CRYPT][32];
		int lens[MAX_KEYS_PER_CRYPT], i;
		unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];

		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = strlen(saved_key[i+index]);
			pin[i] = (unsigned char*)saved_key[i+index];
			pout[i] = master[i];
		}
		pbkdf2_sha1_sse((const unsigned char **)pin, lens,
		                cur_salt->salt[0], cur_salt->saltlen[0],
		                cur_salt->iterations[0], pout, 16, 0);
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			if(akcdecrypt(master[i], cur_salt->ct[0]) == 0)
				cracked[i+index] = 1;
			else
				cracked[i+index] = 0;
		}
#else
		/* scalar path: one key at a time */
		unsigned char master[32];

		pbkdf2_sha1((unsigned char *)saved_key[index],
		            strlen(saved_key[index]), cur_salt->salt[0],
		            cur_salt->saltlen[0], cur_salt->iterations[0],
		            master, 16, 0);
		if(akcdecrypt(master, cur_salt->ct[0]) == 0)
			cracked[index] = 1;
		else
			cracked[index] = 0;
#endif
	}
	return count;
}

/* Any candidate cracked in the last crypt_all() batch? */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (cracked[index])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* crypt_all() already did the full check (FMT_NOT_EXACT is set). */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store a candidate password, truncated to PLAINTEXT_LENGTH. */
static void agile_keychain_set_key(char *key, int index)
{
	int saved_len = strlen(key);

	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Tunable-cost reporting hook: expose the PBKDF2 iteration count. */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->iterations[0];
}

/* Format registration: positional initializer; field order must match
 * struct fmt_main as declared in formats.h. */
struct fmt_main fmt_agile_keychain = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
		{
			"iteration count",
		},
		{ FORMAT_TAG },
		agile_keychain_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		agile_keychain_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
colorspace.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/property.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/enhance.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/utility.h" /* Typedef declarations. */ typedef struct _TransformPacket { MagickRealType x, y, z; } TransformPacket; /* Forward declarations. */ static MagickBooleanType TransformsRGBImage(Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C o l o r s p a c e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageColorspaceType() returns the potential type of image: % sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc. % % To ensure the image type matches its potential, use SetImageColorspaceType(): % % (void) SetImageColorspaceType(image,GetImageColorspaceType(image), % exception); % % The format of the GetImageColorspaceType method is: % % ColorspaceType GetImageColorspaceType(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ColorspaceType GetImageColorspaceType(const Image *image,
  ExceptionInfo *exception)
{
  ColorspaceType
    colorspace;

  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Start from the declared colorspace, then report GRAY when the pixel
     data is in fact bilevel or grayscale. */
  colorspace=image->colorspace;
  type=IdentifyImageType(image,exception);
  if ((type == BilevelType) || (type == GrayscaleType) ||
      (type == GrayscaleAlphaType))
    colorspace=GRAYColorspace;
  return(colorspace);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   s R G B T r a n s f o r m I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  sRGBTransformImage() converts the reference image from sRGB to an alternate
%  colorspace.  The transformation matrices are not the standard ones: the
%  weights are rescaled to normalize the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the sRGBTransformImage method is:
%
%      MagickBooleanType sRGBTransformImage(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* RGB -> CMY: the simple complement of each channel, scaled to [0,1]. */
static inline void ConvertRGBToCMY(const double red,const double green,
  const double blue,double *cyan,double *magenta,double *yellow)
{
  *cyan=QuantumScale*(QuantumRange-red);
  *magenta=QuantumScale*(QuantumRange-green);
  *yellow=QuantumScale*(QuantumRange-blue);
}

/* XYZ -> LMS cone response via a fixed linear transform (the coefficients
   match the CAT02 chromatic-adaptation matrix). */
static inline void ConvertXYZToLMS(const double x,const double y,
  const double z,double *L,double *M,double *S)
{
  *L=0.7328*x+0.4296*y-0.1624*z;
  *M=(-0.7036*x+1.6975*y+0.0061*z);
  *S=0.0030*x+0.0136*y+0.9834*z;
}

/* RGB -> LMS, routed through XYZ. */
static void ConvertRGBToLMS(const double red,const double green,
  const double blue,double *L,double *M,double *S)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLMS(X,Y,Z,L,M,S);
}

/* RGB -> CIE L*a*b*, routed through XYZ. */
static void ConvertRGBToLab(const double red,const double green,
  const double blue,double *L,double *a,double *b)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLab(X,Y,Z,L,a,b);
}

/* RGB -> CIE L*u*v*, routed through XYZ. */
static void ConvertRGBToLuv(const double red,const double green,
  const double blue,double *L,double *u,double *v)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLuv(X,Y,Z,L,u,v);
}

/* RGB -> xyY chromaticity: x and y are X and Y normalized by (X+Y+Z);
   PerceptibleReciprocal guards the division when X+Y+Z is near zero. */
static void ConvertRGBToxyY(const double red,const double green,
  const double blue,double *low_x,double *low_y,double *cap_Y)
{
  double
    gamma,
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  gamma=PerceptibleReciprocal(X+Y+Z);
  *low_x=gamma*X;
  *low_y=gamma*Y;
  *cap_Y=Y;
}

/* RGB -> YDbDr; the chroma components are offset by 0.5 so they fit the
   normalized [0,1] channel range. */
static void ConvertRGBToYDbDr(const double red,const double green,
  const double blue,double *Y,double *Db,double *Dr)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5;
  *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5;
}

/* RGB -> YIQ; I and Q are offset by 0.5 into the [0,1] channel range. */
static void ConvertRGBToYIQ(const double red,const double green,
  const double blue,double *Y,double *I,double *Q)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5;
  *Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5;
}

static void
ConvertRGBToYPbPr(const double red,const double green, const double blue,double *Y,double *Pb,double *Pr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5; *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5; } static void ConvertRGBToYCbCr(const double red,const double green, const double blue,double *Y,double *Cb,double *Cr) { ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr); } static void ConvertRGBToYUV(const double red,const double green, const double blue,double *Y,double *U,double *V) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5; *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5; } static MagickBooleanType sRGBTransformImage(Image *image, const ColorspaceType colorspace,ExceptionInfo *exception) { #define sRGBTransformImageTag "RGBTransform/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo primary_info; register ssize_t i; ssize_t y; TransformPacket *x_map, *y_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(colorspace != sRGBColorspace); assert(colorspace != TransparentColorspace); assert(colorspace != UndefinedColorspace); status=MagickTrue; progress=0; switch (colorspace) { case CMYKColorspace: { PixelInfo zero; /* Convert RGB to CMYK colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertRGBToCMYK(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->type=image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType : ColorSeparationAlphaType; if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: case GRAYColorspace: { /* Transform image from sRGB to GRAY. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelGray(image,ClampToQuantum(GetPixelIntensity(image,q)),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from sRGB to target colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); switch (colorspace) { case CMYColorspace: { ConvertRGBToCMY(red,green,blue,&X,&Y,&Z); break; } case HCLColorspace: { ConvertRGBToHCL(red,green,blue,&X,&Y,&Z); break; } case HCLpColorspace: { ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z); break; } case HSBColorspace: { ConvertRGBToHSB(red,green,blue,&X,&Y,&Z); break; } case HSIColorspace: { ConvertRGBToHSI(red,green,blue,&X,&Y,&Z); break; } case HSLColorspace: { ConvertRGBToHSL(red,green,blue,&X,&Y,&Z); break; } case HSVColorspace: { ConvertRGBToHSV(red,green,blue,&X,&Y,&Z); break; } case HWBColorspace: { ConvertRGBToHWB(red,green,blue,&X,&Y,&Z); break; } case LabColorspace: { ConvertRGBToLab(red,green,blue,&X,&Y,&Z); break; } case LCHColorspace: case LCHabColorspace: { ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z); break; } case LCHuvColorspace: { ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z); break; } case LMSColorspace: { ConvertRGBToLMS(red,green,blue,&X,&Y,&Z); break; } case LuvColorspace: { ConvertRGBToLuv(red,green,blue,&X,&Y,&Z); break; } case xyYColorspace: { ConvertRGBToxyY(red,green,blue,&X,&Y,&Z); break; } case XYZColorspace: 
{ ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); break; } case YCbCrColorspace: { ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z); break; } case YDbDrColorspace: { ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z); break; } case YIQColorspace: { ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z); break; } case YPbPrColorspace: { ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z); break; } case YUVColorspace: { ConvertRGBToYUV(red,green,blue,&X,&Y,&Z); break; } default: { X=QuantumScale*red; Y=QuantumScale*green; Z=QuantumScale*blue; break; } } SetPixelRed(image,ClampToQuantum(QuantumRange*X),q); SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q); SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { #define DisplayGamma (1.0/1.7) #define FilmGamma 0.6 #define ReferenceBlack 95.0 #define ReferenceWhite 685.0 const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform RGB to Log colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+ log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/ film_gamma))/1024.0)); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) DecodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=(double) DecodePixelGamma((MagickRealType) GetPixelGreen(image,q)); 
blue=(double) DecodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q); SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))], q); SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform image from sRGB to linear RGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red; red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if 
(SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(&primary_info,0,sizeof(primary_info)); switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B I and Q, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.33333*(double) i); y_map[i].x=(MagickRealType) (0.33334*(double) i); z_map[i].x=(MagickRealType) (0.33333*(double) i); x_map[i].y=(MagickRealType) (0.50000*(double) i); y_map[i].y=(MagickRealType) (0.00000*(double) i); z_map[i].y=(MagickRealType) (-0.50000*(double) i); x_map[i].z=(MagickRealType) (-0.25000*(double) i); y_map[i].z=(MagickRealType) (0.50000*(double) i); z_map[i].z=(MagickRealType) (-0.25000*(double) i); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.601): Y = 0.2988390*R+0.5868110*G+0.1143500*B Cb= -0.1687367*R-0.3312640*G+0.5000000*B Cr= 0.5000000*R-0.4186880*G-0.0813120*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); x_map[i].y=(MagickRealType) (-0.1687367*(double) i); y_map[i].y=(MagickRealType) (-0.331264*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].z=(MagickRealType) (-0.418688*(double) i); z_map[i].z=(MagickRealType) (-0.081312*(double) i); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.709): Y = 0.212656*R+0.715158*G+0.072186*B Cb= -0.114572*R-0.385428*G+0.500000*B Cr= 0.500000*R-0.454153*G-0.045847*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212656*(double) i); y_map[i].x=(MagickRealType) (0.715158*(double) i); z_map[i].x=(MagickRealType) (0.072186*(double) i); x_map[i].y=(MagickRealType) (-0.114572*(double) i); y_map[i].y=(MagickRealType) (-0.385428*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].z=(MagickRealType) (-0.454153*(double) i); z_map[i].z=(MagickRealType) (-0.045847*(double) i); } break; } case YCCColorspace: { /* Initialize YCC tables: Y = 0.298839*R+0.586811*G+0.114350*B C1= -0.298839*R-0.586811*G+0.88600*B C2= 0.70100*R-0.586811*G-0.114350*B YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156)); primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137)); for (i=0; i <= (ssize_t) (0.018*MaxMap); i++) { x_map[i].x=0.005382*i; y_map[i].x=0.010566*i; z_map[i].x=0.002052*i; x_map[i].y=(-0.003296)*i; y_map[i].y=(-0.006471)*i; z_map[i].y=0.009768*i; x_map[i].z=0.009410*i; y_map[i].z=(-0.007880)*i; z_map[i].z=(-0.001530)*i; } for ( ; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.298839*(1.099*i-0.099); y_map[i].x=0.586811*(1.099*i-0.099); z_map[i].x=0.114350*(1.099*i-0.099); x_map[i].y=(-0.298839)*(1.099*i-0.099); y_map[i].y=(-0.586811)*(1.099*i-0.099); z_map[i].y=0.88600*(1.099*i-0.099); x_map[i].z=0.70100*(1.099*i-0.099); y_map[i].z=(-0.586811)*(1.099*i-0.099); z_map[i].z=(-0.114350)*(1.099*i-0.099); } break; } default: { /* Linear conversion tables. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert from sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register Quantum *magick_restrict q; register ssize_t x; register unsigned int blue, green, red; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelRed(image,q))); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelGreen(image,q))); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelBlue(image,q))); pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+ primary_info.x; pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+ primary_info.y; pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+ primary_info.z; SetPixelRed(image,ScaleMapToQuantum(pixel.red),q); SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q); SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != 
(MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_sRGBTransformImage) #endif proceed=SetImageProgress(image,sRGBTransformImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { register unsigned int blue, green, red; /* Convert PseudoClass image. */ for (i=0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z; image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red); image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green); image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue); } (void) SyncImage(image,exception); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorspace() sets the colorspace member of the Image structure. % % The format of the SetImageColorspace method is: % % MagickBooleanType SetImageColorspace(Image *image, % const ColorspaceType colorspace,ExceptiionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
%
%    o colorspace: the colorspace.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  ImageType
    image_type;

  MagickBooleanType
    sync_status;

  /*
    Record the new colorspace and reset colorimetric metadata to defaults
    appropriate for it.  Pixel data is not transformed here; see
    TransformImageColorspace() for that.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->colorspace == colorspace)
    return(MagickTrue);  /* already in the requested colorspace */
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;
  (void) memset(&image->chromaticity,0,sizeof(image->chromaticity));
  image_type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      /*
        Grayscale target: only the linear variant has unit gamma.
      */
      if (colorspace == LinearGRAYColorspace)
        image->gamma=1.000;
      image_type=GrayscaleType;
    }
  else
    if ((IsRGBColorspace(colorspace) != MagickFalse) ||
        (colorspace == XYZColorspace) || (colorspace == xyYColorspace))
      image->gamma=1.000;  /* linear-light colorspaces */
    else
      {
        /*
          Non-linear colorspace: perceptual intent with sRGB primaries and
          D65 white point.
        */
        image->rendering_intent=PerceptualIntent;
        image->chromaticity.red_primary.x=0.6400;
        image->chromaticity.red_primary.y=0.3300;
        image->chromaticity.red_primary.z=0.0300;
        image->chromaticity.green_primary.x=0.3000;
        image->chromaticity.green_primary.y=0.6000;
        image->chromaticity.green_primary.z=0.1000;
        image->chromaticity.blue_primary.x=0.1500;
        image->chromaticity.blue_primary.y=0.0600;
        image->chromaticity.blue_primary.z=0.7900;
        image->chromaticity.white_point.x=0.3127;
        image->chromaticity.white_point.y=0.3290;
        image->chromaticity.white_point.z=0.3583;
      }
  sync_status=SyncImagePixelCache(image,exception);
  image->type=image_type;
  return(sync_status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e G r a y                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageGray() returns MagickTrue if all the pixels in the image
have the
%  same red, green, and blue intensities and changes the type of the image to
%  bi-level or grayscale.
%
%  The format of the SetImageGray method is:
%
%      MagickBooleanType SetImageGray(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  const char
    *value;

  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Compare MagickBooleanType predicates explicitly, matching the convention
    used throughout this file.
  */
  if (IsImageGray(image) != MagickFalse)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /*
    The "colorspace:auto-grayscale" property can veto automatic conversion.
  */
  value=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(value) != MagickFalse)
    return(MagickFalse);
  type=IdentifyImageGray(image,exception);
  if (type == UndefinedType)
    return(MagickFalse);
  /*
    All channels agree: reclassify the image as grayscale.
  */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=type;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e M o n o c h r o m e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageMonochrome() returns MagickTrue if all the pixels in the image have
%  the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange and changes the type of the image to bi-level.
%
%  The format of the SetImageMonochrome method is:
%
%      MagickBooleanType SetImageMonochrome(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  const char
    *auto_grayscale;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);  /* already classified bi-level */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /*
    Automatic grayscale conversion can be vetoed with a property.
  */
  auto_grayscale=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(auto_grayscale) != MagickFalse)
    return(MagickFalse);
  if (IdentifyImageMonochrome(image,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Every pixel is black or white: mark the image as bi-level grayscale.
  */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=BilevelType;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f o r m I m a g e C o l o r s p a c e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImageColorspace() transforms an image colorspace, changing the
%  image data to reflect the new colorspace.
%
%  The format of the TransformImageColorspace method is:
%
%      MagickBooleanType TransformImageColorspace(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == colorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    The embedded color profiles no longer describe the transformed pixels.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  switch (colorspace)
  {
    case LinearGRAYColorspace:
      return(GrayscaleImage(image,Rec709LuminancePixelIntensityMethod,
        exception));
    case GRAYColorspace:
      return(GrayscaleImage(image,Rec709LumaPixelIntensityMethod,exception));
    case UndefinedColorspace:
      return(SetImageColorspace(image,colorspace,exception));
    default:
      break;
  }
  /*
    Convert the reference image from an alternate colorspace to sRGB.
  */
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformsRGBImage(image,exception));
  status=MagickTrue;
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    status=TransformsRGBImage(image,exception);
  if (status == MagickFalse)
    return(status);
  /*
    Convert the reference image from sRGB to the target colorspace.
  */
  if (sRGBTransformImage(image,colorspace,exception) == MagickFalse)
    status=MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a n s f o r m s R G B I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformsRGBImage() converts the reference image from an alternate
%  colorspace to sRGB.  The transformation matrices are not the standard ones:
%  the weights are rescaled to normalize the range of the transformed values
%  to be [0..QuantumRange].
%
%  The format of the TransformsRGBImage method is:
%
%      MagickBooleanType TransformsRGBImage(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Complement each CMY channel and scale to [0..QuantumRange].
*/
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,double *red,double *green,double *blue)
{
  *red=QuantumRange*(1.0-cyan);
  *green=QuantumRange*(1.0-magenta);
  *blue=QuantumRange*(1.0-yellow);
}

/*
  Fixed 3x3 linear transform from LMS cone space to CIE XYZ.
*/
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
  *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
  *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
}

/*
  LMS -> RGB, routed through CIE XYZ.
*/
static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLMSToXYZ(L,M,S,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  Rescale normalized L,u,v back to CIE ranges (L: 0..100, u: -134..220,
  v: -140..122), then convert via XYZ to RGB.
*/
static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  Clamp to [0..1388] and round to the nearest integer (YCC table index).
*/
static inline ssize_t RoundToYCC(const double value)
{
  if (value <= 0.0)
    return(0);
  if (value >= 1388.0)
    return(1388);
  return((ssize_t) (value+0.5));
}

/*
  Rescale normalized L*a*b* (L: 0..100, a,b recentered around 0), then
  convert via XYZ to RGB.
*/
static inline void ConvertLabToRGB(const double L,const double a,
  const double b,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  xyY -> XYZ -> RGB; PerceptibleReciprocal() guards the division when the
  y chromaticity is (near) zero.
*/
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
  const double cap_Y,double *red,double *green,double *blue)
{
  double
    gamma,
    X,
    Y,
    Z;

  gamma=PerceptibleReciprocal(low_y);
  X=gamma*cap_Y*low_x;
  Y=cap_Y;
  Z=gamma*cap_Y*(1.0-low_x-low_y);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

static void
ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  double *red,double *green,double *blue)
{
  /*
    Inverse full-range YPbPr matrix; Pb and Pr arrive biased by 0.5 and are
    recentered here.  Expression order is significant for bit-exact output.
  */
  *red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*(Pb-0.5)+
    1.4019995886561440468*(Pr-0.5));
  *green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*(Pb-0.5)-
    0.71413649331646789076*(Pr-0.5));
  *blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*(Pb-0.5)+
    2.1453384174593273e-06*(Pr-0.5));
}

static void ConvertYCbCrToRGB(const double Y,const double Cb,
  const double Cr,double *red,double *green,double *blue)
{
  /*
    This module treats YCbCr identically to YPbPr on the inverse path.
  */
  ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}

static void ConvertYIQToRGB(const double Y,const double I,const double Q,
  double *red,double *green,double *blue)
{
  /*
    Inverse YIQ matrix; I and Q arrive biased by 0.5.
  */
  *red=QuantumRange*(Y+0.9562957197589482261*(I-0.5)+0.6210244164652610754*
    (Q-0.5));
  *green=QuantumRange*(Y-0.2721220993185104464*(I-0.5)-0.6473805968256950427*
    (Q-0.5));
  *blue=QuantumRange*(Y-1.1069890167364901945*(I-0.5)+1.7046149983646481374*
    (Q-0.5));
}

static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
  double *red,double *green,double *blue)
{
  /*
    Inverse YDbDr matrix; Db and Dr arrive biased by 0.5.
  */
  *red=QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)-
    0.52591263066186533*(Dr-0.5));
  *green=QuantumRange*(Y-0.12913289889050927*(Db-0.5)+
    0.26789932820759876*(Dr-0.5));
  *blue=QuantumRange*(Y+0.66467905997895482*(Db-0.5)-
    7.9202543533108e-05*(Dr-0.5));
}

static void ConvertYUVToRGB(const double Y,const double U,const double V,
  double *red,double *green,double *blue)
{
  /*
    Inverse YUV matrix; U and V arrive biased by 0.5.
  */
  *red=QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+1.1398279671717170825*
    (V-0.5));
  *green=QuantumRange*(Y-0.3946101641414141437*(U-0.5)-0.5805003156565656797*
    (V-0.5));
  *blue=QuantumRange*(Y+2.0319996843434342537*(U-0.5)-4.813762626262513e-04*
    (V-0.5));
}

static MagickBooleanType TransformsRGBImage(Image *image,
  ExceptionInfo *exception)
{
#define TransformsRGBImageTag  "Transform/Image"

  /*
    NOTE(review): YCCMap appears to map integer YCC code values 0..1388 to
    normalized [0,1] in uniform steps — confirm against the lookup sites
    later in this function.
  */
  static const float
    YCCMap[1389] =
    {
      0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f,
      0.004323f, 0.005043f, 0.005764f,
0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 
0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f, 0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 
0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 
0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f, 0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 
0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 
0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 
0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 
0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f, 0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000f }; CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; progress=0; switch (image->colorspace) { case CMYKColorspace: { PixelInfo zero; /* Transform image from CMYK to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertCMYKToRGB(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: case GRAYColorspace: { /* Transform linear GRAY to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=(MagickRealType) GetPixelGray(image,q); if ((image->intensity == Rec601LuminancePixelIntensityMethod) || (image->intensity == Rec709LuminancePixelIntensityMethod)) gray=EncodePixelGamma(gray); SetPixelRed(image,ClampToQuantum(gray),q); SetPixelGreen(image,ClampToQuantum(gray),q); SetPixelBlue(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from source colorspace to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; X=QuantumScale*GetPixelRed(image,q); Y=QuantumScale*GetPixelGreen(image,q); Z=QuantumScale*GetPixelBlue(image,q); switch (image->colorspace) { case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case LabColorspace: { ConvertLabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,&red,&green,&blue); break; } case xyYColorspace: { ConvertxyYToRGB(X,Y,Z,&red,&green,&blue); break; } case 
XYZColorspace: { ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: { red=QuantumRange*X; green=QuantumRange*Y; blue=QuantumRange*Z; break; } } SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/ film_gamma)-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == 
(Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))]; green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))]; blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))]; SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType) red)),q); SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType) green)),q); SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType) blue)),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); 
SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (image->colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) 0.0000000; z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) 0.0000000; } break; } default: { /* Linear conversion tables. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(image,q)); green=ScaleQuantumToMap(GetPixelGreen(image,q)); blue=ScaleQuantumToMap(GetPixelBlue(image,q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) 
ScaleMapToQuantum(pixel.blue); } SetPixelRed(image,ClampToQuantum(pixel.red),q); SetPixelGreen(image,ClampToQuantum(pixel.green),q); SetPixelBlue(image,ClampToQuantum(pixel.blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransformsRGBImage) #endif proceed=SetImageProgress(image,TransformsRGBImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* Convert PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; register size_t blue, green, red; red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=(double) ClampToQuantum(pixel.red); image->colormap[i].green=(double) ClampToQuantum(pixel.green); image->colormap[i].blue=(double) ClampToQuantum(pixel.blue); 
} (void) SyncImage(image,exception); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(MagickTrue); }
yolov2.h
#ifndef YOLOV3 #define YOLOV3 #include <stdio.h> #include <stdlib.h> //#include <iostream> #include <math.h> #include <fcntl.h> #include <string.h> #include <time.h> #include "xconv_hw.h" #include "hw_drivers.h" #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" #define FLT_MAX 3.402823466e+38F /* max value */ double what_time_is_it_now() { struct timeval time; if (gettimeofday(&time,NULL)){ return 0; } return (double)time.tv_sec + (double)time.tv_usec * .000001; } //#include "yolo_hls.h" typedef enum{ LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN } ACTIVATION; typedef enum { CONVOLUTIONAL, DECONVOLUTIONAL, CONNECTED, MAXPOOL, SOFTMAX, DETECTION, DROPOUT, CROP, ROUTE, COST, NORMALIZATION, AVGPOOL, LOCAL, SHORTCUT, ACTIVE, RNN, GRU, LSTM, CRNN, BATCHNORM, NETWORK, XNOR, REGION, YOLO, REORG, UPSAMPLE, LOGXENT, L2NORM, BLANK } LAYER_TYPE; struct network; typedef struct network network; struct layer; typedef struct layer layer; struct layer{ LAYER_TYPE type; ACTIVATION activation; void (*forward) (struct layer, struct network); int batch_normalize; int shortcut; int batch; int forced; int flipped; int inputs; int outputs; int nweights; int nbiases; int extra; int truths; int h,w,c; int out_h, out_w, out_c; int n; int max_boxes; int groups; int size; int side; int stride; int reverse; int flatten; int spatial; int pad; int sqrt; int flip; int index; int binary; int xnor; int steps; int hidden; int truth; float smooth; float dot; float angle; float jitter; float saturation; float exposure; float shift; float ratio; float learning_rate_scale; float clip; int softmax; int classes; int coords; int background; int rescore; int objectness; int joint; int noadjust; int reorg; int log; int tanh; int *mask; int total; float alpha; float beta; float kappa; float coord_scale; float object_scale; float noobject_scale; float mask_scale; float class_scale; int 
bias_match; int random; float ignore_thresh; float truth_thresh; float thresh; float focus; int classfix; int absolute; int onlyforward; int stopbackward; // int dontload; int dontsave; // int dontloadscales; float temperature; float probability; float scale; char * cweights; int * indexes; int * input_layers; int * input_sizes; int * map; float * rand; float * cost; float * state; float * prev_state; float * forgot_state; float * forgot_delta; float * state_delta; float * combine_cpu; float * combine_delta_cpu; float * concat; float * concat_delta; float * binary_weights; float * biases; float * bias_updates; float * scales; float * scale_updates; float * weights; float * weight_updates; float * delta; float * output; float * loss; float * squared; float * norms; float * spatial_mean; float * mean; float * variance; float * mean_delta; float * variance_delta; float * rolling_mean; float * rolling_variance; float * x; float * x_norm; float * m; float * v; float * bias_m; float * bias_v; float * scale_m; float * scale_v; float *z_cpu; float *r_cpu; float *h_cpu; float * prev_state_cpu; float *temp_cpu; float *temp2_cpu; float *temp3_cpu; float *dh_cpu; float *hh_cpu; float *prev_cell_cpu; float *cell_cpu; float *f_cpu; float *i_cpu; float *g_cpu; float *o_cpu; float *c_cpu; float *dc_cpu; float * binary_input; struct layer *input_layer; struct layer *self_layer; struct layer *output_layer; struct layer *reset_layer; struct layer *update_layer; struct layer *state_layer; struct layer *input_gate_layer; struct layer *state_gate_layer; struct layer *input_save_layer; struct layer *state_save_layer; struct layer *input_state_layer; struct layer *state_state_layer; struct layer *input_z_layer; struct layer *state_z_layer; struct layer *input_r_layer; struct layer *state_r_layer; struct layer *input_h_layer; struct layer *state_h_layer; struct layer *wz; struct layer *uz; struct layer *wr; struct layer *ur; struct layer *wh; struct layer *uh; struct layer *uo; struct 
layer *wo; struct layer *uf; struct layer *wf; struct layer *ui; struct layer *wi; struct layer *ug; struct layer *wg; //tree *softmax_tree; size_t workspace_size; }; void free_layer(layer l) { if(l.cweights) free(l.cweights); if(l.indexes) free(l.indexes); if(l.input_layers) free(l.input_layers); if(l.input_sizes) free(l.input_sizes); if(l.map) free(l.map); if(l.rand) free(l.rand); if(l.cost) free(l.cost); if(l.state) free(l.state); if(l.prev_state) free(l.prev_state); if(l.forgot_state) free(l.forgot_state); if(l.forgot_delta) free(l.forgot_delta); if(l.state_delta) free(l.state_delta); if(l.concat) free(l.concat); if(l.concat_delta) free(l.concat_delta); if(l.binary_weights) free(l.binary_weights); if(l.biases) free(l.biases); if(l.bias_updates) free(l.bias_updates); if(l.scales) free(l.scales); if(l.scale_updates) free(l.scale_updates); if(l.weights) free(l.weights); if(l.weight_updates) free(l.weight_updates); if(l.delta) free(l.delta); if(l.output) free(l.output); if(l.squared) free(l.squared); if(l.norms) free(l.norms); if(l.spatial_mean) free(l.spatial_mean); if(l.mean) free(l.mean); if(l.variance) free(l.variance); if(l.mean_delta) free(l.mean_delta); if(l.variance_delta) free(l.variance_delta); if(l.rolling_mean) free(l.rolling_mean); if(l.rolling_variance) free(l.rolling_variance); if(l.x) free(l.x); if(l.x_norm) free(l.x_norm); if(l.m) free(l.m); if(l.v) free(l.v); if(l.z_cpu) free(l.z_cpu); if(l.r_cpu) free(l.r_cpu); if(l.h_cpu) free(l.h_cpu); if(l.binary_input) free(l.binary_input); } //void free_layer(layer); typedef enum { CONSTANT, STEP, EXP, POLY, STEPS, SIG, RANDOM } learning_rate_policy; typedef struct network{ int n; int batch; size_t *seen; int *t; float epoch; int subdivisions; layer *layers; float *output; learning_rate_policy policy; float learning_rate; float momentum; float decay; float gamma; float scale; float power; int time_steps; int step; int max_batches; float *scales; int *steps; int num_steps; int burn_in; int adam; float B1; 
float B2; float eps; int inputs; int outputs; int truths; int notruth; int h, w, c; int max_crop; int min_crop; float max_ratio; float min_ratio; int center; float angle; float aspect; float exposure; float saturation; float hue; int random; int gpu_index; // tree *hierarchy; float *input; float *truth; float *delta; float *workspace; int train; int index; float *cost; float clip; } network; network *make_network(int n); layer get_network_output_layer(network *net); typedef struct { int w; int h; float scale; float rad; float dx; float dy; float aspect; } augment_args; typedef struct { int w; int h; int c; float *data; } image; typedef struct{ float x, y, w, h; } box; typedef struct detection{ box bbox; int classes; float *prob; float *mask; float objectness; int sort_class; } detection; typedef struct matrix{ int rows, cols; float **vals; } matrix; typedef struct{ int w, h; matrix X; matrix y; int shallow; int *num_boxes; box **boxes; } data; typedef enum { CLASSIFICATION_DATA, DETECTION_DATA, CAPTCHA_DATA, REGION_DATA, IMAGE_DATA, COMPARE_DATA, WRITING_DATA, SWAG_DATA, TAG_DATA, OLD_CLASSIFICATION_DATA, STUDY_DATA, DET_DATA, SUPER_DATA, LETTERBOX_DATA, REGRESSION_DATA, SEGMENTATION_DATA, INSTANCE_DATA } data_type; typedef struct load_args{ int threads; char **paths; char *path; int n; int m; char **labels; int h; int w; int out_w; int out_h; int nh; int nw; int num_boxes; int min, max, size; int classes; int background; int scale; int center; int coords; float jitter; float angle; float aspect; float saturation; float exposure; float hue; data *d; image *im; image *resized; data_type type; // tree *hierarchy; } load_args; typedef struct{ int id; float x,y,w,h; float left, right, top, bottom; } box_label; //network *load_network(char *cfg, char *weights, int clear); //load_args get_base_args(network *net); //void free_data(data d); typedef struct{ char *key; char *val; int used; } kvp; typedef struct node{ void *val; struct node *next; struct node *prev; } node; 
typedef struct list{ int size; node *front; node *back; } list; void error(const char *s) { perror(s); assert(0); exit(-1); } void malloc_error() { fprintf(stderr, "Malloc error\n"); exit(-1); } void file_error(char *s) { fprintf(stderr, "Couldn't open file: %s\n", s); exit(0); } /////////////////list begin list *make_list() { list *l = (list *)malloc(sizeof(list)); l->size = 0; l->front = 0; l->back = 0; return l; } void *list_pop(list *l){ if(!l->back) return 0; node *b = l->back; void *val = b->val; l->back = b->prev; if(l->back) l->back->next = 0; free(b); --l->size; return val; } void list_insert(list *l, void *val) { node *new_node = (node *)malloc(sizeof(node)); new_node->val = val; new_node->next = 0; if(!l->back){ l->front = new_node; new_node->prev = 0; }else{ l->back->next = new_node; new_node->prev = l->back; } l->back = new_node; ++l->size; } void free_node(node *n) { node *next; while(n) { next = n->next; free(n); n = next; } } void free_list(list *l) { free_node(l->front); free(l); } void free_list_contents(list *l) { node *n = l->front; while(n){ free(n->val); n = n->next; } } void **list_to_array(list *l) { void **a = (void **)calloc(l->size, sizeof(void*)); int count = 0; node *n = l->front; while(n){ a[count++] = n->val; n = n->next; } return a; } /////////////////list end /////////////////////utils begin void del_arg(int argc, char **argv, int index) { int i; for(i = index; i < argc-1; ++i) argv[i] = argv[i+1]; argv[i] = 0; } int find_arg(int argc, char* argv[], char *arg) { int i; for(i = 0; i < argc; ++i) { if(!argv[i]) continue; if(0==strcmp(argv[i], arg)) { del_arg(argc, argv, i); return 1; } } return 0; } int find_int_arg(int argc, char **argv, char *arg, int def) { int i; for(i = 0; i < argc-1; ++i){ if(!argv[i]) continue; if(0==strcmp(argv[i], arg)){ def = atoi(argv[i+1]); del_arg(argc, argv, i); del_arg(argc, argv, i); break; } } return def; } float find_float_arg(int argc, char **argv, char *arg, float def) { int i; for(i = 0; i < 
argc-1; ++i){ if(!argv[i]) continue; if(0==strcmp(argv[i], arg)){ def = atof(argv[i+1]); del_arg(argc, argv, i); del_arg(argc, argv, i); break; } } return def; } char *find_char_arg(int argc, char **argv, char *arg, char *def) { int i; for(i = 0; i < argc-1; ++i){ if(!argv[i]) continue; if(0==strcmp(argv[i], arg)){ def = argv[i+1]; del_arg(argc, argv, i); del_arg(argc, argv, i); break; } } return def; } unsigned char *read_file(char *filename) { FILE *fp = fopen(filename, "rb"); size_t size; fseek(fp, 0, SEEK_END); size = ftell(fp); fseek(fp, 0, SEEK_SET); unsigned char *text = (unsigned char *)calloc(size+1, sizeof(unsigned char)); fread(text, 1, size, fp); fclose(fp); return text; } list *split_str(char *s, char delim) { size_t i; size_t len = strlen(s); list *l = make_list(); list_insert(l, s); for(i = 0; i < len; ++i){ if(s[i] == delim){ s[i] = '\0'; list_insert(l, &(s[i+1])); } } return l; } void strip(char *s) { size_t i; size_t len = strlen(s); size_t offset = 0; for(i = 0; i < len; ++i){ char c = s[i]; if(c==' '||c=='\t'||c=='\n') ++offset; else s[i-offset] = c; } s[len-offset] = '\0'; } void strip_char(char *s, char bad) { size_t i; size_t len = strlen(s); size_t offset = 0; for(i = 0; i < len; ++i){ char c = s[i]; if(c==bad) ++offset; else s[i-offset] = c; } s[len-offset] = '\0'; } void free_ptrs(void **ptrs, int n) { int i; for(i = 0; i < n; ++i) free(ptrs[i]); free(ptrs); } char *fgetl(FILE *fp) { if(feof(fp)) return 0; size_t size = 512; char *line = (char *)malloc(size*sizeof(char)); if(!fgets(line, size, fp)){ free(line); return 0; } size_t curr = strlen(line); while((line[curr-1] != '\n') && !feof(fp)){ if(curr == size-1){ size *= 2; line = (char *)realloc(line, size*sizeof(char)); if(!line) { printf("%ld\n", size); malloc_error(); } } size_t readsize = size-curr; if(readsize > INT_MAX) readsize = INT_MAX-1; fgets(&line[curr], readsize, fp); curr = strlen(line); } if(line[curr-1] == '\n') line[curr-1] = '\0'; return line; } 
/////////////////////utils end ////////////////////option_list begin void option_insert(list *l, char *key, char *val) { kvp *p = (kvp *)malloc(sizeof(kvp)); p->key = key; p->val = val; p->used = 0; list_insert(l, p); } int read_option(char *s, list *options) { size_t i; size_t len = strlen(s); char *val = 0; for(i = 0; i < len; ++i){ if(s[i] == '='){ s[i] = '\0'; val = s+i+1; break; } } if(i == len-1) return 0; char *key = s; option_insert(options, key, val); return 1; } void option_unused(list *l) { node *n = l->front; while(n){ kvp *p = (kvp *)n->val; if(!p->used){ fprintf(stderr, "Unused field: '%s = %s'\n", p->key, p->val); } n = n->next; } } char *option_find(list *l, char *key) { node *n = l->front; while(n){ kvp *p = (kvp *)n->val; if(strcmp(p->key, key) == 0){ p->used = 1; return p->val; } n = n->next; } return 0; } char *option_find_str(list *l, char *key, char *def) { char *v = option_find(l, key); if(v) return v; if(def) fprintf(stderr, "%s: Using default '%s'\n", key, def); return def; } int option_find_int(list *l, char *key, int def) { char *v = option_find(l, key); if(v) return atoi(v); fprintf(stderr, "%s: Using default '%d'\n", key, def); return def; } int option_find_int_quiet(list *l, char *key, int def) { char *v = option_find(l, key); if(v) return atoi(v); return def; } float option_find_float_quiet(list *l, char *key, float def) { char *v = option_find(l, key); if(v) return atof(v); return def; } float option_find_float(list *l, char *key, float def) { char *v = option_find(l, key); if(v) return atof(v); fprintf(stderr, "%s: Using default '%lf'\n", key, def); return def; } list *read_data_cfg(char *filename) { FILE *file = fopen(filename, "r"); if(file == 0) file_error(filename); char *line; int nu = 0; list *options = make_list(); while((line=fgetl(file)) != 0){ ++ nu; strip(line); switch(line[0]){ case '\0': case '#': case ';': free(line); break; default: if(!read_option(line, options)){ fprintf(stderr, "Config file error line %d, could 
parse: %s\n", nu, line); free(line); } break; } } fclose(file); return options; } ///////////////////option_list end image make_empty_image(int w, int h, int c) { image out; out.data = 0; out.h = h; out.w = w; out.c = c; return out; } list *get_paths(char *filename) { char *path; FILE *file = fopen(filename, "r"); if(!file) file_error(filename); list *lines = make_list(); while((path=fgetl(file))){ list_insert(lines, path); } fclose(file); return lines; } char **get_labels(char *filename) { list *plist = get_paths(filename); char **labels = (char **)list_to_array(plist); free_list(plist); return labels; } image make_image(int w, int h, int c) { image out = make_empty_image(w,h,c); out.data = (float *)calloc(h*w*c, sizeof(float)); return out; } static float get_pixel(image m, int x, int y, int c) { assert(x < m.w && y < m.h && c < m.c); return m.data[c*m.h*m.w + y*m.w + x]; } static void set_pixel(image m, int x, int y, int c, float val) { if (x < 0 || y < 0 || c < 0 || x >= m.w || y >= m.h || c >= m.c) return; assert(x < m.w && y < m.h && c < m.c); m.data[c*m.h*m.w + y*m.w + x] = val; } static void add_pixel(image m, int x, int y, int c, float val) { assert(x < m.w && y < m.h && c < m.c); m.data[c*m.h*m.w + y*m.w + x] += val; } void free_image(image m) { if(m.data){ free(m.data); } } image resize_image(image im, int w, int h) { image resized = make_image(w, h, im.c); image part = make_image(w, im.h, im.c); int r, c, k; float w_scale = (float)(im.w - 1) / (w - 1); float h_scale = (float)(im.h - 1) / (h - 1); for(k = 0; k < im.c; ++k){ for(r = 0; r < im.h; ++r){ for(c = 0; c < w; ++c){ float val = 0; if(c == w-1 || im.w == 1){ val = get_pixel(im, im.w-1, r, k); } else { float sx = c*w_scale; int ix = (int) sx; float dx = sx - ix; val = (1 - dx) * get_pixel(im, ix, r, k) + dx * get_pixel(im, ix+1, r, k); } set_pixel(part, c, r, k, val); } } } for(k = 0; k < im.c; ++k){ for(r = 0; r < h; ++r){ float sy = r*h_scale; int iy = (int) sy; float dy = sy - iy; for(c = 0; c < 
w; ++c){ float val = (1-dy) * get_pixel(part, c, iy, k); set_pixel(resized, c, r, k, val); } if(r == h-1 || im.h == 1) continue; for(c = 0; c < w; ++c){ float val = dy * get_pixel(part, c, iy+1, k); add_pixel(resized, c, r, k, val); } } } free_image(part); return resized; } void fill_image(image m, float s) { int i; for(i = 0; i < m.h*m.w*m.c; ++i) m.data[i] = s; } void embed_image(image source, image dest, int dx, int dy) { int x,y,k; for(k = 0; k < source.c; ++k){ for(y = 0; y < source.h; ++y){ for(x = 0; x < source.w; ++x){ float val = get_pixel(source, x,y,k); set_pixel(dest, dx+x, dy+y, k, val); } } } } image letterbox_image(image im, int w, int h) { int new_w = im.w; int new_h = im.h; if (((float)w/im.w) < ((float)h/im.h)) { new_w = w; new_h = (im.h * w)/im.w; } else { new_h = h; new_w = (im.w * h)/im.h; } image resized = resize_image(im, new_w, new_h); image boxed = make_image(w, h, im.c); fill_image(boxed, .5); //int i; //for(i = 0; i < boxed.w*boxed.h*boxed.c; ++i) boxed.data[i] = 0; embed_image(resized, boxed, (w-new_w)/2, (h-new_h)/2); free_image(resized); return boxed; } image load_image_stb(char *filename, int channels) { int w, h, c; unsigned char *data = stbi_load(filename, &w, &h, &c, channels); if (!data) { fprintf(stderr, "Cannot load image \"%s\"\nSTB Reason: %s\n", filename, stbi_failure_reason()); exit(0); } if(channels) c = channels; int i,j,k; image im = make_image(w, h, c); for(k = 0; k < c; ++k){ for(j = 0; j < h; ++j){ for(i = 0; i < w; ++i){ int dst_index = i + w*j + w*h*k; int src_index = k + c*i + c*w*j; im.data[dst_index] = (float)data[src_index]/255.; } } } free(data); return im; } void save_image_png(image im, const char *name) { char buff[256]; //sprintf(buff, "%s (%d)", name, windows); sprintf(buff, "%s.png", name); unsigned char *data = (unsigned char *)calloc(im.w*im.h*im.c, sizeof(char)); int i,k; for(k = 0; k < im.c; ++k){ for(i = 0; i < im.w*im.h; ++i){ data[i*im.c+k] = (unsigned char) (255*im.data[i + k*im.w*im.h]); } } int 
success = stbi_write_png(buff, im.w, im.h, im.c, data, im.w*im.c); free(data); if(!success) fprintf(stderr, "Failed to write image %s\n", buff); } image **load_alphabet() { int i, j; const int nsize = 8; image **alphabets = (image **)calloc(nsize, sizeof(image)); for(j = 0; j < nsize; ++j){ alphabets[j] = (image *)calloc(128, sizeof(image)); for(i = 32; i < 127; ++i){ char buff[256]; sprintf(buff, "labels/%d_%d.png", i, j); //alphabets[j][i] = load_image_color(buff, 0, 0); alphabets[j][i] = load_image_stb(buff, 3); } } return alphabets; } ///////////////////activation begin static inline float stair_activate(float x) { int n = floor(x); if (n%2 == 0) return floor(x/2.); else return (x - n) + floor(x/2.); } static inline float hardtan_activate(float x) { if (x < -1) return -1; if (x > 1) return 1; return x; } static inline float linear_activate(float x){return x;} static inline float logistic_activate(float x){return 1./(1. + exp(-x));} static inline float loggy_activate(float x){return 2./(1. + exp(-x)) - 1;} static inline float relu_activate(float x){return x*(x>0);} static inline float elu_activate(float x){return (x >= 0)*x + (x < 0)*(exp(x)-1);} static inline float relie_activate(float x){return (x>0) ? x : .01*x;} static inline float ramp_activate(float x){return x*(x>0)+.1*x;} static inline float leaky_activate(float x){return (x>0) ? 
x : .1*x;} static inline float tanh_activate(float x){return (exp(2*x)-1)/(exp(2*x)+1);} static inline float plse_activate(float x) { if(x < -4) return .01 * (x + 4); if(x > 4) return .01 * (x - 4) + 1; return .125*x + .5; } static inline float lhtan_activate(float x) { if(x < 0) return .001*x; if(x > 1) return .001*(x-1) + 1; return x; } static inline float lhtan_gradient(float x) { if(x > 0 && x < 1) return 1; return .001; } static inline float hardtan_gradient(float x) { if (x > -1 && x < 1) return 1; return 0; } static inline float linear_gradient(float x){return 1;} static inline float logistic_gradient(float x){return (1-x)*x;} static inline float loggy_gradient(float x) { float y = (x+1.)/2.; return 2*(1-y)*y; } static inline float stair_gradient(float x) { if (floor(x) == x) return 0; return 1; } static inline float relu_gradient(float x){return (x>0);} static inline float elu_gradient(float x){return (x >= 0) + (x < 0)*(x + 1);} static inline float relie_gradient(float x){return (x>0) ? 1 : .01;} static inline float ramp_gradient(float x){return (x>0)+.1;} static inline float leaky_gradient(float x){return (x>0) ? 1 : .1;} static inline float tanh_gradient(float x){return 1-x*x;} static inline float plse_gradient(float x){return (x < 0 || x > 1) ? 
.01 : .125;} char *get_activation_string(ACTIVATION a) { switch(a){ case LOGISTIC: return "logistic"; case LOGGY: return "loggy"; case RELU: return "relu"; case ELU: return "elu"; case RELIE: return "relie"; case RAMP: return "ramp"; case LINEAR: return "linear"; case TANH: return "tanh"; case PLSE: return "plse"; case LEAKY: return "leaky"; case STAIR: return "stair"; case HARDTAN: return "hardtan"; case LHTAN: return "lhtan"; default: break; } return "relu"; } ACTIVATION get_activation(char *s) { if (strcmp(s, "logistic")==0) return LOGISTIC; if (strcmp(s, "loggy")==0) return LOGGY; if (strcmp(s, "relu")==0) return RELU; if (strcmp(s, "elu")==0) return ELU; if (strcmp(s, "relie")==0) return RELIE; if (strcmp(s, "plse")==0) return PLSE; if (strcmp(s, "hardtan")==0) return HARDTAN; if (strcmp(s, "lhtan")==0) return LHTAN; if (strcmp(s, "linear")==0) return LINEAR; if (strcmp(s, "ramp")==0) return RAMP; if (strcmp(s, "leaky")==0) return LEAKY; if (strcmp(s, "tanh")==0) return TANH; if (strcmp(s, "stair")==0) return STAIR; fprintf(stderr, "Couldn't find activation function %s, going with ReLU\n", s); return RELU; } float activate(float x, ACTIVATION a) { switch(a){ case LINEAR: return linear_activate(x); case LOGISTIC: return logistic_activate(x); case LOGGY: return loggy_activate(x); case RELU: return relu_activate(x); case ELU: return elu_activate(x); case RELIE: return relie_activate(x); case RAMP: return ramp_activate(x); case LEAKY: return leaky_activate(x); case TANH: return tanh_activate(x); case PLSE: return plse_activate(x); case STAIR: return stair_activate(x); case HARDTAN: return hardtan_activate(x); case LHTAN: return lhtan_activate(x); } return 0; } void activate_array(float *x, const int n, const ACTIVATION a) { int i; for(i = 0; i < n; ++i){ x[i] = activate(x[i], a); } } float gradient(float x, ACTIVATION a) { switch(a){ case LINEAR: return linear_gradient(x); case LOGISTIC: return logistic_gradient(x); case LOGGY: return loggy_gradient(x); case RELU: 
return relu_gradient(x); case ELU: return elu_gradient(x); case RELIE: return relie_gradient(x); case RAMP: return ramp_gradient(x); case LEAKY: return leaky_gradient(x); case TANH: return tanh_gradient(x); case PLSE: return plse_gradient(x); case STAIR: return stair_gradient(x); case HARDTAN: return hardtan_gradient(x); case LHTAN: return lhtan_gradient(x); } return 0; } ///////////////////activation end void copy_cpu(int N, float *X, int INCX, float *Y, int INCY) { int i; for(i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX]; } void fill_cpu(int N, float ALPHA, float *X, int INCX) { int i; for(i = 0; i < N; ++i) X[i*INCX] = ALPHA; } void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) { int stride = w1/w2; int sample = w2/w1; assert(stride == h1/h2); assert(sample == h2/h1); //printf("shorcut_layer batch=%d,stride=%d,sample=%d\n",batch,stride,sample); if(stride < 1) stride = 1; if(sample < 1) sample = 1; int minw = (w1 < w2) ? w1 : w2; int minh = (h1 < h2) ? h1 : h2; int minc = (c1 < c2) ? 
c1 : c2; int i,j,k,b; for(b = 0; b < batch; ++b){ for(k = 0; k < minc; ++k){ for(j = 0; j < minh; ++j){ for(i = 0; i < minw; ++i){ int out_index = i*sample + w2*(j*sample + h2*(k + c2*b)); int add_index = i*stride + w1*(j*stride + h1*(k + c1*b)); out[out_index] = s1*out[out_index] + s2*add[add_index]; } } } } } void forward_shortcut_layer(const layer l, network net) { //copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1); //shortcut_cpu(l.batch, l.w, l.h, l.c, net.layers[l.index].output, l.out_w, l.out_h, l.out_c, l.alpha, l.beta, l.output); //activate_array(l.output, l.outputs*l.batch, l.activation); int w = l.w; int h = l.h; int c = l.c; float *add = net.layers[l.index].output; float *out = l.output; float *in = net.input; int i,j,k; for(k = 0; k < c; ++k){ for(j = 0; j < h; ++j){ for(i = 0; i < w; ++i){ int index = i + w*(j + h*k ); out[index] = in[index] + add[index]; } } } } layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2) { fprintf(stderr, "res %3d %4d x%4d x%4d -> %4d x%4d x%4d\n",index, w2,h2,c2, w,h,c); layer l; memset(&l,0,sizeof(layer)); l.type = SHORTCUT; l.batch = batch; l.w = w2; l.h = h2; l.c = c2; l.out_w = w; l.out_h = h; l.out_c = c; l.outputs = w*h*c; l.inputs = l.outputs; l.index = index; l.output = (float *)calloc(l.outputs*batch, sizeof(float));; l.forward = forward_shortcut_layer; return l; } int convolutional_out_height(layer l) { return (l.h + 2*l.pad - l.size) / l.stride + 1; } int convolutional_out_width(layer l) { return (l.w + 2*l.pad - l.size) / l.stride + 1; } static size_t get_workspace_size(layer l){ return (size_t)l.out_h*l.out_w*l.size*l.size*l.c/l.groups*sizeof(float); } void add_bias(float *output, float *biases, int batch, int n, int size) { int i,j,b; for(b = 0; b < batch; ++b){ for(i = 0; i < n; ++i){ for(j = 0; j < size; ++j){ output[(b*n + i)*size + j] += biases[i]; } } } } void scale_bias(float *output, float *scales, int batch, int n, int size) { int i,j,b; for(b = 0; b < 
batch; ++b){ for(i = 0; i < n; ++i){ for(j = 0; j < size; ++j){ output[(b*n + i)*size + j] *= scales[i]; } } } } float im2col_get_pixel(float *im, int height, int width, int channels, int row, int col, int channel, int pad) { row -= pad; col -= pad; if (row < 0 || col < 0 || row >= height || col >= width) return 0; return im[col + width*(row + height*channel)]; } //From Berkeley Vision's Caffe! //https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col) { int c,h,w; int height_col = (height + 2*pad - ksize) / stride + 1; int width_col = (width + 2*pad - ksize) / stride + 1; int channels_col = channels * ksize * ksize; for (c = 0; c < channels_col; ++c) { int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = 0; h < height_col; ++h) { for (w = 0; w < width_col; ++w) { int im_row = h_offset + h * stride; int im_col = w_offset + w * stride; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } } } void gemm_nn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; #pragma omp parallel for for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ register float A_PART = ALPHA*A[i*lda+k]; for(j = 0; j < N; ++j){ C[i*ldc+j] += A_PART*B[k*ldb+j]; } } } } void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc); int i, j; for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ C[i*ldc + j] *= BETA; } } if(!TA && !TB) gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); //else if(TA && !TB) // gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); //else if(!TA && TB) // gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); //else // gemm_tt(M, 
N, K, ALPHA,A,lda, B, ldb,C,ldc); } void gemm(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc); } void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) { int b, f, i; for(b = 0; b < batch; ++b){ for(f = 0; f < filters; ++f){ for(i = 0; i < spatial; ++i){ int index = b*filters*spatial + f*spatial + i; x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .000001f); } } } } void forward_batchnorm_layer(layer l, network net)//for conv { normalize_cpu(l.output, l.rolling_mean, l.rolling_variance, l.batch, l.out_c, l.out_h*l.out_w); scale_bias(l.output, l.scales, l.batch, l.out_c, l.out_h*l.out_w); add_bias(l.output, l.biases, l.batch, l.out_c, l.out_h*l.out_w); } void CONV_Padding_Relu(float *Input,float *Output,float *Weight,const int InFM_num,const int OutFM_num,const int Kernel_size,const int Kernel_stride,const int Input_w,const int Input_h,const int Padding) { // (output_w - 1)*Kernel_stride + Kernel_size = Input_w const int output_w = (Input_w - Kernel_size + 2*Padding)/Kernel_stride + 1 ; const int output_h = (Input_h - Kernel_size + 2*Padding)/Kernel_stride + 1 ; int x, y, of, inf; int m,n; for( of = 0; of < OutFM_num; of++){ for( y = 0; y < output_h; y++) { for( x = 0; x < output_w; x++){ float tmp = 0.0; for(inf = 0;inf < InFM_num; inf++) { int intput_offset = inf*Input_w*Input_h + (y*Kernel_stride - Padding)*Input_w + x*Kernel_stride - Padding; for(m = 0;m < Kernel_size; m++) { for(n = 0;n < Kernel_size; n++) { int kernel_offset = of*InFM_num*Kernel_size*Kernel_size + inf*Kernel_size*Kernel_size; bool inFM_width = ((x*Kernel_stride + n - Padding) >= 0)&&((x*Kernel_stride + n - Padding) < Input_w); bool inFM_height = ((y*Kernel_stride + m - Padding) >= 0)&&((y*Kernel_stride + m - Padding) < Input_h); if(inFM_width&&inFM_height) tmp += Weight[kernel_offset + m*Kernel_size + 
n]*Input[intput_offset + m*Input_w + n]; } } } Output[of*output_w*output_h + y*output_w + x] = tmp; } } } } void forward_convolutional_layer(layer l, network net) { int i, j; fill_cpu(l.outputs*l.batch, 0, l.output, 1); //printf("c=%d,n=%d,size=%d,stride=%d,w=%d,h=%d,pad=%d\n",l.c,l.n,l.size,l.stride,l.w,l.h,l.pad); //int m = l.n/l.groups; //int k = l.size*l.size*l.c/l.groups; //int n = l.out_w*l.out_h; //for(i = 0; i < l.batch; ++i){ // for(j = 0; j < l.groups; ++j){ // float *a = l.weights + j*l.nweights/l.groups; // float *b = net.workspace; // float *c = l.output + (i*l.groups + j)*n*m; // im2col_cpu(net.input + (i*l.groups + j)*l.c/l.groups*l.h*l.w, // l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b); // gemm(0,0,m,n,k,1,a,k,b,n,1,c,n); // } //} int m = l.n; int k = l.size*l.size*l.c; int n = l.out_w*l.out_h; float *a = l.weights; float *b = net.workspace; float *c = l.output; im2col_cpu(net.input,l.c, l.h, l.w, l.size, l.stride, l.pad, b); gemm(0,0,m,n,k,1,a,k,b,n,1,c,n); //CONV_Padding_Relu(net.input,l.output,l.weights,l.c,l.n,l.size,l.stride,l.w,l.h,l.pad); if(l.batch_normalize){ forward_batchnorm_layer(l, net); } else { add_bias(l.output, l.biases, l.batch, l.n, l.out_h*l.out_w); } activate_array(l.output, l.outputs*l.batch, l.activation); } layer make_convolutional_layer(int batch, int h, int w, int c, int n, int groups, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam) { int i; layer l; memset(&l,0,sizeof(layer)); l.type = CONVOLUTIONAL; l.groups = groups; l.h = h; l.w = w; l.c = c; l.n = n; l.binary = binary; l.xnor = xnor; l.batch = batch; l.stride = stride; l.size = size; l.pad = padding; l.batch_normalize = batch_normalize; // l.weights = (float *)calloc(c/groups*n*size*size, sizeof(float)); // l.biases = (float *)calloc(n, sizeof(float)); l.nweights = c/groups*n*size*size; l.nbiases = n; int out_w = convolutional_out_width(l); int out_h = convolutional_out_height(l); l.out_h = out_h; 
l.out_w = out_w; l.out_c = n; l.outputs = l.out_h * l.out_w * l.out_c; l.inputs = l.w * l.h * l.c; // l.output = (float *)calloc(l.batch*l.outputs, sizeof(float)); l.forward = forward_convolutional_layer; if(batch_normalize){ // l.scales = (float *)calloc(n, sizeof(float)); // l.rolling_mean = (float *)calloc(n, sizeof(float)); //l.rolling_variance = (float *)calloc(n, sizeof(float)); } l.workspace_size = get_workspace_size(l); l.activation = activation; fprintf(stderr, "conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.); return l; } void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { int i, j, k, b; for(b = 0; b < batch; ++b){ for(k = 0; k < c; ++k){ for(j = 0; j < h*stride; ++j){ for(i = 0; i < w*stride; ++i){ int in_index = b*w*h*c + k*w*h + (j/stride)*w + i/stride; int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i; if(forward) out[out_index] = scale*in[in_index]; else in[in_index] += scale*out[out_index]; } } } } } void forward_upsample_layer(const layer l, network net) { //fill_cpu(l.outputs*l.batch, 0, l.output, 1); //upsample_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output); int c = l.c; int h = l.h; int w = l.w; int stride = l.stride; float *in = net.input; float *out = l.output; int i, j, k; for(k = 0; k < c; ++k){ for(j = 0; j < h*stride; ++j){ for(i = 0; i < w*stride; ++i){ int in_index = k*w*h + (j/stride)*w + i/stride; int out_index = k*w*h*stride*stride + j*w*stride + i; out[out_index] = in[in_index]; } } } } layer make_upsample_layer(int batch, int w, int h, int c, int stride) { layer l; memset(&l,0,sizeof(layer)); l.type = UPSAMPLE; l.batch = batch; l.w = w; l.h = h; l.c = c; l.out_w = w*stride; l.out_h = h*stride; l.out_c = c; if(stride < 0){ stride = -stride; l.reverse=1; l.out_w = w/stride; 
l.out_h = h/stride; } l.stride = stride; l.outputs = l.out_w*l.out_h*l.out_c; l.inputs = l.w*l.h*l.c; l.output = (float *)calloc(l.outputs*batch, sizeof(float));; l.forward = forward_upsample_layer; if(l.reverse) fprintf(stderr, "downsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c); else fprintf(stderr, "upsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c); return l; } void forward_route_layer(const layer l, network net) { int i, j; int offset = 0; for(i = 0; i < l.n; ++i){ int index = l.input_layers[i]; float *input = net.layers[index].output; int input_size = l.input_sizes[i]; copy_cpu(input_size, input, 1, l.output + offset, 1); offset += input_size; } } layer make_route_layer(int batch, int n, int *input_layers, int *input_sizes) { fprintf(stderr,"route "); layer l; memset(&l,0,sizeof(layer)); l.type = ROUTE; l.batch = batch; l.n = n; l.input_layers = input_layers; l.input_sizes = input_sizes; int i; int outputs = 0; for(i = 0; i < n; ++i){ fprintf(stderr," %d", input_layers[i]); outputs += input_sizes[i]; } fprintf(stderr, "\n"); l.outputs = outputs; l.inputs = outputs; // l.output = (float *)calloc(outputs*batch, sizeof(float));; l.forward = forward_route_layer; return l; } static int entry_index(layer l, int batch, int location, int entry) { int n = location / (l.w*l.h); int loc = location % (l.w*l.h); return batch*l.outputs + n*l.w*l.h*(4+l.classes+1) + entry*l.w*l.h + loc; } void forward_yolo_layer(const layer l, network net) { int i,j,b,t,n; //char line[256]; //FILE *fp3; //char filename[256]; //sprintf(filename, "yolo_layer_%d.txt", l.outputs); //printf("YOLO_layer:outputs=%d,%s\n",l.outputs,filename); // if( (fp3 = fopen(filename, "w")) == NULL)fprintf(stderr,"CANNOT OPEN\n"); //int x; // for( x = 0; x < l.outputs; x++) //{ // sprintf(line, "%f\n", net.input[x]); // if(fputs(line,fp3)<0)fprintf(stderr,"write FILE failed\n"); // } // fclose(fp3); memcpy(l.output, net.input, 
l.outputs*l.batch*sizeof(float)); for (b = 0; b < l.batch; ++b){ for(n = 0; n < l.n; ++n){ int index = entry_index(l, b, n*l.w*l.h, 0); activate_array(l.output + index, 2*l.w*l.h, LOGISTIC); index = entry_index(l, b, n*l.w*l.h, 4); activate_array(l.output + index, (1+l.classes)*l.w*l.h, LOGISTIC); } } return ; } layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes) { int i; layer l; memset(&l,0,sizeof(layer)); l.type = YOLO; l.n = n; l.total = total; l.batch = batch; l.h = h; l.w = w; l.c = n*(classes + 4 + 1); l.out_w = l.w; l.out_h = l.h; l.out_c = l.c; l.classes = classes; //l.cost = (float *)calloc(1, sizeof(float)); l.biases = (float *)calloc(total*2, sizeof(float)); if(mask) l.mask = mask; else{ l.mask = (int *)calloc(n, sizeof(int)); for(i = 0; i < n; ++i){ l.mask[i] = i; } } //l.bias_updates = (float *)calloc(n*2, sizeof(float)); l.outputs = h*w*n*(classes + 4 + 1); l.inputs = l.outputs; //l.truths = 90*(4 + 1); //l.delta = (float *)calloc(batch*l.outputs, sizeof(float)); l.output = (float *)calloc(batch*l.outputs, sizeof(float)); for(i = 0; i < total*2; ++i){ l.biases[i] = .5; } l.forward = forward_yolo_layer; fprintf(stderr, "detection\n"); srand(0); return l; } /////////////////praser begin typedef struct{ char *type; list *options; }section; list *read_cfg(char *filename); LAYER_TYPE string_to_layer_type(char * type) { if (strcmp(type, "[shortcut]")==0) return SHORTCUT; if (strcmp(type, "[crop]")==0) return CROP; if (strcmp(type, "[cost]")==0) return COST; if (strcmp(type, "[detection]")==0) return DETECTION; if (strcmp(type, "[region]")==0) return REGION; if (strcmp(type, "[yolo]")==0) return YOLO; if (strcmp(type, "[local]")==0) return LOCAL; if (strcmp(type, "[conv]")==0 || strcmp(type, "[convolutional]")==0) return CONVOLUTIONAL; if (strcmp(type, "[deconv]")==0 || strcmp(type, "[deconvolutional]")==0) return DECONVOLUTIONAL; if (strcmp(type, "[activation]")==0) return ACTIVE; if (strcmp(type, "[logistic]")==0) 
return LOGXENT; if (strcmp(type, "[l2norm]")==0) return L2NORM; if (strcmp(type, "[net]")==0 || strcmp(type, "[network]")==0) return NETWORK; if (strcmp(type, "[crnn]")==0) return CRNN; if (strcmp(type, "[gru]")==0) return GRU; if (strcmp(type, "[lstm]") == 0) return LSTM; if (strcmp(type, "[rnn]")==0) return RNN; if (strcmp(type, "[conn]")==0 || strcmp(type, "[connected]")==0) return CONNECTED; if (strcmp(type, "[max]")==0 || strcmp(type, "[maxpool]")==0) return MAXPOOL; if (strcmp(type, "[reorg]")==0) return REORG; if (strcmp(type, "[avg]")==0 || strcmp(type, "[avgpool]")==0) return AVGPOOL; if (strcmp(type, "[dropout]")==0) return DROPOUT; if (strcmp(type, "[lrn]")==0 || strcmp(type, "[normalization]")==0) return NORMALIZATION; if (strcmp(type, "[batchnorm]")==0) return BATCHNORM; if (strcmp(type, "[soft]")==0 || strcmp(type, "[softmax]")==0) return SOFTMAX; if (strcmp(type, "[route]")==0) return ROUTE; if (strcmp(type, "[upsample]")==0) return UPSAMPLE; return BLANK; } void free_section(section *s) { free(s->type); node *n = s->options->front; while(n){ kvp *pair = (kvp *)n->val; free(pair->key); free(pair); node *next = n->next; free(n); n = next; } free(s->options); free(s); } void parse_data(char *data, float *a, int n) { int i; if(!data) return; char *curr = data; char *next = data; int done = 0; for(i = 0; i < n && !done; ++i){ while(*++next !='\0' && *next != ','); if(*next == '\0') done = 1; *next = '\0'; sscanf(curr, "%g", &a[i]); curr = next+1; } } typedef struct size_params{ int batch; int inputs; int h; int w; int c; int index; int time_steps; network *net; } size_params; layer parse_convolutional(list *options, size_params params) { int n = option_find_int(options, "filters",1); int size = option_find_int(options, "size",1); int stride = option_find_int(options, "stride",1); int pad = option_find_int_quiet(options, "pad",0); int padding = option_find_int_quiet(options, "padding",0); int groups = option_find_int_quiet(options, "groups", 1); if(pad) 
padding = size/2; char *activation_s = option_find_str(options, "activation", "logistic"); ACTIVATION activation = get_activation(activation_s); int batch,h,w,c; h = params.h; w = params.w; c = params.c; batch=params.batch; if(!(h && w && c)) error("Layer before convolutional layer must output image."); int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0); int binary = option_find_int_quiet(options, "binary", 0); int xnor = option_find_int_quiet(options, "xnor", 0); layer l = make_convolutional_layer(batch,h,w,c,n,groups,size,stride,padding,activation, batch_normalize, binary, xnor, params.net->adam); l.flipped = option_find_int_quiet(options, "flipped", 0); l.dot = option_find_float_quiet(options, "dot", 0); return l; } int *parse_yolo_mask(char *a, int *num) { int *mask = 0; if(a){ int len = strlen(a); int n = 1; int i; for(i = 0; i < len; ++i){ if (a[i] == ',') ++n; } mask = (int *)calloc(n, sizeof(int)); for(i = 0; i < n; ++i){ int val = atoi(a); mask[i] = val; a = strchr(a, ',')+1; } *num = n; } return mask; } layer parse_yolo(list *options, size_params params) { int classes = option_find_int(options, "classes", 20); int total = option_find_int(options, "num", 1); int num = total; char *a = option_find_str(options, "mask", 0); int *mask = parse_yolo_mask(a, &num); layer l = make_yolo_layer(params.batch, params.w, params.h, num, total, mask, classes); assert(l.outputs == params.inputs); l.max_boxes = option_find_int_quiet(options, "max",90); l.jitter = option_find_float(options, "jitter", .2); l.ignore_thresh = option_find_float(options, "ignore_thresh", .5); l.truth_thresh = option_find_float(options, "truth_thresh", 1); l.random = option_find_int_quiet(options, "random", 0); a = option_find_str(options, "anchors", 0); if(a){ int len = strlen(a); int n = 1; int i; for(i = 0; i < len; ++i){ if (a[i] == ',') ++n; } for(i = 0; i < n; ++i){ float bias = atof(a); l.biases[i] = bias; a = strchr(a, ',')+1; } } return l; } layer 
parse_shortcut(list *options, size_params params, network *net) { char *l = option_find(options, "from"); int index = atoi(l); if(index < 0) index = params.index + index; int batch = params.batch; layer from = net->layers[index]; layer s = make_shortcut_layer(batch, index, params.w, params.h, params.c, from.out_w, from.out_h, from.out_c); char *activation_s = option_find_str(options, "activation", "linear"); ACTIVATION activation = get_activation(activation_s); s.activation = activation; s.alpha = option_find_float_quiet(options, "alpha", 1); s.beta = option_find_float_quiet(options, "beta", 1); return s; } layer parse_upsample(list *options, size_params params, network *net) { int stride = option_find_int(options, "stride",2); layer l = make_upsample_layer(params.batch, params.w, params.h, params.c, stride); l.scale = option_find_float_quiet(options, "scale", 1); return l; } layer parse_route(list *options, size_params params, network *net) { char *l = option_find(options, "layers"); int len = strlen(l); if(!l) error("Route Layer must specify input layers"); int n = 1; int i; for(i = 0; i < len; ++i){ if (l[i] == ',') ++n; } int *layers = (int *)calloc(n, sizeof(int)); int *sizes = (int *)calloc(n, sizeof(int)); for(i = 0; i < n; ++i){ int index = atoi(l); l = strchr(l, ',')+1; if(index < 0) index = params.index + index; layers[i] = index; sizes[i] = net->layers[index].outputs; } int batch = params.batch; layer route_layer = make_route_layer(batch, n, layers, sizes); layer first = net->layers[layers[0]]; route_layer.out_w = first.out_w; route_layer.out_h = first.out_h; route_layer.out_c = first.out_c; for(i = 1; i < n; ++i){ int index = layers[i]; layer next = net->layers[index]; if(next.out_w == first.out_w && next.out_h == first.out_h){ route_layer.out_c += next.out_c; }else{ route_layer.out_h = route_layer.out_w = route_layer.out_c = 0; } } return route_layer; } void softmax(float *input, int n, float temp, int stride, float *output) { int i; float sum = 0; 
float largest = -FLT_MAX; for(i = 0; i < n; ++i){ if(input[i*stride] > largest) largest = input[i*stride]; } for(i = 0; i < n; ++i){ float e = exp(input[i*stride]/temp - largest/temp); sum += e; output[i*stride] = e; } for(i = 0; i < n; ++i){ output[i*stride] /= sum; } } void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { int g, b; for(b = 0; b < batch; ++b){ for(g = 0; g < groups; ++g){ softmax(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset); } } } void forward_region_layer(const layer l, network net) { int i,j,b,t,n; memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float)); #ifndef GPU for (b = 0; b < l.batch; ++b){ for(n = 0; n < l.n; ++n){ int index = entry_index(l, b, n*l.w*l.h, 0); activate_array(l.output + index, 2*l.w*l.h, LOGISTIC); index = entry_index(l, b, n*l.w*l.h, l.coords); if(!l.background) activate_array(l.output + index, l.w*l.h, LOGISTIC); index = entry_index(l, b, n*l.w*l.h, l.coords + 1); //if(!l.softmax) activate_array(l.output + index, l.classes*l.w*l.h, LOGISTIC); } } if (l.softmax){ int index = entry_index(l, 0, 0, l.coords + !l.background); softmax_cpu(net.input + index, l.classes + l.background, l.batch*l.n, l.inputs/l.n, l.w*l.h, 1, l.w*l.h, 1, l.output + index); } // double time1,time2; // time1 = what_time_is_it_now(); // char line[256]; // FILE *fp3; // char filename[256]; // sprintf(filename, "yolo_region_input_float32_%d.txt", 13*13*425); // printf("YOLO_layer:outputs=%d,%s\n",l.outputs,filename); // if( (fp3 = fopen(filename, "w")) == NULL)fprintf(stderr,"CANNOT OPEN\n"); // int x; // for( x = 0; x < l.outputs; x++) // { // sprintf(line, "%f\n", net.input[x]); // if(fputs(line,fp3)<0)fprintf(stderr,"write FILE failed\n"); // } // fclose(fp3); // time2 = what_time_is_it_now(); // printf("Predicted in %f seconds.\n",time2 - time1); #endif if(!net.train) return; } layer make_region_layer(int 
batch, int w, int h, int n, int classes, int coords) { layer l; memset(&l,0,sizeof(layer)); l.type = REGION; l.n = n; l.batch = batch; l.h = h; l.w = w; l.c = n*(classes + coords + 1); l.out_w = l.w; l.out_h = l.h; l.out_c = l.c; l.classes = classes; l.coords = coords; l.biases = (float *)calloc(n*2, sizeof(float)); l.outputs = h*w*n*(classes + coords + 1); l.inputs = l.outputs; l.truths = 30*(l.coords + 1); l.output = (float *)calloc(batch*l.outputs, sizeof(float)); int i; for(i = 0; i < n*2; ++i){ l.biases[i] = .5; } l.forward = forward_region_layer; fprintf(stderr, "detection\n"); srand(0); return l; } layer parse_region(list *options, size_params params) { int coords = option_find_int(options, "coords", 4); int classes = option_find_int(options, "classes", 20); int num = option_find_int(options, "num", 1); layer l = make_region_layer(params.batch, params.w, params.h, num, classes, coords); assert(l.outputs == params.inputs); l.log = option_find_int_quiet(options, "log", 0); l.sqrt = option_find_int_quiet(options, "sqrt", 0); l.softmax = option_find_int(options, "softmax", 0); l.background = option_find_int_quiet(options, "background", 0); l.max_boxes = option_find_int_quiet(options, "max",30); l.jitter = option_find_float(options, "jitter", .2); l.rescore = option_find_int_quiet(options, "rescore",0); l.thresh = option_find_float(options, "thresh", .5); l.classfix = option_find_int_quiet(options, "classfix", 0); l.absolute = option_find_int_quiet(options, "absolute", 0); l.random = option_find_int_quiet(options, "random", 0); l.coord_scale = option_find_float(options, "coord_scale", 1); l.object_scale = option_find_float(options, "object_scale", 1); l.noobject_scale = option_find_float(options, "noobject_scale", 1); l.mask_scale = option_find_float(options, "mask_scale", 1); l.class_scale = option_find_float(options, "class_scale", 1); l.bias_match = option_find_int_quiet(options, "bias_match",0); char *tree_file = option_find_str(options, "tree", 0); // if 
(tree_file) l.softmax_tree = read_tree(tree_file); char *map_file = option_find_str(options, "map", 0); // if (map_file) l.map = read_map(map_file); char *a = option_find_str(options, "anchors", 0); if(a){ int len = strlen(a); int n = 1; int i; for(i = 0; i < len; ++i){ if (a[i] == ',') ++n; } for(i = 0; i < n; ++i){ float bias = atof(a); l.biases[i] = bias; a = strchr(a, ',')+1; } } return l; } void reorg_cpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int b,i,j,k; int out_c = c/(stride*stride); for(b = 0; b < batch; ++b){ for(k = 0; k < c; ++k){ for(j = 0; j < h; ++j){ for(i = 0; i < w; ++i){ int in_index = i + w*(j + h*(k + c*b)); int c2 = k % out_c; int offset = k / out_c; int w2 = i*stride + offset % stride; int h2 = j*stride + offset / stride; int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b)); if(forward) out[out_index] = x[in_index]; else out[in_index] = x[out_index]; } } } } } void forward_reorg_layer(const layer l, network net) { int i; //if(l.flatten){ // memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float)); // if(l.reverse){ // flatten(l.output, l.w*l.h, l.c, l.batch, 0); // }else{ // flatten(l.output, l.w*l.h, l.c, l.batch, 1); // } //} else if (l.extra) { // for(i = 0; i < l.batch; ++i){ // copy_cpu(l.inputs, net.input + i*l.inputs, 1, l.output + i*l.outputs, 1); // } //} else if (l.reverse){ // reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.output); //} else { reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 0, l.output); //} } layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse, int flatten, int extra) { layer l; memset(&l,0,sizeof(layer)); l.type = REORG; l.batch = batch; l.stride = stride; l.extra = extra; l.h = h; l.w = w; l.c = c; l.flatten = flatten; if(reverse){ l.out_w = w*stride; l.out_h = h*stride; l.out_c = c/(stride*stride); }else{ l.out_w = w/stride; l.out_h = h/stride; l.out_c = c*(stride*stride); } l.reverse = reverse; l.outputs = 
l.out_h * l.out_w * l.out_c; l.inputs = h*w*c; if(l.extra){ l.out_w = l.out_h = l.out_c = 0; l.outputs = l.inputs + l.extra; } if(extra){ fprintf(stderr, "reorg %4d -> %4d\n", l.inputs, l.outputs); } else { fprintf(stderr, "reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c); } int output_size = l.outputs * batch; //l.output = (float *)calloc(output_size, sizeof(float)); l.forward = forward_reorg_layer; return l; } layer parse_reorg(list *options, size_params params) { int stride = option_find_int(options, "stride",1); int reverse = option_find_int_quiet(options, "reverse",0); int flatten = option_find_int_quiet(options, "flatten",0); int extra = option_find_int_quiet(options, "extra",0); int batch,h,w,c; h = params.h; w = params.w; c = params.c; batch=params.batch; if(!(h && w && c)) error("Layer before reorg layer must output image."); layer layer = make_reorg_layer(batch,w,h,c,stride,reverse, flatten, extra); return layer; } void forward_maxpool_layer(layer l, network net) { int b,i,j,k,m,n; int w_offset = -l.pad; int h_offset = -l.pad; int h = l.out_h; int w = l.out_w; int c = l.c; for(b = 0; b < l.batch; ++b){ for(k = 0; k < c; ++k){ for(i = 0; i < h; ++i){ for(j = 0; j < w; ++j){ int out_index = j + w*(i + h*(k + c*b)); float max = -FLT_MAX; int max_i = -1; for(n = 0; n < l.size; ++n){ for(m = 0; m < l.size; ++m){ int cur_h = h_offset + i*l.stride + n; int cur_w = w_offset + j*l.stride + m; int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c)); int valid = (cur_h >= 0 && cur_h < l.h && cur_w >= 0 && cur_w < l.w); float val = (valid != 0) ? net.input[index] : -FLT_MAX; max_i = (val > max) ? index : max_i; max = (val > max) ? 
                            val : max; /* running max over the pooling window (ternary begun on the previous source line) */
                    }
                }
                /* Record the pooled maximum and the flat input index it came
                   from (max_i stays -1 if the whole window was out of bounds). */
                l.output[out_index] = max;
                l.indexes[out_index] = max_i;
            }
        }
    }
}
}

/* Construct a max-pooling layer descriptor.
   NOTE(review): output_size is computed but l.output / l.indexes are
   never allocated here, even though forward_maxpool_layer writes both;
   make_reorg_layer's allocation is likewise commented out, so buffers
   are presumably attached elsewhere in this port -- TODO confirm before
   using this layer stand-alone. */
layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding)
{
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = MAXPOOL;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = c;
    l.pad = padding;
    /* Standard pooling output geometry: (in + pad - size)/stride + 1. */
    l.out_w = (w + padding - size)/stride + 1;
    l.out_h = (h + padding - size)/stride + 1;
    l.out_c = c;
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = h*w*c;
    l.size = size;
    l.stride = stride;
    int output_size = l.out_h * l.out_w * l.out_c * batch;
    fprintf(stderr, "max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
    return l;
}

/* Build a maxpool layer from a cfg section; size defaults to stride and
   padding defaults to size-1. */
layer parse_maxpool(list *options, size_params params)
{
    int stride = option_find_int(options, "stride",1);
    int size = option_find_int(options, "size",stride);
    int padding = option_find_int_quiet(options, "padding", size-1);

    int batch,h,w,c;
    h = params.h;
    w = params.w;
    c = params.c;
    batch=params.batch;
    if(!(h && w && c)) error("Layer before maxpool layer must output image.");

    layer maxpool_layer = make_maxpool_layer(batch,h,w,c,size,stride,padding);
    return maxpool_layer;
}

/* Map a policy name string to its learning_rate_policy enum; unknown
   names fall back to CONSTANT with a warning on stderr. */
learning_rate_policy get_policy(char *s)
{
    if (strcmp(s, "random")==0) return RANDOM;
    if (strcmp(s, "poly")==0) return POLY;
    if (strcmp(s, "constant")==0) return CONSTANT;
    if (strcmp(s, "step")==0) return STEP;
    if (strcmp(s, "exp")==0) return EXP;
    if (strcmp(s, "sigmoid")==0) return SIG;
    if (strcmp(s, "steps")==0) return STEPS;
    fprintf(stderr, "Couldn't find policy %s, going with constant\n", s);
    return CONSTANT;
}

/* Read [net]/[network] section options into the network struct. */
void parse_net_options(list *options, network *net)
{
    net->batch = option_find_int(options, "batch",1);
    net->learning_rate = option_find_float(options, "learning_rate", .001);
    net->momentum = option_find_float(options, "momentum", .9);
    net->decay = option_find_float(options, "decay", .0001);
    int subdivs = option_find_int(options, "subdivisions",1);
    net->time_steps = option_find_int_quiet(options, "time_steps",1);
net->notruth = option_find_int_quiet(options, "notruth",0); net->batch /= subdivs; net->batch *= net->time_steps; net->subdivisions = subdivs; net->random = option_find_int_quiet(options, "random", 0); net->adam = option_find_int_quiet(options, "adam", 0); if(net->adam){ net->B1 = option_find_float(options, "B1", .9); net->B2 = option_find_float(options, "B2", .999); net->eps = option_find_float(options, "eps", .0000001); } net->h = option_find_int_quiet(options, "height",0); net->w = option_find_int_quiet(options, "width",0); net->c = option_find_int_quiet(options, "channels",0); net->inputs = option_find_int_quiet(options, "inputs", net->h * net->w * net->c); net->max_crop = option_find_int_quiet(options, "max_crop",net->w*2); net->min_crop = option_find_int_quiet(options, "min_crop",net->w); net->max_ratio = option_find_float_quiet(options, "max_ratio", (float) net->max_crop / net->w); net->min_ratio = option_find_float_quiet(options, "min_ratio", (float) net->min_crop / net->w); net->center = option_find_int_quiet(options, "center",0); net->clip = option_find_float_quiet(options, "clip", 0); net->angle = option_find_float_quiet(options, "angle", 0); net->aspect = option_find_float_quiet(options, "aspect", 1); net->saturation = option_find_float_quiet(options, "saturation", 1); net->exposure = option_find_float_quiet(options, "exposure", 1); net->hue = option_find_float_quiet(options, "hue", 0); if(!net->inputs && !(net->h && net->w && net->c)) error("No input parameters supplied"); char *policy_s = option_find_str(options, "policy", "constant"); net->policy = get_policy(policy_s); net->burn_in = option_find_int_quiet(options, "burn_in", 0); net->power = option_find_float_quiet(options, "power", 4); if(net->policy == STEP){ net->step = option_find_int(options, "step", 1); net->scale = option_find_float(options, "scale", 1); } else if (net->policy == STEPS){ char *l = option_find(options, "steps"); char *p = option_find(options, "scales"); if(!l || !p) 
error("STEPS policy must have steps and scales in cfg file"); int len = strlen(l); int n = 1; int i; for(i = 0; i < len; ++i){ if (l[i] == ',') ++n; } int *steps = (int *)calloc(n, sizeof(int)); float *scales = (float *)calloc(n, sizeof(float)); for(i = 0; i < n; ++i){ int step = atoi(l); float scale = atof(p); l = strchr(l, ',')+1; p = strchr(p, ',')+1; steps[i] = step; scales[i] = scale; } net->scales = scales; net->steps = steps; net->num_steps = n; } else if (net->policy == EXP){ net->gamma = option_find_float(options, "gamma", 1); } else if (net->policy == SIG){ net->gamma = option_find_float(options, "gamma", 1); net->step = option_find_int(options, "step", 1); } else if (net->policy == POLY || net->policy == RANDOM){ } net->max_batches = option_find_int(options, "max_batches", 0); } int is_network(section *s) { return (strcmp(s->type, "[net]")==0 || strcmp(s->type, "[network]")==0); } network *parse_network_cfg(char *filename) { list *sections = read_cfg(filename); node *n = sections->front; if(!n) error("Config file has no sections"); network *net = make_network(sections->size - 1); net->gpu_index = -1; size_params params; section *s = (section *)n->val; list *options = s->options; if(!is_network(s)) error("First section must be [net] or [network]"); parse_net_options(options, net); params.h = net->h; params.w = net->w; params.c = net->c; params.inputs = net->inputs; params.batch = net->batch; params.time_steps = net->time_steps; params.net = net; size_t workspace_size = 0; n = n->next; int count = 0; free_section(s); fprintf(stderr, "layer filters size input output\n"); while(n){ params.index = count; fprintf(stderr, "%5d ", count); s = (section *)n->val; options = s->options; //layer l = {0}; layer l; memset(&l,0,sizeof(layer)); LAYER_TYPE lt = string_to_layer_type(s->type); if(lt == CONVOLUTIONAL){ l = parse_convolutional(options, params); }else if(lt == YOLO){ l = parse_yolo(options, params); }else if(lt == ROUTE){ l = parse_route(options, params, net); 
}else if(lt == UPSAMPLE){ l = parse_upsample(options, params, net); }else if(lt == SHORTCUT){ l = parse_shortcut(options, params, net); }else if(lt == REGION){ l = parse_region(options, params); }else if(lt == YOLO){ l = parse_yolo(options, params); }else if(lt == MAXPOOL){ l = parse_maxpool(options, params); }else if(lt == REORG){ l = parse_reorg(options, params); }else{ fprintf(stderr, "Type not recognized: %s\n", s->type); } l.clip = net->clip; l.truth = option_find_int_quiet(options, "truth", 0); l.onlyforward = option_find_int_quiet(options, "onlyforward", 0); l.stopbackward = option_find_int_quiet(options, "stopbackward", 0); l.dontsave = option_find_int_quiet(options, "dontsave", 0); // l.dontload = option_find_int_quiet(options, "dontload", 0); // l.dontloadscales = option_find_int_quiet(options, "dontloadscales", 0); //l.learning_rate_scale = option_find_float_quiet(options, "learning_rate", 1); l.smooth = option_find_float_quiet(options, "smooth", 0); option_unused(options); net->layers[count] = l; if (l.workspace_size > workspace_size) workspace_size = l.workspace_size; free_section(s); n = n->next; ++count; if(n){ params.h = l.out_h; params.w = l.out_w; params.c = l.out_c; params.inputs = l.outputs; } } free_list(sections); layer out = get_network_output_layer(net); net->outputs = out.outputs; net->output = out.output; //net->input = (float *)calloc(net->inputs*net->batch, sizeof(float)); workspace_size = 0;//donot calloc workspace //if(workspace_size){ // //printf("%ld\n", workspace_size); // net->workspace = (float *)calloc(1, workspace_size); //} return net; } list *read_cfg(char *filename) { FILE *file = fopen(filename, "r"); if(file == 0) file_error(filename); char *line; int nu = 0; list *options = make_list(); section *current = 0; while((line=fgetl(file)) != 0){ ++ nu; strip(line); switch(line[0]){ case '[': current = (section *)malloc(sizeof(section)); list_insert(options, current); current->options = make_list(); current->type = line; break; 
case '\0': case '#': case ';': free(line); break; default: if(!read_option(line, current->options)){ fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line); free(line); } break; } } fclose(file); return options; } void load_convolutional_weights(layer l, FILE *fp) { int num = l.nweights; fread(l.biases, sizeof(float), l.n, fp); if (l.batch_normalize){ fread(l.scales, sizeof(float), l.n, fp); fread(l.rolling_mean, sizeof(float), l.n, fp); fread(l.rolling_variance, sizeof(float), l.n, fp); } fread(l.weights, sizeof(float), num, fp); } void load_weights_upto(network *net, char *filename, int start, int cutoff) { fprintf(stderr, "Loading weights from %s...", filename); fflush(stdout); FILE *fp = fopen(filename, "rb"); if(!fp) file_error(filename); int major; int minor; int revision; fread(&major, sizeof(int), 1, fp); fread(&minor, sizeof(int), 1, fp); fread(&revision, sizeof(int), 1, fp); printf("major=%d;minor=%d;revision=%d\n",major,minor,revision);// 0 2 0 printf("if true ro false:%d\n",(major*10 + minor) >= 2 && major < 1000 && minor < 1000); if ((major*10 + minor) >= 2 && major < 1000 && minor < 1000){ //fread(net->seen, sizeof(size_t), 1, fp); fread(net->seen, sizeof(size_t), 1, fp); fread(net->seen, sizeof(size_t), 1, fp); }else { int iseen = 0; fread(&iseen, sizeof(int), 1, fp); *net->seen = iseen; } //printf("sizeof(size_t)=%u\n",sizeof(size_t));// in my PC is 4 int i; for(i = start; i < net->n && i < cutoff; ++i){ layer l = net->layers[i]; if(l.type == CONVOLUTIONAL){ load_convolutional_weights(l, fp); } } fprintf(stderr, "Done!\n"); fclose(fp); } void load_weights(network *net, char *filename) { load_weights_upto(net, filename, 0, net->n); } /////////////////praser end /////////////////network begin load_args get_base_args(network *net) { load_args args = {0}; args.w = net->w; args.h = net->h; args.size = net->w; args.min = net->min_crop; args.max = net->max_crop; args.angle = net->angle; args.aspect = net->aspect; args.exposure = 
net->exposure; args.center = net->center; args.saturation = net->saturation; args.hue = net->hue; return args; } network *load_network(char *cfg, char *weights, int clear) { network *net = parse_network_cfg(cfg); //if(weights && weights[0] != 0){ // load_weights(net, weights); //} if(clear) (*net->seen) = 0; return net; } char *get_layer_string(LAYER_TYPE a) { switch(a){ case CONVOLUTIONAL: return "convolutional"; case ACTIVE: return "activation"; case LOCAL: return "local"; case DECONVOLUTIONAL: return "deconvolutional"; case CONNECTED: return "connected"; case RNN: return "rnn"; case GRU: return "gru"; case LSTM: return "lstm"; case CRNN: return "crnn"; case MAXPOOL: return "maxpool"; case REORG: return "reorg"; case AVGPOOL: return "avgpool"; case SOFTMAX: return "softmax"; case DETECTION: return "detection"; case REGION: return "region"; case YOLO: return "yolo"; case DROPOUT: return "dropout"; case CROP: return "crop"; case COST: return "cost"; case ROUTE: return "route"; case SHORTCUT: return "shortcut"; case NORMALIZATION: return "normalization"; case BATCHNORM: return "batchnorm"; default: break; } return "none"; } network *make_network(int n) { network *net = (network *)calloc(1, sizeof(network)); net->n = n; net->layers = (layer *)calloc(net->n, sizeof(layer)); net->seen = (size_t *)calloc(1, sizeof(size_t)); net->t = (int *)calloc(1, sizeof(int)); net->cost = (float *)calloc(1, sizeof(float)); return net; } void forward_network(network *netp) { network net = *netp; int i; for(i = 0; i < net.n; ++i){ net.index = i; layer l = net.layers[i]; l.forward(l, net); net.input = l.output; // printf("layer [%d]\n",i); } } void set_temp_network(network *net, float t) { int i; for(i = 0; i < net->n; ++i){ net->layers[i].temperature = t; } } void set_batch_network(network *net, int b) { net->batch = b; int i; for(i = 0; i < net->n; ++i){ net->layers[i].batch = b; } } float *network_predict(network *net, float *input) { network orig = *net; net->input = input; 
net->truth = 0; net->train = 0; net->delta = 0; forward_network(net); float *out = net->output; *net = orig; return out; } int yolo_num_detections(layer l, float thresh) { int i, n; int count = 0; for (i = 0; i < l.w*l.h; ++i){ for(n = 0; n < l.n; ++n){ int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4); if(l.output[obj_index] > thresh){ ++count; } } } return count; } int num_detections(network *net, float thresh) { int i; int s = 0; for(i = 0; i < net->n; ++i){ layer l = net->layers[i]; if(l.type == YOLO){ s += yolo_num_detections(l, thresh); } if(l.type == DETECTION || l.type == REGION){ s += l.w*l.h*l.n; } } return s; } detection *make_network_boxes(network *net, float thresh, int *num) { layer l = net->layers[net->n - 1]; int i; int nboxes = num_detections(net, thresh); //printf("num_detections nboxes = %d\n",nboxes); if(num) *num = nboxes; detection *dets = (detection *)calloc(nboxes, sizeof(detection)); for(i = 0; i < nboxes; ++i){ dets[i].prob = (float *)calloc(l.classes, sizeof(float)); } return dets; } box get_yolo_box(float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, int stride) { box b; b.x = (i + x[index + 0*stride]) / lw; b.y = (j + x[index + 1*stride]) / lh; b.w = exp(x[index + 2*stride]) * biases[2*n] / w; b.h = exp(x[index + 3*stride]) * biases[2*n+1] / h; return b; } void correct_yolo_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative) { int i; int new_w=0; int new_h=0; if (((float)netw/w) < ((float)neth/h)) { new_w = netw; new_h = (h * netw)/w; } else { new_h = neth; new_w = (w * neth)/h; } for (i = 0; i < n; ++i){ box b = dets[i].bbox; b.x = (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw); b.y = (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth); b.w *= (float)netw/new_w; b.h *= (float)neth/new_h; if(!relative){ b.x *= w; b.w *= w; b.y *= h; b.h *= h; } dets[i].bbox = b; } } int get_yolo_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, int 
relative, detection *dets) { int i,j,n; float *predictions = l.output; // if (l.batch == 2) avg_flipped_yolo(l); int count = 0; for (i = 0; i < l.w*l.h; ++i){ int row = i / l.w; int col = i % l.w; for(n = 0; n < l.n; ++n){ int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4); float objectness = predictions[obj_index]; if(objectness <= thresh) continue; int box_index = entry_index(l, 0, n*l.w*l.h + i, 0); dets[count].bbox = get_yolo_box(predictions, l.biases, l.mask[n], box_index, col, row, l.w, l.h, netw, neth, l.w*l.h); dets[count].objectness = objectness; dets[count].classes = l.classes; for(j = 0; j < l.classes; ++j){ int class_index = entry_index(l, 0, n*l.w*l.h + i, 4 + 1 + j); float prob = objectness*predictions[class_index]; dets[count].prob[j] = (prob > thresh) ? prob : 0; } ++count; } } correct_yolo_boxes(dets, count, w, h, netw, neth, relative); return count; } box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h, int stride) { box b; b.x = (i + x[index + 0*stride]) / w; b.y = (j + x[index + 1*stride]) / h; b.w = exp(x[index + 2*stride]) * biases[2*n] / w; b.h = exp(x[index + 3*stride]) * biases[2*n+1] / h; return b; } void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative) { int i; int new_w=0; int new_h=0; if (((float)netw/w) < ((float)neth/h)) { new_w = netw; new_h = (h * netw)/w; } else { new_h = neth; new_w = (w * neth)/h; } for (i = 0; i < n; ++i){ box b = dets[i].bbox; b.x = (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw); b.y = (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth); b.w *= (float)netw/new_w; b.h *= (float)neth/new_h; if(!relative){ b.x *= w; b.w *= w; b.y *= h; b.h *= h; } dets[i].bbox = b; } } void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets) { int i,j,n,z; float *predictions = l.output; if (l.batch == 2) { float *flip = l.output + l.outputs; for (j = 0; j < 
l.h; ++j) { for (i = 0; i < l.w/2; ++i) { for (n = 0; n < l.n; ++n) { for(z = 0; z < l.classes + l.coords + 1; ++z){ int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i; int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1); float swap = flip[i1]; flip[i1] = flip[i2]; flip[i2] = swap; if(z == 0){ flip[i1] = -flip[i1]; flip[i2] = -flip[i2]; } } } } } for(i = 0; i < l.outputs; ++i){ l.output[i] = (l.output[i] + flip[i])/2.; } } for (i = 0; i < l.w*l.h; ++i){ int row = i / l.w; int col = i % l.w; for(n = 0; n < l.n; ++n){ int index = n*l.w*l.h + i; for(j = 0; j < l.classes; ++j){ dets[index].prob[j] = 0; } int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords); int box_index = entry_index(l, 0, n*l.w*l.h + i, 0); int mask_index = entry_index(l, 0, n*l.w*l.h + i, 4); float scale = l.background ? 1 : predictions[obj_index]; dets[index].bbox = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h, l.w*l.h); dets[index].objectness = scale > thresh ? scale : 0; if(dets[index].mask){ for(j = 0; j < l.coords - 4; ++j){ dets[index].mask[j] = l.output[mask_index + j*l.w*l.h]; } } int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + !l.background); if(dets[index].objectness){ for(j = 0; j < l.classes; ++j){ int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + j); float prob = scale*predictions[class_index]; dets[index].prob[j] = (prob > thresh) ? 
prob : 0; } } } } correct_region_boxes(dets, l.w*l.h*l.n, w, h, netw, neth, relative); } void fill_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets) { int j; for(j = 0; j < net->n; ++j){ layer l = net->layers[j]; if(l.type == YOLO){ int count = get_yolo_detections(l, w, h, net->w, net->h, thresh, map, relative, dets); dets += count; } if(l.type == REGION){ get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets); dets += l.w*l.h*l.n; } } } detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num) { detection *dets = make_network_boxes(net, thresh, num); fill_network_boxes(net, w, h, thresh, hier, map, relative, dets); return dets; } void free_detections(detection *dets, int n) { int i; for(i = 0; i < n; ++i){ free(dets[i].prob); if(dets[i].mask) free(dets[i].mask); } free(dets); } int network_width(network *net){return net->w;} int network_height(network *net){return net->h;} layer get_network_output_layer(network *net) { int i; for(i = net->n - 1; i >= 0; --i){ if(net->layers[i].type != COST) break; } return net->layers[i]; } void free_network(network *net) { int i; for(i = 0; i < net->n; ++i){ free_layer(net->layers[i]); } free(net->layers); if(net->input) free(net->input); if(net->truth) free(net->truth); free(net); } layer network_output_layer(network *net) { int i; for(i = net->n - 1; i >= 0; --i){ if(net->layers[i].type != COST) break; } return net->layers[i]; } int network_inputs(network *net) { return net->layers[0].inputs; } int network_outputs(network *net) { return network_output_layer(net).outputs; } float *network_output(network *net) { return network_output_layer(net).output; } //////////////////network end //////////////////////box begin int nms_comparator(const void *pa, const void *pb) { detection a = *(detection *)pa; detection b = *(detection *)pb; float diff = 0; if(b.sort_class >= 0){ diff = 
/* --- tail of nms_comparator(); its head lies above this chunk --- */
a.prob[b.sort_class] - b.prob[b.sort_class];
    } else {
        diff = a.objectness - b.objectness;
    }
    /* Sort descending: larger prob/objectness first. */
    if(diff < 0) return 1;
    else if(diff > 0) return -1;
    return 0;
}

/* Length of the overlap of two 1-D segments centered at x1/x2 with widths
 * w1/w2.  A negative return value means the segments do not intersect. */
float overlap(float x1, float w1, float x2, float w2)
{
    float l1 = x1 - w1/2;
    float l2 = x2 - w2/2;
    float left = l1 > l2 ? l1 : l2;
    float r1 = x1 + w1/2;
    float r2 = x2 + w2/2;
    float right = r1 < r2 ? r1 : r2;
    return right - left;
}

/* Intersection area of two boxes given in (center x, center y, w, h) form. */
float box_intersection(box a, box b)
{
    float w = overlap(a.x, a.w, b.x, b.w);
    float h = overlap(a.y, a.h, b.y, b.h);
    if(w < 0 || h < 0) return 0;
    float area = w*h;
    return area;
}

/* Union area of two boxes (sum of areas minus intersection). */
float box_union(box a, box b)
{
    float i = box_intersection(a, b);
    float u = a.w*a.h + b.w*b.h - i;
    return u;
}

/* Intersection-over-union of two boxes.
 * NOTE(review): divides by box_union() with no zero check — two degenerate
 * (zero-area) boxes would divide by zero. */
float box_iou(box a, box b)
{
    return box_intersection(a, b)/box_union(a, b);
}

/* Greedy per-class non-maximum suppression.
 * 1) Compact the array: detections with objectness == 0 are swapped to the
 *    tail and excluded ('total' is shrunk accordingly).
 * 2) For each class k: sort by prob[k] descending (nms_comparator), then
 *    zero the prob of any lower-ranked box whose IoU with a kept box
 *    exceeds 'thresh'. */
void do_nms_sort(detection *dets, int total, int classes, float thresh)
{
    int i, j, k;
    k = total-1;
    for(i = 0; i <= k; ++i){
        if(dets[i].objectness == 0){
            detection swap = dets[i];
            dets[i] = dets[k];
            dets[k] = swap;
            --k;
            --i;  /* re-examine the element just swapped into slot i */
        }
    }
    total = k+1;

    for(k = 0; k < classes; ++k){
        for(i = 0; i < total; ++i){
            dets[i].sort_class = k;
        }
        qsort(dets, total, sizeof(detection), nms_comparator);
        for(i = 0; i < total; ++i){
            if(dets[i].prob[k] == 0) continue;
            box a = dets[i].bbox;
            for(j = i+1; j < total; ++j){
                box b = dets[j].bbox;
                if (box_iou(a, b) > thresh){
                    dets[j].prob[k] = 0;
                }
            }
        }
    }
}
//////////////////////box end

//////////////////////image begin

/* Six RGB anchor colors interpolated by get_color() to give each class a
 * distinct hue. */
float colors[6][3] = { {1,0,1}, {0,0,1},{0,1,1},{0,1,0},{1,1,0},{1,0,0} };

/* Linear interpolation between the 6 anchor colors.
 * c: channel index (0..2), x: class offset, max: number of classes. */
float get_color(int c, int x, int max)
{
    float ratio = ((float)x/max)*5;
    int i = floor(ratio);
    int j = ceil(ratio);
    ratio -= i;
    float r = (1-ratio) * colors[i][c] + ratio*colors[j][c];
    //printf("%f\n", r);
    return r;
}

/* Pixel read with out-of-bounds coordinates mapped to 0 (zero padding). */
static float get_pixel_extend(image m, int x, int y, int c)
{
    if(x < 0 || x >= m.w || y < 0 || y >= m.h) return 0;
    /*
    if(x < 0) x = 0;
    if(x >= m.w) x = m.w-1;
    if(y < 0) y = 0;
    if(y >= m.h) y = m.h-1;
    */
    if(c < 0 || c >= m.c) return 0;
    return get_pixel(m, x, y, c);
}

/* Multiplicative composite of 'source' onto 'dest' at offset (dx, dy):
 * dest pixel = source pixel * dest pixel. */
void composite_image(image source, image dest, int dx, int dy)
{
    int x,y,k;
    for(k = 0; k < source.c; ++k){
        for(y = 0; y < source.h; ++y){
            for(x = 0; x < source.w; ++x){
                float val = get_pixel(source, x, y, k);
                float val2 = get_pixel_extend(dest, dx+x, dy+y, k);
                set_pixel(dest, dx+x, dy+y, k, val * val2);
            }
        }
    }
}

/* Return a copy of 'a' surrounded by a border of the given width, with the
 * border pixels set to 1 (white). */
image border_image(image a, int border)
{
    image b = make_image(a.w + 2*border, a.h + 2*border, a.c);
    int x,y,k;
    for(k = 0; k < b.c; ++k){
        for(y = 0; y < b.h; ++y){
            for(x = 0; x < b.w; ++x){
                float val = get_pixel_extend(a, x - border, y - border, k);
                if(x - border < 0 || x - border >= a.w || y - border < 0 || y - border >= a.h) val = 1;
                set_pixel(b, x, y, k, val);
            }
        }
    }
    return b;
}

/* Deep copy: same header, freshly allocated pixel buffer.
 * NOTE(review): calloc result is not checked before memcpy. */
image copy_image(image p)
{
    image copy = p;
    copy.data = (float *)calloc(p.h*p.w*p.c, sizeof(float));
    memcpy(copy.data, p.data, p.h*p.w*p.c*sizeof(float));
    return copy;
}

/* Place 'b' to the right of 'a' with horizontal gap 'dx' (may be negative
 * for overlap), on a white background.  An empty 'a' (w == 0) yields a copy
 * of 'b'. */
image tile_images(image a, image b, int dx)
{
    if(a.w == 0) return copy_image(b);
    image c = make_image(a.w + b.w + dx, (a.h > b.h) ? a.h : b.h, (a.c > b.c) ? a.c : b.c);
    fill_cpu(c.w*c.h*c.c, 1, c.data, 1);
    embed_image(a, c, 0, 0);
    composite_image(b, c, a.w + dx, 0);
    return c;
}

/* Render 'string' as an image using the pre-rendered glyph atlas
 * 'characters' (indexed [font size 0..7][ASCII code]), then add a border. */
image get_label(image **characters, char *string, int size)
{
    size = size/10;
    if(size > 7) size = 7;  /* clamp to largest available font */
    image label = make_empty_image(0,0,0);
    while(*string){
        image l = characters[size][(int)*string];
        image n = tile_images(label, l, -size - 1 + (size+1)/2);
        free_image(label);
        label = n;
        ++string;
    }
    image b = border_image(label, label.h*.25);
    free_image(label);
    return b;
}

/* Blend a rendered label into 'a' at row r / column c, tinted by rgb[].
 * If there is room, the label is drawn above the given row. */
void draw_label(image a, int r, int c, image label, const float *rgb)
{
    int w = label.w;
    int h = label.h;
    if (r - h >= 0) r = r - h;

    int i, j, k;
    for(j = 0; j < h && j + r < a.h; ++j){
        for(i = 0; i < w && i + c < a.w; ++i){
            for(k = 0; k < label.c; ++k){
                float val = get_pixel(label, i, j, k);
                set_pixel(a, i+c, j+r, k, rgb[k] * val);
            }
        }
    }
}

/* Draw a 1-pixel rectangle outline from (x1,y1) to (x2,y2) in color (r,g,b).
 * Coordinates are clamped to the image bounds.
 * NOTE(review): writes channels 0..2 unconditionally — assumes a.c >= 3. */
void draw_box(image a, int x1, int y1, int x2, int y2, float r, float g, float b)
{
    //normalize_image(a);
    int i;
    if(x1 < 0) x1 = 0;
    if(x1 >= a.w) x1 = a.w-1;
    if(x2 < 0) x2 = 0;
    if(x2 >= a.w) x2 = a.w-1;

    if(y1 < 0) y1 = 0;
    if(y1 >= a.h) y1 = a.h-1;
    if(y2 < 0) y2 = 0;
    if(y2 >= a.h) y2 = a.h-1;

    /* top and bottom edges */
    for(i = x1; i <= x2; ++i){
        a.data[i + y1*a.w + 0*a.w*a.h] = r;
        a.data[i + y2*a.w + 0*a.w*a.h] = r;

        a.data[i + y1*a.w + 1*a.w*a.h] = g;
        a.data[i + y2*a.w + 1*a.w*a.h] = g;

        a.data[i + y1*a.w + 2*a.w*a.h] = b;
        a.data[i + y2*a.w + 2*a.w*a.h] = b;
    }
    /* left and right edges */
    for(i = y1; i <= y2; ++i){
        a.data[x1 + i*a.w + 0*a.w*a.h] = r;
        a.data[x2 + i*a.w + 0*a.w*a.h] = r;

        a.data[x1 + i*a.w + 1*a.w*a.h] = g;
        a.data[x2 + i*a.w + 1*a.w*a.h] = g;

        a.data[x1 + i*a.w + 2*a.w*a.h] = b;
        a.data[x2 + i*a.w + 2*a.w*a.h] = b;
    }
}

/* Draw a rectangle outline 'w' pixels thick by nesting w 1-pixel boxes. */
void draw_box_width(image a, int x1, int y1, int x2, int y2, int w, float r, float g, float b)
{
    int i;
    for(i = 0; i < w; ++i){
        draw_box(a, x1+i, y1+i, x2-i, y2-i, r, g, b);
    }
}

/* Wrap an existing float buffer in an image header (no copy; caller keeps
 * ownership of 'data'). */
image float_to_image(int w, int h, int c, float *data)
{
    image out = make_empty_image(w,h,c);
    out.data = data;
    return out;
}

/* Binarize: 1 where pixel > thresh, else 0.  Returns a new image. */
image threshold_image(image im, float thresh)
{
    int i;
    image t = make_image(im.w, im.h, im.c);
    for(i = 0; i < im.w*im.h*im.c; ++i){
        t.data[i] = im.data[i]>thresh ? 1 : 0;
    }
    return t;
}

/* Draw every detection above 'thresh' on 'im': colored box, optional text
 * label (if the glyph atlas 'alphabet' is given), and optional instance
 * mask.  Boxes are given normalized (0..1) in (cx, cy, w, h) form. */
void draw_detections(image im, detection *dets, int num, float thresh, char **names, image **alphabet, int classes)
{
    int i,j;

    for(i = 0; i < num; ++i){
        char labelstr[4096] = {0};
        int class_t = -1;
        /* Collect all classes above threshold into one comma-joined label. */
        for(j = 0; j < classes; ++j){
            if (dets[i].prob[j] > thresh){
                if (class_t < 0) {
                    strcat(labelstr, names[j]);
                    class_t = j;
                } else {
                    strcat(labelstr, ", ");
                    strcat(labelstr, names[j]);
                }
                printf("%s: %.0f%%\n", names[j], dets[i].prob[j]*100);
            }
        }
        if(class_t >= 0){
            int width = im.h * .006;
            //printf("%d %s: %.0f%%\n", i, names[class], prob*100);
            /* Deterministic pseudo-random color per class. */
            int offset = class_t*123457 % classes;
            float red = get_color(2,offset,classes);
            float green = get_color(1,offset,classes);
            float blue = get_color(0,offset,classes);
            float rgb[3];
            //width = prob*20+2;
            rgb[0] = red;
            rgb[1] = green;
            rgb[2] = blue;
            box b = dets[i].bbox;
            //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);

            /* Convert normalized box to pixel coordinates and clamp. */
            int left = (b.x-b.w/2.)*im.w;
            int right = (b.x+b.w/2.)*im.w;
            int top = (b.y-b.h/2.)*im.h;
            int bot = (b.y+b.h/2.)*im.h;

            if(left < 0) left = 0;
            if(right > im.w-1) right = im.w-1;
            if(top < 0) top = 0;
            if(bot > im.h-1) bot = im.h-1;

            draw_box_width(im, left, top, right, bot, width, red, green, blue);
            if (alphabet) {
                image label = get_label(alphabet, labelstr, (im.h*.03));
                draw_label(im, top + width, left, label, rgb);
                free_image(label);
            }
            if (dets[i].mask){
                /* 14x14 mask prototype resized to the box, thresholded,
                 * then stamped into the output image. */
                image mask = float_to_image(14, 14, 1, dets[i].mask);
                image resized_mask = resize_image(mask, b.w*im.w, b.h*im.h);
                image tmask = threshold_image(resized_mask, .5);
                embed_image(tmask, im, left, top);
                free_image(mask);
                free_image(resized_mask);
                free_image(tmask);
            }
        }
    }
}
//////////////////////image end

//////////////////////////HLS begin
//#define MIN(x,y) ((x)<(y)?(x):(y))
//#define S 2
//#define K 3
//
//#define Tn 1
//#define Tm 16
//#define Tr 13
//#define Tc 13
//#define OnChipIB_Width ((Tc-1)*S+K)
//#define OnChipIB_Height ((Tr-1)*S+K)

#define MAX(x,y) ((x)>(y)?(x):(y))
#define MIN(x,y) ((x)<(y)?(x):(y))
/* Accelerator tiling parameters: S/K = max stride/kernel, Tn/Tm = input/
 * output channel tile, Tr/Tc = output row/col tile. */
#define S 2
#define K 3

#define Tn 4
#define Tm 32
#define Tr 26
#define Tc 26
/* On-chip input-buffer footprint implied by the output tile and max K/S. */
#define OnChipIB_Width ((Tc-1)*S+K)
#define OnChipIB_Height ((Tr-1)*S+K)
#define ALPHA_BETA_MAX_NUM 1024
#define INTERWIDTH 20

/* Copy byte_num bytes from user memory 'orig' to the physical address
 * 'in_buffer' by mmapping /dev/mem.  The mapping length is rounded up to a
 * 4 KiB page multiple as mmap requires.
 * NOTE(review): the open() fd is not checked before mmap; munmap is called
 * with byte_num but the mapping was made with RequestByteNum — the lengths
 * differ when byte_num is not page-aligned (partial unmap). */
void copy_mem2dev(uint8_t *orig,uint32_t byte_num, unsigned long in_buffer)
{
    int fd = open("/dev/mem", O_RDWR);
    unsigned char *virtual_addr;
    uint32_t RequestByteNum;// must page
    if(byte_num%(4*1024)==0)
        RequestByteNum = byte_num;
    else
    {
        RequestByteNum = (byte_num/(4*1024)+1)*(4*1024);
    }
    virtual_addr = (unsigned char *)mmap(NULL, RequestByteNum, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)in_buffer);
    if(virtual_addr == MAP_FAILED)
    {
        perror("Virtual_addr_in mappong for absolute memory access failed!\n");
        return;
    }
    memcpy(virtual_addr,orig,byte_num);
    munmap((void *)virtual_addr, byte_num);
    close(fd);
}

/* Copy byte_num bytes from physical address 'in_buffer' into user memory
 * 'dst' via /dev/mem.  Mirror image of copy_mem2dev; the same fd-check and
 * munmap-length caveats apply. */
void copy_dev2mem(uint8_t *dst,uint32_t byte_num, unsigned long in_buffer)
{
    int fd = open("/dev/mem", O_RDWR);
    unsigned char *virtual_addr;
    uint32_t RequestByteNum;// must page
    if(byte_num%(4*1024)==0)
        RequestByteNum = byte_num;
    else
    {
        RequestByteNum = (byte_num/(4*1024)+1)*(4*1024);
    }
    virtual_addr = (unsigned char *)mmap(NULL, RequestByteNum, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)in_buffer);
    if(virtual_addr == MAP_FAILED)
    {
        perror("Virtual_addr_in mappong for absolute memory access failed!\n");
        return;
    }
    memcpy((uint8_t *)dst,virtual_addr,byte_num);
    munmap((void *)virtual_addr, byte_num);
    close(fd);
}

/* Stream the contents of 'bin_file' into physical memory at 'in_buffer',
 * 1 MiB at a time.  Returns 0 on success, -1 on allocation failure.
 * NOTE(review): 'byte_num' is unused — the loop copies until EOF; fopen
 * failure is only reported, the code then dereferences a NULL FILE*; and
 * offset (unsigned long) is printed with %d. */
int copy_file2mem(char *bin_file,uint32_t byte_num,unsigned long in_buffer)
{
    unsigned char *buffer = (unsigned char *)malloc(1024*1024);
    if(buffer==NULL){
        printf("cannot malloc buffer 1024*1024 byte\n");
        return -1;
    }

    FILE *fp;
    if( (fp = fopen(bin_file, "rb")) == NULL)fprintf(stderr,"CANNOT OPEN bin_file\n");

    int rd_num;
    unsigned long offset = 0;
    /* assignment-in-condition: loop until fread returns 0 (EOF) */
    while(rd_num = fread(buffer, sizeof(unsigned char), 1024*1024, fp))
    {
        copy_mem2dev(buffer,rd_num, in_buffer+offset);
        // printf("rd_num=%d\n",rd_num);
        offset += rd_num;
    }
    printf("copy_file2mem offset=%d\n",offset);

    fclose(fp);
    free(buffer);

    return 0;
}

/* Dump byte_num bytes of physical memory at 'in_buffer' into 'bin_file',
 * 1 MiB at a time.  Returns 0 on success, -1 on allocation failure.
 * Same fopen/format-specifier caveats as copy_file2mem. */
int copy_mem2file(char *bin_file,uint32_t byte_num,unsigned long in_buffer)
{
    void *buffer = malloc(1024*1024);
    if(buffer==NULL){
        printf("cannot malloc buffer 1024*1024 byte\n");
        return -1;
    }

    FILE *fp;
    if( (fp = fopen(bin_file, "wb")) == NULL)fprintf(stderr,"CANNOT OPEN bin_file\n");

    int x = byte_num;
    int addbyte;
    unsigned long offset = 0;
    /* assignment-in-condition: copy MIN(x, 1 MiB) per pass until x == 0 */
    while(addbyte=((x<1024*1024)?x:(1024*1024)))
    {
        copy_dev2mem((uint8_t *)buffer,addbyte, in_buffer+offset);
        fwrite(buffer , sizeof(unsigned char), addbyte, fp);
        x -= addbyte;
        offset += addbyte;
    }
    printf("copy_mem2file offset=%d\n",offset);

    fclose(fp);
    free(buffer);

    return 0;
}

//double what_time_is_it_now()
//{
//    struct timeval time;
//    if (gettimeofday(&time,NULL)){
//        return 0;
//    }
//    return (double)time.tv_sec + (double)time.tv_usec * .000001;
//}

/* Program one layer into the YOLOv2 FPGA accelerator via its AXI control
 * registers, start it, and busy-wait until it reports done.
 * All addresses are physical; WEIGHT_BASE/BETA_BASE are the bases of the
 * pre-loaded weight/bias regions, and Weight_offset/Beta_offset are in
 * 4-byte words (hence the *4 below).  Returns 0 on success, -1 if the
 * register window could not be mapped.
 * NOTE(review): '>> 2) && 0x1' and '>> 1) && 0x1' use logical AND where
 * bitwise '& 0x1' was clearly intended; the truth value happens to be the
 * same here, but only because the masked bit is the test.  The open() fd
 * is also unchecked. */
int YOLO2_FPGA(int In_Address,int Out_Address,int Weight_offset,int Beta_offset,const int InFM_num,const int OutFM_num,
    const int Kernel_size,const int Kernel_stride,
    const int Input_w,const int Input_h,const int Output_w,const int Output_h,
    const int Padding,const bool IsNL,const bool IsBN,
    const int TM,const int TN,const int TR,const int TC,
    const int mLoops,const int nLoops,const int rLoops,const int cLoops,const int LayerType,
    int InputQ,int OutputQ,int WeightQ,int BetaQ,unsigned int WEIGHT_BASE,unsigned int BETA_BASE)
{
    /* Input rows are fetched T2Rate at a time for 26/13-wide feature maps. */
    int T2Rate;
    switch(Input_w)
    {
        case 26:
            T2Rate = 2;
            break;
        case 13:
            T2Rate = 4;
            break;
        default:
            T2Rate = 1;
            break;
    }
    const unsigned char TRow = (TR-1)*Kernel_stride+Kernel_size;
    int trow_loops = (int)ceil(((float)TRow/T2Rate));

    unsigned int ap_idle;
    unsigned int ap_done;
    unsigned long int PhysicalAddress = YOLO2_BASEADDR;
    int map_len = 0x180;
    int fd = open("/dev/mem", O_RDWR);

    unsigned char *xbase_address;
    xbase_address = (unsigned char *)mmap(NULL, map_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)PhysicalAddress);
    if(xbase_address == MAP_FAILED)
    {
        perror("1:Init Mapping memory for absolute memory access failed.\n");
        return -1;
    }

    /* Busy-wait until the accelerator reports idle (AP_CTRL bit 2). */
    while(1)
    {
        ap_idle = ((ReadReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_AP_CTRL) >> 2) && 0x1);
        if(ap_idle)
            break;
    }

//#define WEIGHT_BASE (0x10000000)
//#define BETA_BASE (0x1C25F000)
    /* The core has four input and (up to) four output AXI ports; all input
     * ports are pointed at the same buffer. */
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT_R_DATA, In_Address);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT1_DATA, In_Address);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT2_DATA, In_Address);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT3_DATA, In_Address);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT_R_DATA, Out_Address);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT1_DATA, Out_Address);
//    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT2_DATA, Out_Address);
//    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT3_DATA, Out_Address);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_WEIGHT_DATA, WEIGHT_BASE + Weight_offset*4);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_BETA_DATA, BETA_BASE + Beta_offset*4);

    /* Layer geometry, tiling, and fixed-point Q formats. */
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INFM_NUM_DATA, InFM_num);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTFM_NUM_DATA, OutFM_num);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_KERNEL_SIZE_DATA, Kernel_size);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_KERNEL_STRIDE_DATA, Kernel_stride);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT_W_DATA, Input_w);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT_H_DATA, Input_h);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT_W_DATA, Output_w);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT_H_DATA, Output_h);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_PADDING_DATA, Padding);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_ISNL_DATA, IsNL);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_ISBN_DATA, IsBN);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TM_DATA, TM);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TN_DATA, TN);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TR_DATA, TR);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TC_DATA, TC);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_MLOOPS_DATA, mLoops);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_NLOOPS_DATA, nLoops);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_RLOOPS_DATA, rLoops);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_CLOOPS_DATA, cLoops);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_LAYERTYPE_DATA, LayerType);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUTQ_DATA, InputQ);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUTQ_DATA, OutputQ);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_WEIGHTQ_DATA, WeightQ);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_BETAQ_DATA, BetaQ);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TROW_LOOPS_DATA, trow_loops);

//    double time1,time2;
//    time1 = what_time_is_it_now();

    /* Disable global interrupts and start the core (AP_CTRL bit 0). */
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_GIE, 0x0);
    WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_AP_CTRL, 0x1);//Start

    /* Busy-wait for done (AP_CTRL bit 1). */
    while(1)
    {
        ap_done = ((ReadReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_AP_CTRL) >> 1) && 0x1);
        if(ap_done)
            break;
    }

//    time2 = what_time_is_it_now();
//    printf("START TO DONE in %f seconds.\n",time2 - time1);

    munmap((void *)xbase_address, map_len);
    close(fd);
    return 0;
}
////////////////////////////////////////////////////////PL v3 end

/* Run a full YOLOv2 forward pass on the FPGA.
 * - Loads the packed 16-bit weights/biases from file into physical memory.
 * - Packs the float input into Q14 int16 pairs (two pixels per 32-bit word)
 *   and DMAs it to MEM_BASE.
 * - Walks the network layer list, ping-ponging activations between the top
 *   and bottom of a scratch region of MEM_LEN bytes (in_ptr/out_ptr per
 *   layer), dispatching CONV/MAXPOOL/REORG layers to YOLO2_FPGA and running
 *   the final REGION layer on the CPU after unpacking the fixed-point
 *   output.
 * The weight_offset/beta_offset tables and the in_ptr/out_ptr layout are
 * hand-matched to the 31-layer YOLOv2 graph (ROUTE layers 25/28 need no
 * pointers of their own).
 * NOTE(review): MAXPOOL/REORG pass NULL for the int Weight_offset and
 * Beta_offset parameters — harmless since the core ignores them for those
 * layer types, but it is an int/pointer mismatch. */
void yolov2_hls_ps(network *net, float *input,unsigned int WEIGHT_BASE,unsigned int BETA_BASE,unsigned int MEM_BASE)
{
    int x;
    network orig = *net;
    net->input = input;

    /* Per-layer weight/bias word counts, used to advance woffset/boffset. */
    int weight_offset[32] = {864, 18432, 73728, 8192, 73728, 294912, 32768, 294912, 1179648, 131072, 1179648, 131072,
        1179648, 4718592, 524288, 4718592, 524288, 4718592, 9437184, 9437184, 32768, 11796480, 435200, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    int beta_offset[32] = {32, 64, 128, 64, 128, 256, 128, 256, 512, 256, 512, 256, 512, 1024, 512, 1024, 512, 1024,
        1024, 1024, 64, 1024, 425, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    int offset_index = 0;

    double time1,time2;
    time1 = what_time_is_it_now();
    copy_file2mem("weightsv2_comb_reorg_ap16.bin",(203767168)/2,WEIGHT_BASE);//->C253D80
    printf("yolov2_w copy ok\n");
    copy_file2mem("biasv2_comb_ap16.bin",(43044+4)/2,BETA_BASE);//->C268724 203812864 = C25F000
    printf("yolov2_b copy ok\n");
    time2 = what_time_is_it_now();
    printf("Predicted in %f seconds.\n",time2 - time1);

    float *region_buf = (float *)calloc(13*13*432,sizeof(float));
    if(!region_buf)
        printf("region_buf calloc fail\n");

/* Scratch region: largest activation ping-pong footprint (half-precision). */
#define MEM_LEN (416*416*32*2+208*208*32*2)
    unsigned int Memory_top = MEM_BASE;
    unsigned int Memory_bottom = MEM_BASE + MEM_LEN;
    int in_ptr[32];
    int out_ptr[32];
/////////////////////
/* Fixed-point Q formats per layer, read from calibration files. */
#define QNUM 23
    int inputQ[QNUM+1];
    int weightQ[QNUM];
    int betaQ[QNUM];
    FILE *Qin;
    Qin = fopen("yolov2_ap16_inout_maxQ_24.bin","rb");
    if(!Qin) file_error("Qin error 1\n");
    fread(inputQ,sizeof(int),QNUM+1,Qin);
    fclose(Qin);
    /* Layers 20/21 feed the same concat; force them to a common Q. */
    if(inputQ[20] < inputQ[21])
        inputQ[21] = inputQ[20];
    else
        inputQ[20] = inputQ[21];
    for(x=0;x<QNUM+1;x++)
        printf("[%2d inputQ]=%2d\n",x,inputQ[x]);

    Qin = fopen("weightsv2_comb_reorg_ap16_maxQ_23.bin","rb");
    if(!Qin) file_error("Qin error 2\n");
    fread(weightQ,sizeof(int),QNUM,Qin);
    fclose(Qin);
    for(x=0;x<QNUM;x++)
        printf("[%2d weightQ]=%2d\n",x,weightQ[x]);

    Qin = fopen("biasv2_comb_ap16_maxQ_23.bin","rb");
    if(!Qin) file_error("Qin error 4\n");
    fread(betaQ,sizeof(int),QNUM,Qin);
    fclose(Qin);
    for(x=0;x<QNUM;x++)
        printf("[%2d betaQ]=%2d\n",x,betaQ[x]);

    /* Scale factor to convert the last layer's fixed-point output to float. */
    const double LastLayerOutputPara = pow(2.0,-inputQ[23]);
/////////////////////
#define ROUTE16_LEN (26*26*512*4/2)
#define CONV27_LEN (13*13*256*4/2)
#define CONV24_LEN (13*13*1024*4/2)

    int *input_tmp_mem = (int *)calloc(416*416*32/2,sizeof(int));
    if(!input_tmp_mem) file_error("input_tmp_mem error \n");
    int *region_input_buffer = (int *)calloc(13*13*432*4/2,sizeof(int));
    if(!region_input_buffer) file_error("region_input_buffer error \n");

    /* Pack the float input as Q14 int16, two pixels per 32-bit word. */
    int tmp_in;
    short current_in,next_in;
    bool NextPixelInFlag = true;
    int InputPixelOffset = 0;
    for(x=0;x<416*416*3;x++)//1st Layer input Q14
    {
        if(NextPixelInFlag)
        {
            current_in = (short)(input[x]*pow(2.0,14));
            NextPixelInFlag = false;
        }
        else
        {
            next_in = (short)(input[x]*pow(2.0,14));
            tmp_in = (next_in<<16) + (current_in);
            input_tmp_mem[InputPixelOffset] = tmp_in;
            InputPixelOffset++;
            NextPixelInFlag = true;
        }
    }
    copy_mem2dev((uint8_t *)input_tmp_mem,416*416*3*4/2, MEM_BASE);
    free(input_tmp_mem);

    /* Ping-pong layer buffers: even layers read top / write bottom, odd
     * layers the reverse. */
    for(x=0;x<18;x++)
    {
        if(x%2==0)
        {
            in_ptr[x] = Memory_top;
            out_ptr[x] = Memory_bottom - net->layers[x].outputs*4/2 ;
        }
        else
        {
            in_ptr[x] = out_ptr[x-1];
            out_ptr[x] = Memory_top;
        }
    }
    /* Layers 18..24 must also keep the layer-16 activation (ROUTE16) alive
     * at the very bottom of the region. */
    for(x=18;x<25;x++)
    {
        if(x%2==0)
        {
            in_ptr[x] = Memory_top;
            out_ptr[x] = Memory_bottom - ROUTE16_LEN - net->layers[x].outputs*4/2;
        }else
        {
            in_ptr[x] = out_ptr[x-1];
            out_ptr[x] = Memory_top;
        }
    }

    in_ptr[26] = Memory_bottom - ROUTE16_LEN;
    out_ptr[26] = Memory_top;

    in_ptr[27] = Memory_top;
    out_ptr[27] = Memory_bottom - ROUTE16_LEN - CONV24_LEN - CONV27_LEN;

    in_ptr[29] = out_ptr[27];
    out_ptr[29] = Memory_top;

    in_ptr[30] = Memory_top;
    out_ptr[30] = Memory_bottom - (net->layers[30].outputs + 13*13*3)*4/2;
    /* Align the final output buffer down to a 4 KiB page boundary. */
    if(out_ptr[30]%(4*1024)!=0)
    {
        out_ptr[30] = (out_ptr[30]/(4*1024)-1)*(4*1024);
    }
    in_ptr[31] = out_ptr[30];

    network netp = *net;
    int i;
    int j;
    int woffset = 0;
    int boffset = 0;
    int TR,TC,TM,TN;
    int output_w,output_h;
    int rLoops,cLoops,mLoops,nLoops;
    double time_sum = 0.0;
    int INPUTQ;
    for(i = 0; i < netp.n; ++i)
    {
        netp.index = i;
        layer l = netp.layers[i];
        printf("Layer[%2d]: ",i);
        switch(l.type)
        {
            case CONVOLUTIONAL:
                printf("outputMemory:%8d;BN=%d;Activation=%d;conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n",l.outputs,l.batch_normalize,l.activation,
                    l.n, l.size, l.size, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.);
                output_w = (l.w - l.size + 2*l.pad)/l.stride + 1 ;
                output_h = (l.h - l.size + 2*l.pad)/l.stride + 1 ;
                /* Clamp the tile sizes to what the on-chip buffers and the
                 * layer dimensions allow. */
                TR = MIN(((OnChipIB_Height-l.size)/l.stride+1),Tr);//keep Kernel_stride>=1
                TR = MIN(output_h,TR);
                TC = MIN(((OnChipIB_Width-l.size)/l.stride+1),Tc);
                TC = MIN(output_w,TC);
                TM = MIN(l.n,Tm);
                TN = MIN(l.c,Tn);

                rLoops = (int)ceil(((float)output_h)/TR);
                cLoops = (int)ceil(((float)output_w)/TC);
                mLoops = (int)ceil(((float)l.n)/TM);
                nLoops = (int)ceil(((float)l.c)/TN);

                INPUTQ = inputQ[offset_index];
                /* Layer 26 reads the ROUTE16 buffer, so it uses layer 12's Q. */
                if(i==26)
                    INPUTQ = inputQ[12];

                time1 = what_time_is_it_now();
                YOLO2_FPGA(in_ptr[i],out_ptr[i],woffset/2,boffset/2,
                    l.c,l.n,l.size,
                    l.stride,l.w,l.h,output_w,output_h,
                    l.pad,l.activation==LEAKY?1:0,l.batch_normalize?1:0,
                    TM,TN,TR,TC,
                    mLoops,nLoops,rLoops,cLoops,0,
                    INPUTQ,inputQ[offset_index+1],weightQ[offset_index],betaQ[offset_index],
                    WEIGHT_BASE,BETA_BASE);
                time2 = what_time_is_it_now();
                printf("Predicted in %f seconds.\n",time2 - time1);
                time_sum += (time2 - time1);

                woffset += weight_offset[offset_index];
                boffset += beta_offset[offset_index];
                offset_index++;
                break;
            case MAXPOOL:
                printf("outputMemory:%8d;max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n",l.outputs, l.size, l.size, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c);
                output_w = l.out_h;
                output_h = l.out_w;

                TR = MIN(((OnChipIB_Height-l.size)/l.stride+1),Tr);//keep Kernel_stride>=1
                TC = MIN(((OnChipIB_Width-l.size)/l.stride+1),Tc);
                TR = MIN(output_h,TR);
                TC = MIN(output_w,TC);
                /* Pooling is channel-wise: input and output tiles coincide. */
                TM = MIN(Tm,Tn);
                TM = MIN(l.c,TM);
                TN = TM;

                rLoops = (int)ceil(((float)output_h)/TR);
                cLoops = (int)ceil(((float)output_w)/TC);
                mLoops = (int)ceil(((float)l.c)/TM);

                time1 = what_time_is_it_now();
                YOLO2_FPGA(in_ptr[i],out_ptr[i],NULL,NULL,l.c,l.c,
                    l.size,l.stride,l.w,l.h,output_w,output_h,
                    0,0,0,TM,TN,TR,TC,mLoops,1,rLoops,cLoops,1,
                    inputQ[offset_index],inputQ[offset_index],INTERWIDTH,INTERWIDTH,
                    WEIGHT_BASE,BETA_BASE);
                time2 = what_time_is_it_now();
                printf("Predicted in %f seconds.\n",time2 - time1);
                time_sum += (time2 - time1);
                break;
            case REORG:
                printf("outputMemory:%8d;reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n",l.outputs, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c);
                /* Reorg is driven with a flattened 52x(32*26) geometry. */
                output_w = 26;
                output_h = 32*13;

                TR = MIN(((OnChipIB_Height-l.stride)/l.stride+1),Tr);//keep Kernel_stride>=1
                TR = MIN(output_h,TR);
                TC = MIN(((OnChipIB_Width-l.stride)/l.stride+1),Tc);
                TC = MIN(output_w,TC);
                TM = 4;
                TN = TM;

                rLoops = (int)ceil(((float)output_h)/TR);
                cLoops = (int)ceil(((float)output_w)/TC);
                mLoops = 1;

                time1 = what_time_is_it_now();
                YOLO2_FPGA(in_ptr[i],out_ptr[i],NULL,NULL,1,4,
                    l.stride,l.stride,52,32*26,output_w,output_h,
                    0,0,0,TM,TN,TR,TC,mLoops,1,rLoops,cLoops,2,
                    inputQ[offset_index],inputQ[offset_index],INTERWIDTH,INTERWIDTH,
                    WEIGHT_BASE,BETA_BASE);
                time2 = what_time_is_it_now();
                printf("Predicted in %f seconds.\n",time2 - time1);
                time_sum += (time2 - time1);
                break;
            case ROUTE:
                /* ROUTE only re-points buffers; handled by the in_ptr/out_ptr
                 * layout above. */
                printf("outputMemory:%8d;route ",l.outputs);
                for(j = 0; j < l.n; ++j){
                    printf(" %d", l.input_layers[j]);
                }
                printf("\n");
                break;
            case REGION:
//                first=time(NULL);
                time1 = what_time_is_it_now();
                printf("outputMemory:%8d;Detection\n",l.outputs);
                /* Pull the packed int16 output back, unpack two pixels per
                 * word, and scale to float for the CPU region layer. */
                copy_dev2mem((uint8_t *)region_input_buffer,13*13*432*4/2, in_ptr[i]);
                bool NextPixelFlag = true;
                int OutputPixelOffset = 0;
                short current_p,next_p,output_p;
                int *Output_ptr = (int *)(region_input_buffer);
                for(j=0;j<l.outputs;j++)
                {
                    if(NextPixelFlag)
                    {
                        int tmp_p = Output_ptr[OutputPixelOffset];
                        OutputPixelOffset++;
                        current_p = tmp_p;
                        next_p = tmp_p >> 16;
                        output_p = current_p;
                        NextPixelFlag = false;
                    }else
                    {
                        output_p = next_p;
                        NextPixelFlag = true;
                    }
                    region_buf[j] = output_p*LastLayerOutputPara;
                }
                netp.input = region_buf;
                //netp.input = in_ptr[i];
                forward_region_layer(l,netp);
                time2 = what_time_is_it_now();
                printf("Predicted in %f seconds.\n",time2 - time1);
                time_sum += (time2 - time1);
                break;
        }
        netp.input = l.output;
    }
    printf("TIME_SUM Predicted in %f seconds.\n",time_sum);

    *net = orig;

    free(region_input_buffer);
    free(region_buf);
//    free(Memory_buf);
//    free(Weight_buf);
//    free(Alpha_buf);
//    free(Beta_buf);
}
//////////////////////////HLS end
#endif
/* ==== kpoint.c (spglib) ==== */
/* Copyright (C) 2008 Atsushi Togo */
/* All rights reserved. */

/* This file is part of spglib. */

/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */

/* * Redistributions of source code must retain the above copyright */
/*   notice, this list of conditions and the following disclaimer. */

/* * Redistributions in binary form must reproduce the above copyright */
/*   notice, this list of conditions and the following disclaimer in */
/*   the documentation and/or other materials provided with the */
/*   distribution. */

/* * Neither the name of the phonopy project nor the names of its */
/*   contributors may be used to endorse or promote products derived */
/*   from this software without specific prior written permission. */

/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */

#include <stdio.h>
#include <stdlib.h>

#include "mathfunc.h"
#include "kpoint.h"
#include "kgrid.h"

/* warning_print() is compiled out unless KPTWARNING is defined. */
#ifdef KPTWARNING
#include <stdio.h>
#define warning_print(...) fprintf(stderr,__VA_ARGS__)
#else
#define warning_print(...)
#endif

#define KPT_NUM_BZ_SEARCH_SPACE 125
/* All 5^3 integer translations in [-2, 2]^3 used to search for the
 * reciprocal-lattice image of a grid point closest to Gamma (i.e. inside
 * the first Brillouin zone). */
static int bz_search_space[KPT_NUM_BZ_SEARCH_SPACE][3] = {
  { 0, 0, 0}, { 0, 0, 1}, { 0, 0, 2}, { 0, 0, -2}, { 0, 0, -1},
  { 0, 1, 0}, { 0, 1, 1}, { 0, 1, 2}, { 0, 1, -2}, { 0, 1, -1},
  { 0, 2, 0}, { 0, 2, 1}, { 0, 2, 2}, { 0, 2, -2}, { 0, 2, -1},
  { 0, -2, 0}, { 0, -2, 1}, { 0, -2, 2}, { 0, -2, -2}, { 0, -2, -1},
  { 0, -1, 0}, { 0, -1, 1}, { 0, -1, 2}, { 0, -1, -2}, { 0, -1, -1},
  { 1, 0, 0}, { 1, 0, 1}, { 1, 0, 2}, { 1, 0, -2}, { 1, 0, -1},
  { 1, 1, 0}, { 1, 1, 1}, { 1, 1, 2}, { 1, 1, -2}, { 1, 1, -1},
  { 1, 2, 0}, { 1, 2, 1}, { 1, 2, 2}, { 1, 2, -2}, { 1, 2, -1},
  { 1, -2, 0}, { 1, -2, 1}, { 1, -2, 2}, { 1, -2, -2}, { 1, -2, -1},
  { 1, -1, 0}, { 1, -1, 1}, { 1, -1, 2}, { 1, -1, -2}, { 1, -1, -1},
  { 2, 0, 0}, { 2, 0, 1}, { 2, 0, 2}, { 2, 0, -2}, { 2, 0, -1},
  { 2, 1, 0}, { 2, 1, 1}, { 2, 1, 2}, { 2, 1, -2}, { 2, 1, -1},
  { 2, 2, 0}, { 2, 2, 1}, { 2, 2, 2}, { 2, 2, -2}, { 2, 2, -1},
  { 2, -2, 0}, { 2, -2, 1}, { 2, -2, 2}, { 2, -2, -2}, { 2, -2, -1},
  { 2, -1, 0}, { 2, -1, 1}, { 2, -1, 2}, { 2, -1, -2}, { 2, -1, -1},
  {-2, 0, 0}, {-2, 0, 1}, {-2, 0, 2}, {-2, 0, -2}, {-2, 0, -1},
  {-2, 1, 0}, {-2, 1, 1}, {-2, 1, 2}, {-2, 1, -2}, {-2, 1, -1},
  {-2, 2, 0}, {-2, 2, 1}, {-2, 2, 2}, {-2, 2, -2}, {-2, 2, -1},
  {-2, -2, 0}, {-2, -2, 1}, {-2, -2, 2}, {-2, -2, -2}, {-2, -2, -1},
  {-2, -1, 0}, {-2, -1, 1}, {-2, -1, 2}, {-2, -1, -2}, {-2, -1, -1},
  {-1, 0, 0}, {-1, 0, 1}, {-1, 0, 2}, {-1, 0, -2}, {-1, 0, -1},
  {-1, 1, 0}, {-1, 1, 1}, {-1, 1, 2}, {-1, 1, -2}, {-1, 1, -1},
  {-1, 2, 0}, {-1, 2, 1}, {-1, 2, 2}, {-1, 2, -2}, {-1, 2, -1},
  {-1, -2, 0}, {-1, -2, 1}, {-1, -2, 2}, {-1, -2, -2}, {-1, -2, -1},
  {-1, -1, 0}, {-1, -1, 1}, {-1, -1, 2}, {-1, -1, -2}, {-1, -1, -1}
};

static MatINT *get_point_group_reciprocal(const MatINT * rotations,
					  const int is_time_reversal);
static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
						 const double symprec,
						 const int num_q,
						 SPGCONST double qpoints[][3]);
static int get_ir_reciprocal_mesh(int grid_address[][3],
				  int map[],
				  const int mesh[3],
				  const int is_shift[3],
				  const MatINT * rot_reciprocal);
static int get_ir_reciprocal_mesh_normal(int grid_address[][3],
					 int ir_mapping_table[],
					 const int mesh[3],
					 const int is_shift[3],
					 const MatINT *rot_reciprocal);
static int get_ir_reciprocal_mesh_distortion(int grid_address[][3],
					     int ir_mapping_table[],
					     const int mesh[3],
					     const int is_shift[3],
					     const MatINT *rot_reciprocal);
static int get_num_ir(int ir_mapping_table[], const int mesh[3]);
static int relocate_BZ_grid_address(int bz_grid_address[][3],
				    int bz_map[],
				    SPGCONST int grid_address[][3],
				    const int mesh[3],
				    SPGCONST double rec_lattice[3][3],
				    const int is_shift[3]);
static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3],
					     const int mesh[3]);
static int check_mesh_symmetry(const int mesh[3],
			       const int is_shift[3],
			       const MatINT *rot_reciprocal);

/* grid_address (e.g. 4x4x4 mesh, unless GRID_ORDER_XYZ is defined) */
/* [[ 0  0  0] */
/*  [ 1  0  0] */
/*  [ 2  0  0] */
/*  [-1  0  0] */
/*  [ 0  1  0] */
/*  [ 1  1  0] */
/*  [ 2  1  0] */
/*  [-1  1  0] */
/*  ....      ] */
/* */
/* Each value of 'map' correspnds to the index of grid_point. */

/* Fill grid_address/map for the given mesh and return the number of
 * irreducible k-points under the reciprocal rotations. */
int kpt_get_irreducible_reciprocal_mesh(int grid_address[][3],
					int map[],
					const int mesh[3],
					const int is_shift[3],
					const MatINT *rot_reciprocal)
{
  int num_ir;

  num_ir = get_ir_reciprocal_mesh(grid_address,
				  map,
				  mesh,
				  is_shift,
				  rot_reciprocal);

  return num_ir;
}

/* As above, but first reduce the rotation set to those that leave every
 * supplied q-point invariant (the "stabilizer"), optionally augmented by
 * time reversal.  The q-comparison tolerance scales inversely with the
 * mesh density. */
int kpt_get_stabilized_reciprocal_mesh(int grid_address[][3],
				       int map[],
				       const int mesh[3],
				       const int is_shift[3],
				       const int is_time_reversal,
				       const MatINT * rotations,
				       const int num_q,
				       SPGCONST double qpoints[][3])
{
  int num_ir;
  MatINT *rot_reciprocal, *rot_reciprocal_q;
  double tolerance;

  rot_reciprocal = NULL;
  rot_reciprocal_q = NULL;

  rot_reciprocal = get_point_group_reciprocal(rotations, is_time_reversal);
  tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]);
  rot_reciprocal_q = get_point_group_reciprocal_with_q(rot_reciprocal,
						       tolerance,
						       num_q,
						       qpoints);

  num_ir = get_ir_reciprocal_mesh(grid_address,
				  map,
				  mesh,
				  is_shift,
				  rot_reciprocal_q);

  mat_free_MatINT(rot_reciprocal_q);
  rot_reciprocal_q = NULL;
  mat_free_MatINT(rot_reciprocal);
  rot_reciprocal = NULL;
  return num_ir;
}

/* For one grid point, list the grid-point index of each of its rotational
 * images.  Addresses are doubled (+ shift) so that half-shifted meshes stay
 * integral. */
void kpt_get_grid_points_by_rotations(int rot_grid_points[],
				      const int address_orig[3],
				      const MatINT * rot_reciprocal,
				      const int mesh[3],
				      const int is_shift[3])
{
  int i;
  int address_double_orig[3], address_double[3];

  for (i = 0; i < 3; i++) {
    address_double_orig[i] = address_orig[i] * 2 + is_shift[i];
  }
  for (i = 0; i < rot_reciprocal->size; i++) {
    mat_multiply_matrix_vector_i3(address_double,
				  rot_reciprocal->mat[i],
				  address_double_orig);
    rot_grid_points[i] = kgd_get_grid_point_double_mesh(address_double, mesh);
  }
}

/* Same as kpt_get_grid_points_by_rotations, but the images are looked up on
 * the doubled (BZ) mesh and translated through bz_map into BZ grid-point
 * indices. */
void kpt_get_BZ_grid_points_by_rotations(int rot_grid_points[],
					 const int address_orig[3],
					 const MatINT * rot_reciprocal,
					 const int mesh[3],
					 const int is_shift[3],
					 const int bz_map[])
{
  int i;
  int address_double_orig[3], address_double[3], bzmesh[3];

  for (i = 0; i < 3; i++) {
    bzmesh[i] = mesh[i] * 2;
    address_double_orig[i] = address_orig[i] * 2 + is_shift[i];
  }
  for (i = 0; i < rot_reciprocal->size; i++) {
    mat_multiply_matrix_vector_i3(address_double,
				  rot_reciprocal->mat[i],
				  address_double_orig);
    rot_grid_points[i] =
      bz_map[kgd_get_grid_point_double_mesh(address_double, bzmesh)];
  }
}

/* Public wrapper around relocate_BZ_grid_address(). */
int kpt_relocate_BZ_grid_address(int bz_grid_address[][3],
				 int bz_map[],
				 SPGCONST int grid_address[][3],
				 const int mesh[3],
				 SPGCONST double rec_lattice[3][3],
				 const int is_shift[3])
{
  return relocate_BZ_grid_address(bz_grid_address,
				  bz_map,
				  grid_address,
				  mesh,
				  rec_lattice,
				  is_shift);
}

/* Public wrapper around get_point_group_reciprocal(). */
MatINT *kpt_get_point_group_reciprocal(const MatINT * rotations,
				       const int is_time_reversal)
{
  return get_point_group_reciprocal(rotations, is_time_reversal);
}

/* Public wrapper around get_point_group_reciprocal_with_q(). */
MatINT *kpt_get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
					      const double symprec,
					      const int num_q,
					      SPGCONST double qpoints[][3])
{
  return get_point_group_reciprocal_with_q(rot_reciprocal,
					   symprec,
					   num_q,
					   qpoints);
}

/* Build the reciprocal-space point group: transpose every rotation, append
 * its product with inversion when time reversal is on, then keep only the
 * unique matrices.  Return NULL if failed. */
static MatINT *get_point_group_reciprocal(const MatINT * rotations,
					  const int is_time_reversal)
{
  int i, j, num_rot;
  MatINT *rot_reciprocal, *rot_return;
  int *unique_rot;
  SPGCONST int inversion[3][3] = {
    {-1, 0, 0 },
    { 0,-1, 0 },
    { 0, 0,-1 }
  };

  rot_reciprocal = NULL;
  rot_return = NULL;
  unique_rot = NULL;

  if (is_time_reversal) {
    if ((rot_reciprocal = mat_alloc_MatINT(rotations->size * 2)) == NULL) {
      return NULL;
    }
  } else {
    if ((rot_reciprocal = mat_alloc_MatINT(rotations->size)) == NULL) {
      return NULL;
    }
  }

  if ((unique_rot = (int*)malloc(sizeof(int) * rot_reciprocal->size)) == NULL) {
    warning_print("spglib: Memory of unique_rot could not be allocated.");
    mat_free_MatINT(rot_reciprocal);
    rot_reciprocal = NULL;
    return NULL;
  }

  for (i = 0; i < rot_reciprocal->size; i++) {
    unique_rot[i] = -1;
  }

  for (i = 0; i < rotations->size; i++) {
    /* Reciprocal-space rotation = transpose of the real-space one. */
    mat_transpose_matrix_i3(rot_reciprocal->mat[i], rotations->mat[i]);

    if (is_time_reversal) {
      mat_multiply_matrix_i3(rot_reciprocal->mat[rotations->size+i],
			     inversion,
			     rot_reciprocal->mat[i]);
    }
  }

  num_rot = 0;
  for (i = 0; i < rot_reciprocal->size; i++) {
    for (j = 0; j < num_rot; j++) {
      if (mat_check_identity_matrix_i3(rot_reciprocal->mat[unique_rot[j]],
				       rot_reciprocal->mat[i])) {
	goto escape;  /* duplicate — skip */
      }
    }
    unique_rot[num_rot] = i;
    num_rot++;
  escape:
    ;
  }

  if ((rot_return = mat_alloc_MatINT(num_rot)) != NULL) {
    for (i = 0; i < num_rot; i++) {
      mat_copy_matrix_i3(rot_return->mat[i], rot_reciprocal->mat[unique_rot[i]]);
    }
  }

  free(unique_rot);
  unique_rot = NULL;
  mat_free_MatINT(rot_reciprocal);
  rot_reciprocal = NULL;

  return rot_return;
}

/* Keep only the rotations that map every q-point onto some q-point of the
 * set (modulo a reciprocal-lattice vector, within symprec).
 * Return NULL if failed. */
static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
						 const double symprec,
						 const int num_q,
						 SPGCONST double qpoints[][3])
{
  int i, j, k, l, is_all_ok, num_rot;
  int *ir_rot;
  double q_rot[3], diff[3];
  MatINT * rot_reciprocal_q;

  ir_rot = NULL;
  rot_reciprocal_q = NULL;
  is_all_ok = 0;
  num_rot = 0;

  if ((ir_rot = (int*)malloc(sizeof(int) * rot_reciprocal->size)) == NULL) {
    warning_print("spglib: Memory of ir_rot could not be allocated.");
    return NULL;
  }

  for (i = 0; i < rot_reciprocal->size; i++) {
    ir_rot[i] = -1;
  }
  for (i = 0; i < rot_reciprocal->size; i++) {
    for (j = 0; j < num_q; j++) {
      is_all_ok = 0;
      mat_multiply_matrix_vector_id3(q_rot,
				     rot_reciprocal->mat[i],
				     qpoints[j]);

      for (k = 0; k < num_q; k++) {
	for (l = 0; l < 3; l++) {
	  diff[l] = q_rot[l] - qpoints[k][l];
	  /* compare modulo reciprocal-lattice translations */
	  diff[l] -= mat_Nint(diff[l]);
	}

	if (mat_Dabs(diff[0]) < symprec &&
	    mat_Dabs(diff[1]) < symprec &&
	    mat_Dabs(diff[2]) < symprec) {
	  is_all_ok = 1;
	  break;
	}
      }

      if (! is_all_ok) {
	break;
      }
    }

    if (is_all_ok) {
      ir_rot[num_rot] = i;
      num_rot++;
    }
  }

  if ((rot_reciprocal_q = mat_alloc_MatINT(num_rot)) != NULL) {
    for (i = 0; i < num_rot; i++) {
      mat_copy_matrix_i3(rot_reciprocal_q->mat[i],
			 rot_reciprocal->mat[ir_rot[i]]);
    }
  }

  free(ir_rot);
  ir_rot = NULL;

  return rot_reciprocal_q;
}

/* Dispatch to the fast path when the rotations are compatible with the
 * mesh/shift, otherwise to the slower divisor-scaled path. */
static int get_ir_reciprocal_mesh(int grid_address[][3],
				  int ir_mapping_table[],
				  const int mesh[3],
				  const int is_shift[3],
				  const MatINT *rot_reciprocal)
{
  if (check_mesh_symmetry(mesh, is_shift, rot_reciprocal)) {
    return get_ir_reciprocal_mesh_normal(grid_address,
					 ir_mapping_table,
					 mesh,
					 is_shift,
					 rot_reciprocal);
  } else {
    return get_ir_reciprocal_mesh_distortion(grid_address,
					     ir_mapping_table,
					     mesh,
					     is_shift,
					     rot_reciprocal);
  }
}

static int get_ir_reciprocal_mesh_normal(int grid_address[][3],
					 int ir_mapping_table[],
					 const int mesh[3],
					 const int is_shift[3],
					 const MatINT *rot_reciprocal)
{
  /* In the following loop, mesh is doubled. */
  /* Even and odd mesh numbers correspond to */
  /* is_shift[i] are 0 or 1, respectively. */
  /* is_shift = [0,0,0] gives Gamma center mesh. */
  /* grid: reducible grid points */
  /* ir_mapping_table: the mapping from each point to ir-point. */

  int i, j, grid_point_rot;
  int address_double[3], address_double_rot[3];

  kgd_get_all_grid_addresses(grid_address, mesh);

#pragma omp parallel for private(j, grid_point_rot, address_double, address_double_rot)
  for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) {
    kgd_get_grid_address_double_mesh(address_double,
				     grid_address[i],
				     mesh,
				     is_shift);
    ir_mapping_table[i] = i;
    for (j = 0; j < rot_reciprocal->size; j++) {
      mat_multiply_matrix_vector_i3(address_double_rot,
				    rot_reciprocal->mat[j],
				    address_double);
      grid_point_rot = kgd_get_grid_point_double_mesh(address_double_rot, mesh);
      if (grid_point_rot < ir_mapping_table[i]) {
	/* With OpenMP, another thread may still be rewriting
	 * ir_mapping_table[grid_point_rot], so only the direct minimum is
	 * recorded here; get_num_ir() flattens the chains afterwards.
	 * Serially the chain can be followed (and the loop cut) at once. */
#ifdef _OPENMP
	ir_mapping_table[i] = grid_point_rot;
#else
	ir_mapping_table[i] = ir_mapping_table[grid_point_rot];
	break;
#endif
      }
    }
  }

  return get_num_ir(ir_mapping_table, mesh);
}

/* Variant used when a rotation mixes mesh axes of different lengths: each
 * doubled address is scaled by the product of the other two mesh divisors
 * so rotated addresses can be checked for divisibility (i.e. whether the
 * image actually lands on the shifted mesh) before being mapped back. */
static int
get_ir_reciprocal_mesh_distortion(int grid_address[][3],
				  int ir_mapping_table[],
				  const int mesh[3],
				  const int is_shift[3],
				  const MatINT *rot_reciprocal)
{
  int i, j, k, grid_point_rot, indivisible;
  int address_double[3], address_double_rot[3], divisor[3];

  /* divisor[i] = product of the other two mesh dimensions */
  kgd_get_all_grid_addresses(grid_address, mesh);

  for (i = 0; i < 3; i++) {
    divisor[i] = mesh[(i + 1) % 3] * mesh[(i + 2) % 3];
  }

#pragma omp parallel for private(j, k, grid_point_rot, address_double, address_double_rot)
  for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) {
    kgd_get_grid_address_double_mesh(address_double,
				     grid_address[i],
				     mesh,
				     is_shift);
    for (j = 0; j < 3; j++) {
      address_double[j] *= divisor[j];
    }
    ir_mapping_table[i] = i;
    for (j = 0; j < rot_reciprocal->size; j++) {
      mat_multiply_matrix_vector_i3(address_double_rot,
				    rot_reciprocal->mat[j],
				    address_double);
      for (k = 0; k < 3; k++) {
	indivisible = address_double_rot[k] % divisor[k];
	if (indivisible) {break;}
	address_double_rot[k] /= divisor[k];
	/* Parity must match the shift, otherwise the image is off-mesh. */
	if ((address_double_rot[k] % 2 != 0 && is_shift[k] == 0) ||
	    (address_double_rot[k] % 2 == 0 && is_shift[k] == 1)) {
	  indivisible = 1;
	  break;
	}
      }
      if (indivisible) {continue;}
      grid_point_rot =
	kgd_get_grid_point_double_mesh(address_double_rot, mesh);
      if (grid_point_rot < ir_mapping_table[i]) {
	/* See the note in get_ir_reciprocal_mesh_normal about the OpenMP
	 * vs serial update. */
#ifdef _OPENMP
	ir_mapping_table[i] = grid_point_rot;
#else
	ir_mapping_table[i] = ir_mapping_table[grid_point_rot];
	break;
#endif
      }
    }
  }

  return get_num_ir(ir_mapping_table, mesh);
}

/* Count points that map to themselves (the irreducible ones).  Under OpenMP
 * the table may still contain two-step chains (see above), so it is
 * flattened here with one extra pass. */
static int get_num_ir(int ir_mapping_table[], const int mesh[3])
{
  int i, num_ir;

  num_ir = 0;

#pragma omp parallel for reduction(+:num_ir)
  for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) {
    if (ir_mapping_table[i] == i) {
      num_ir++;
    }
  }

#ifdef _OPENMP
  for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) {
    ir_mapping_table[i] = ir_mapping_table[ir_mapping_table[i]];
  }
#endif

  return num_ir;
}

/* Relocate grid addresses to first Brillouin zone */
/* bz_grid_address[prod(mesh + 1)][3] */
/* bz_map[prod(mesh * 2)] */
static int relocate_BZ_grid_address(int bz_grid_address[][3],
				    int bz_map[],
				    SPGCONST int grid_address[][3],
				    const int mesh[3],
				    SPGCONST double rec_lattice[3][3],
				    const int is_shift[3])
{
  double tolerance, min_distance;
  double q_vector[3], distance[KPT_NUM_BZ_SEARCH_SPACE];
  int bzmesh[3], bz_address_double[3];
  int i, j, k, min_index, boundary_num_gp, total_num_gp, bzgp, gp;

  tolerance = get_tolerance_for_BZ_reduction(rec_lattice, mesh);
  for (i = 0; i < 3; i++) {
    bzmesh[i] = mesh[i] * 2;
  }
  for (i = 0; i < bzmesh[0] * bzmesh[1] * bzmesh[2]; i++) {
    bz_map[i] = -1;
  }

  boundary_num_gp = 0;
  total_num_gp = mesh[0] * mesh[1] * mesh[2];

  /* Multithreading doesn't work for this loop since gp calculated */
  /* with boundary_num_gp is unstable to store bz_grid_address.
*/ for (i = 0; i < total_num_gp; i++) { for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { for (k = 0; k < 3; k++) { q_vector[k] = ((grid_address[i][k] + bz_search_space[j][k] * mesh[k]) * 2 + is_shift[k]) / ((double)mesh[k]) / 2; } mat_multiply_matrix_vector_d3(q_vector, rec_lattice, q_vector); distance[j] = mat_norm_squared_d3(q_vector); } min_distance = distance[0]; min_index = 0; for (j = 1; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { if (distance[j] < min_distance) { min_distance = distance[j]; min_index = j; } } for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { if (distance[j] < min_distance + tolerance) { if (j == min_index) { gp = i; } else { gp = boundary_num_gp + total_num_gp; } for (k = 0; k < 3; k++) { bz_grid_address[gp][k] = grid_address[i][k] + bz_search_space[j][k] * mesh[k]; bz_address_double[k] = bz_grid_address[gp][k] * 2 + is_shift[k]; } bzgp = kgd_get_grid_point_double_mesh(bz_address_double, bzmesh); bz_map[bzgp] = gp; if (j != min_index) { boundary_num_gp++; } } } } return boundary_num_gp + total_num_gp; } static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3], const int mesh[3]) { int i, j; double tolerance; double length[3]; for (i = 0; i < 3; i++) { length[i] = 0; for (j = 0; j < 3; j++) { length[i] += rec_lattice[j][i] * rec_lattice[j][i]; } length[i] /= mesh[i] * mesh[i]; } tolerance = length[0]; for (i = 1; i < 3; i++) { if (tolerance < length[i]) { tolerance = length[i]; } } tolerance *= 0.01; return tolerance; } static int check_mesh_symmetry(const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal) { int i; int eq[3]; eq[0] = 0; /* a=b */ eq[1] = 0; /* b=c */ eq[2] = 0; /* c=a */ for (i = 0; i < rot_reciprocal->size; i++) { if (rot_reciprocal->mat[i][0][0] == 0 && rot_reciprocal->mat[i][1][0] == 1 && rot_reciprocal->mat[i][2][0] == 0) {eq[0] = 1;} if (rot_reciprocal->mat[i][0][0] == 0 && rot_reciprocal->mat[i][1][0] == 0 && rot_reciprocal->mat[i][2][0] == 1) {eq[2] = 1;} if (rot_reciprocal->mat[i][0][1] == 0 
&& rot_reciprocal->mat[i][1][1] == 0 && rot_reciprocal->mat[i][2][1] == 1) {eq[1] = 1;} } return (((eq[0] && mesh[0] == mesh[1] && is_shift[0] == is_shift[1]) || (!eq[0])) && ((eq[1] && mesh[1] == mesh[2] && is_shift[1] == is_shift[2]) || (!eq[1])) && ((eq[2] && mesh[2] == mesh[0] && is_shift[2] == is_shift[0]) || (!eq[2]))); }
get_max_threads.c
// RUN: %compile-run-and-check #include <omp.h> #include <stdio.h> int main(){ int max_threads = -1; int num_threads = -1; #pragma omp target map(tofrom: max_threads) max_threads = omp_get_max_threads(); #pragma omp target parallel map(tofrom: num_threads) { #pragma omp master num_threads = omp_get_num_threads(); } // CHECK: Max Threads: 128, Num Threads: 128 printf("Max Threads: %d, Num Threads: %d\n", max_threads, num_threads); return 0; }
omp_smithW.c
/********************************************************************************* * Smith–Waterman algorithm * Purpose: Local alignment of nucleotide or protein sequences * Authors: Daniel Holanda, Hanoch Griner, Taynara Pinheiro * * Compilation: gcc omp_smithW.c -o omp_smithW -fopenmp -DDEBUG // debugging mode * gcc omp_smithW.c -O3 -o omp_smithW -fopenmp // production run * Execution: ./omp_smithW <number_of_col> <number_of_rows> * * Updated by C. Liao, Jan 2nd, 2019 *********************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include <time.h> #include <assert.h> #include "parameters.h" /*-------------------------------------------------------------------- * Text Tweaks */ #define RESET "\033[0m" #define BOLDRED "\033[1m\033[31m" /* Bold Red */ /* End of text tweaks */ /*-------------------------------------------------------------------- * Constants */ #define PATH -1 #define NONE 0 #define UP 1 #define LEFT 2 #define DIAGONAL 3 /* End of constants */ /*-------------------------------------------------------------------- * Helpers */ #define min(x, y) (((x) < (y)) ? (x) : (y)) #define max(a,b) ((a) > (b) ? 
a : b) // #define DEBUG /* End of Helpers */ /*-------------------------------------------------------------------- * Functions Prototypes */ void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos); int matchMissmatchScore(long long int i, long long int j); void backtrack(int* P, long long int maxPos); void printMatrix(int* matrix); void printPredecessorMatrix(int* matrix); void generate(void); long long int nElement(long long int i); void calcFirstDiagElement(long long int i, long long int *si, long long int *sj); /* End of prototypes */ /*-------------------------------------------------------------------- * Global Variables */ bool useBuiltInData=true; //Defines size of strings to be compared long long int m = 8 ; //Columns - Size of string a long long int n = 9; //Lines - Size of string b // the generated scoring matrix's size is m++ and n++ later to have the first row/column as 0s. //Defines scores int matchScore = 3; int missmatchScore = -3; int gapScore = -2; //Strings over the Alphabet Sigma char *a, *b; /* End of global variables */ /*-------------------------------------------------------------------- * Function: main */ int main(int argc, char* argv[]) { // thread_count is no longer used int thread_count; if (argc==3) { m = strtoll(argv[1], NULL, 10); n = strtoll(argv[2], NULL, 10); useBuiltInData = false; } //#ifdef DEBUG if (useBuiltInData) printf ("Using built-in data for testing ..\n"); printf("Problem size: Matrix[%lld][%lld], FACTOR=%d CUTOFF=%d\n", n, m, FACTOR, CUTOFF); //#endif //Allocates a and b a = (char*) malloc(m * sizeof(char)); b = (char*) malloc(n * sizeof(char)); //Because now we have zeros m++; n++; //Allocates similarity matrix H int *H; H = (int *) calloc(m * n, sizeof(int)); //Allocates predecessor matrix P int *P; P = (int *)calloc(m * n, sizeof(int)); if (useBuiltInData) { //Uncomment this to test the sequence available at //http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1 // OBS: m=11 
n=7 // a[0] = 'C'; // a[1] = 'G'; // a[2] = 'T'; // a[3] = 'G'; // a[4] = 'A'; // a[5] = 'A'; // a[6] = 'T'; // a[7] = 'T'; // a[8] = 'C'; // a[9] = 'A'; // a[10] = 'T'; // b[0] = 'G'; // b[1] = 'A'; // b[2] = 'C'; // b[3] = 'T'; // b[4] = 'T'; // b[5] = 'A'; // b[6] = 'C'; // https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm#Example // Using the wiki example to verify the results b[0] = 'G'; b[1] = 'G'; b[2] = 'T'; b[3] = 'T'; b[4] = 'G'; b[5] = 'A'; b[6] = 'C'; b[7] = 'T'; b[8] = 'A'; a[0] = 'T'; a[1] = 'G'; a[2] = 'T'; a[3] = 'T'; a[4] = 'A'; a[5] = 'C'; a[6] = 'G'; a[7] = 'G'; } else { //Gen random arrays a and b generate(); } //Start position for backtrack long long int maxPos = 0; //Calculates the similarity matrix long long int i, j; // The way to generate all wavefront is to go through the top edge elements // starting from the left top of the matrix, go to the bottom top -> down, then left->right // total top edge element count = dim1_size + dim2_size -1 //Because now we have zeros ((m-1) + (n-1) - 1) long long int nDiag = m + n - 3; #ifdef DEBUG printf("nDiag=%d\n", nDiag); printf("Number of wavefront lines and their first element positions:\n"); #endif #pragma omp parallel { #pragma omp master { thread_count = omp_get_num_threads(); printf ("Using %d out of max %d threads...", thread_count, omp_get_max_threads()); } } //Gets Initial time double initialTime = omp_get_wtime(); // #pragma omp parallel default(none) shared(H, P, maxPos, nDiag, j) private(i) { for (i = 1; i <= nDiag; ++i) // start from 1 since 0 is the boundary padding { long long int nEle, si, sj; nEle = nElement(i); calcFirstDiagElement(i, &si, &sj); #pragma omp parallel for private(j) shared (nEle, si, sj, H, P, maxPos) if (nEle>=CUTOFF) for (j = 0; j < nEle; ++j) { // going upwards : anti-diagnol direction long long int ai = si - j ; // going up vertically long long int aj = sj + j; // going right in horizontal similarityScore(ai, aj, H, P, &maxPos); // a critical section is 
used inside } } // for end nDiag } // end omp parallel double finalTime = omp_get_wtime(); printf("\nElapsed time for scoring matrix computation: %f\n", finalTime - initialTime); initialTime = omp_get_wtime(); backtrack(P, maxPos); finalTime = omp_get_wtime(); //Gets backtrack time finalTime = omp_get_wtime(); printf("Elapsed time for backtracking: %f\n", finalTime - initialTime); if (useBuiltInData) { printf ("Verifying results using the builtinIn data: %s\n", (H[n*m-1]==7)?"true":"false"); assert (H[n*m-1]==7); } #ifdef DEBUG printf("\nSimilarity Matrix:\n"); printMatrix(H); printf("\nPredecessor Matrix:\n"); printPredecessorMatrix(P); #endif //Frees similarity matrixes free(H); free(P); //Frees input arrays free(a); free(b); return 0; } /* End of main */ /*-------------------------------------------------------------------- * Function: nElement * Purpose: Calculate the number of i-diagonal's elements * i value range 1 to nDiag. we inclulde the upper bound value. 0 is for the padded wavefront, which is ignored. */ long long int nElement(long long int i) { if (i < m && i < n) { // smaller than both directions //Number of elements in the diagonal is increasing return i; } else if (i < max(m, n)) { // smaller than only one direction //Number of elements in the diagonal is stable long int min = min(m, n); // the longer direction has the edge elements, the number is the smaller direction's size return min - 1; } else { //Number of elements in the diagonal is decreasing long int min = min(m, n); return 2 * min - i + abs(m - n) - 2; } } /*-------------------------------------------------------------------- * Function: calcElement: expect valid i value is from 1 to nDiag. 
since the first one is 0 padding
 * Purpose: Calculate the position of (si, sj)-element
 * n rows, m columns: we sweep the matrix on the left edge then bottom edge to get the wavefront
 */
void calcFirstDiagElement(long long int i, long long int *si, long long int *sj) {
    /* Calculate the first element of diagonal */
    if (i < n) { /* smaller than row count */
        *si = i;
        *sj = 1; /* start from the j==1 since j==0 is the padding */
    } else { /* now we sweep horizontally at the bottom of the matrix */
        *si = n - 1;  /* i is fixed */
        *sj = i - n + 2; /* j position is the nDiag (id -n) +1 +1 // first +1 */
    }
}

/*
// understanding the calculation by an example
 n =6 // row
 m =2 // col

 padded scoring matrix
 n=7
 m=3
   0 1 2
 -------
 0 x x x
 1 x x x
 2 x x x
 3 x x x
 4 x x x
 5 x x x
 6 x x x

 We should peel off top row and left column since they are the padding
 the remaining 6x2 sub matrix is what is interesting for us

 Now find the number of wavefront lines and their first element's position in the scoring matrix
 total diagnol frontwave = (n-1) + (m-1) -1 // submatrix row+column -1

 We use the left most element in each wavefront line as its first element.
 Then we have the first elements like
 (1,1), (2,1) (3,1) .. (6,1)
 (6,2)
*/

/*--------------------------------------------------------------------
 * Function:    SimilarityScore
 * Purpose:     Calculate value of scoring matrix element H(i,j) :
 *              the maximum Similarity-Score H(i,j)
 *              int *P; the predecessor array, storing which of the three
 *              elements is picked with max value
 */
void similarityScore(long long int i, long long int j, int* H, int* P,
                     long long int* maxPos) {
    int up, left, diag;

    /* Stores index of element (row-major, m columns per row) */
    long long int index = m * i + j;

    /* Get element above */
    up = H[index - m] + gapScore;

    /* Get element on the left */
    left = H[index - 1] + gapScore;

    /* Get element on the diagonal */
    diag = H[index - m - 1] + matchMissmatchScore(i, j);

    /* Calculates the maximum; NONE (0) is the Smith–Waterman floor */
    int max = NONE;
    int pred = NONE;
    /* === Matrix ===
     * a[0] ... a[n]
     * b[0]
     * ...
     * b[n]
     *
     * generate 'a' from 'b', if '←' insert e '↑' remove
     * a=GAATTCA
     * b=GACTT-A
     *
     * generate 'b' from 'a', if '←' insert e '↑' remove
     * b=GACTT-A
     * a=GAATTCA
     */
    if (diag > max) { /* same letter ↖ */
        max = diag;
        pred = DIAGONAL;
    }
    if (up > max) { /* remove letter ↑ */
        max = up;
        pred = UP;
    }
    if (left > max) { /* insert letter ← */
        max = left;
        pred = LEFT;
    }
    /* Inserts the value in the similarity and predecessor matrixes */
    H[index] = max;
    P[index] = pred;

    /* Updates maximum score to be used as seed on backtrack.
     * Serializes concurrent wavefront threads; both the comparison and
     * the store must be atomic with respect to each other. */
    #pragma omp critical
    if (max > H[*maxPos]) {
        *maxPos = index;
    }
}  /* End of similarityScore */

/*--------------------------------------------------------------------
 * Function:    matchMissmatchScore
 * Purpose:     Similarity function on the alphabet for match/missmatch
 */
int matchMissmatchScore(long long int i, long long int j) {
    /* -1: the padding row/column shifts sequence indices by one */
    if (a[j - 1] == b[i - 1])
        return matchScore;
    else
        return missmatchScore;
}  /* End of matchMissmatchScore */

/*--------------------------------------------------------------------
 * Function:    backtrack
 * Purpose:     Modify matrix to print, path change from value to PATH
 * NOTE(review): if P[maxPos] == NONE on entry, predPos is read
 * uninitialized after the first iteration's branches — presumably
 * maxPos always points at a scored cell; verify against callers.
 */
void backtrack(int* P, long long int maxPos) {
    /* hold maxPos value */
    long long int predPos;

    /* backtrack from maxPos to startPos = 0 */
    do {
        if (P[maxPos] == DIAGONAL)
            predPos = maxPos - m - 1;
        else if (P[maxPos] == UP)
            predPos = maxPos - m;
        else if (P[maxPos] == LEFT)
            predPos = maxPos - 1;
        P[maxPos] *= PATH; /* negate the code to mark the cell as on-path */
        maxPos = predPos;
    } while (P[maxPos] != NONE);
}  /* End of backtrack */

/*--------------------------------------------------------------------
 * Function:    printMatrix
 * Purpose:     Print Matrix
 */
void printMatrix(int* matrix) {
    long long int i, j;
    printf("-\t-\t");
    for (j = 0; j < m-1; j++) {
        printf("%c\t", a[j]);
    }
    printf("\n-\t");
    for (i = 0; i < n; i++) { /* Lines */
        for (j = 0; j < m; j++) {
            if (j==0 && i>0) printf("%c\t", b[i-1]);
            printf("%d\t", matrix[m * i + j]);
        }
        printf("\n");
    }
}  /* End of printMatrix */

/*--------------------------------------------------------------------
 * Function:    printPredecessorMatrix
 * Purpose:     Print predecessor matrix; on-path cells (negative codes)
 *              are shown in bold red.
 */
void printPredecessorMatrix(int* matrix) {
    long long int i, j, index;
    printf("    ");
    for (j = 0; j < m-1; j++) {
        printf("%c ", a[j]);
    }
    printf("\n  ");
    for (i = 0; i < n; i++) { /* Lines */
        for (j = 0; j < m; j++) {
            if (j==0 && i>0) printf("%c ", b[i-1]);
            index = m * i + j;
            if (matrix[index] < 0) { /* negated code: part of the best path */
                printf(BOLDRED);
                if (matrix[index] == -UP)
                    printf("↑ ");
                else if (matrix[index] == -LEFT)
                    printf("← ");
                else if (matrix[index] == -DIAGONAL)
                    printf("↖ ");
                else
                    printf("- ");
                printf(RESET);
            } else {
                if (matrix[index] == UP)
                    printf("↑ ");
                else if (matrix[index] == LEFT)
                    printf("← ");
                else if (matrix[index] == DIAGONAL)
                    printf("↖ ");
                else
                    printf("- ");
            }
        }
        printf("\n");
    }
}  /* End of printPredecessorMatrix */

/*--------------------------------------------------------------------
 * Function:    generate
 * Purpose:     Generate arrays a and b with uniformly random A/C/G/T
 *              (aux values 0,2,3 map to A,C,G; 1 falls through to T).
 */
void generate() {
    /* Random seed */
    srand(time(NULL));

    /* Generates the values of a */
    long long int i;
    for (i = 0; i < m; i++) {
        int aux = rand() % 4;
        if (aux == 0)
            a[i] = 'A';
        else if (aux == 2)
            a[i] = 'C';
        else if (aux == 3)
            a[i] = 'G';
        else
            a[i] = 'T';
    }

    /* Generates the values of b */
    for (i = 0; i < n; i++) {
        int aux = rand() % 4;
        if (aux == 0)
            b[i] = 'A';
        else if (aux == 2)
            b[i] = 'C';
        else if (aux == 3)
            b[i] = 'G';
        else
            b[i] = 'T';
    }
}  /* End of generate */

/*--------------------------------------------------------------------
 * External References:
 * http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1
 * http://pt.slideshare.net/avrilcoghlan/the-smith-waterman-algorithm
 * http://baba.sourceforge.net/
 */
GB_unaryop__ainv_int8_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): left byte-identical apart from comments for that reason.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_int8_fp64
// op(A') function:  GB_tran__ainv_int8_fp64

// C type:   int8_t
// A type:   double
// cast:     int8_t cij ; GB_CAST_SIGNED(cij,aij,8)
// unaryop:  cij = -aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, x) \
    int8_t z ; GB_CAST_SIGNED(z,x,8) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_int8_fp64
(
    int8_t *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // element-wise: each iteration is independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_int8_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template does the work, specialized by the
    // GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
interpolate_v2_op.h
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <algorithm>
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/hostdevice.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace paddle {
namespace operators {

template <typename T, size_t D, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;

// Collect the target spatial shape from a list of 1-element int32
// tensors (copied to CPU first when they live on the GPU).
inline std::vector<int> get_new_shape(
    const std::vector<const Tensor*>& list_new_shape_tensor) {
  std::vector<int> vec_new_shape;
  for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) {
    auto tensor = list_new_shape_tensor[i];
    // NOTE(review): "received d%." in the message below looks like a typo
    // for "%d." — runtime string left untouched here.
    PADDLE_ENFORCE_EQ(
        tensor->dims(), phi::make_ddim({1}),
        platform::errors::InvalidArgument("The shape of dimension tensor should be [1],"
                                          "but received d%.",
                                          tensor->dims()));
    if (platform::is_gpu_place(tensor->place())) {
      framework::Tensor temp;
      paddle::framework::TensorCopySync(*tensor, platform::CPUPlace(), &temp);
      vec_new_shape.push_back(static_cast<int32_t>(*temp.data<int32_t>()));
    } else {
      vec_new_shape.push_back(static_cast<int32_t>(*tensor->data<int32_t>()));
    }
  }
  return vec_new_shape;
}

// Copy the contents of a (possibly device-resident) tensor into a host
// std::vector<T>.
template <typename T>
inline std::vector<T> get_new_data_from_tensor(const Tensor* new_data_tensor) {
  std::vector<T> vec_new_data;
  auto* new_data = new_data_tensor->data<T>();
  framework::Tensor cpu_starts_tensor;
  if (platform::is_gpu_place(new_data_tensor->place())) {
    paddle::framework::TensorCopySync(*new_data_tensor, platform::CPUPlace(),
                                      &cpu_starts_tensor);
    new_data = cpu_starts_tensor.data<T>();
  }
#ifdef PADDLE_WITH_ASCEND_CL
  if (platform::is_npu_place(new_data_tensor->place())) {
    paddle::framework::TensorCopySync(*new_data_tensor, platform::CPUPlace(),
                                      &cpu_starts_tensor);
    new_data = cpu_starts_tensor.data<T>();
  }
#endif
  vec_new_data = std::vector<T>(new_data, new_data + new_data_tensor->numel());
  return vec_new_data;
}

// Decompose a 3-D/4-D/5-D dim vector into N, C, D, H, W according to the
// data layout; missing spatial dims are reported as 1.
inline void ExtractNCDWH(const framework::DDim& dims,
                         const DataLayout& data_layout, int* N, int* C, int* D,
                         int* H, int* W) {
  *N = dims[0];
  if (dims.size() == 3) {
    *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[2];
    *D = 1;
    *H = 1;
    *W = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
  } else if (dims.size() == 4) {
    *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[3];
    *D = 1;
    *H = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
    *W = data_layout == DataLayout::kNCHW ? dims[3] : dims[2];
  } else {
    *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[4];
    *D = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
    *H = data_layout == DataLayout::kNCHW ? dims[3] : dims[2];
    *W = data_layout == DataLayout::kNCHW ? dims[4] : dims[3];
  }
}

// 2-D nearest-neighbour resize (forward).
template <typename T>
static void NearestNeighborInterpolate(const Tensor& input, Tensor* output,
                                       const float ratio_h, const float ratio_w,
                                       const int n, const int c,
                                       const int out_h, const int out_w,
                                       const bool align_corners,
                                       const DataLayout& data_layout) {
  auto input_t = EigenTensor<T, 4>::From(input);
  auto output_t = EigenTensor<T, 4>::From(*output);
  for (int k = 0; k < out_h; k++) {  // loop for images
    // align_corners rounds to nearest; otherwise truncates (floor).
    int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5)
                               : static_cast<int>(ratio_h * k);
    for (int l = 0; l < out_w; l++) {
      int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5)
                                 : static_cast<int>(ratio_w * l);
      for (int i = 0; i < n; i++) {    // loop for batches
        for (int j = 0; j < c; j++) {  // loop for channels
          if (data_layout == DataLayout::kNCHW) {
            output_t(i, j, k, l) = input_t(i, j, in_k, in_l);
          } else {
            output_t(i, k, l, j) = input_t(i, in_k, in_l, j);
          }
        }
      }
    }
  }
}

// 3-D nearest-neighbour resize (forward).
template <typename T>
static void NearestNeighbor3DInterpolate(
    const Tensor& input, Tensor* output, const float ratio_d,
    const float ratio_h, const float ratio_w, const int n, const int c,
    const int out_d, const int out_h, const int out_w,
    const bool align_corners, const DataLayout& data_layout) {
  auto input_t = EigenTensor<T, 5>::From(input);
  auto output_t = EigenTensor<T, 5>::From(*output);
  for (int d = 0; d < out_d; d++) {  // loop for images
    int in_d = (align_corners) ? static_cast<int>(ratio_d * d + 0.5)
                               : static_cast<int>(ratio_d * d);
    for (int k = 0; k < out_h; k++) {
      int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5)
                                 : static_cast<int>(ratio_h * k);
      for (int l = 0; l < out_w; l++) {
        int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5)
                                   : static_cast<int>(ratio_w * l);
        for (int i = 0; i < n; i++) {    // loop for batches
          for (int j = 0; j < c; j++) {  // loop for channels
            if (data_layout == DataLayout::kNCHW) {
              output_t(i, j, d, k, l) = input_t(i, j, in_d, in_k, in_l);
            } else {  // NDHWC
              output_t(i, d, k, l, j) = input_t(i, in_d, in_k, in_l, j);
            }
          }
        }
      }
    }
  }
}

// 1-D linear resize (forward): precompute per-output-column source
// indices/weights, then blend.
// NOTE(review): vx_w/vx_e/vd_w/vd_e use reserve() followed by operator[]
// writes — reserve() does not change size(), so these writes are past the
// vector's logical end (UB, though it works on typical implementations).
// resize() would be the correct call.  Left untouched here.
template <typename T>
static void LinearInterpolation(const Tensor& input, Tensor* output,
                                const float ratio_w, const int in_w,
                                const int n, const int c, const int out_w,
                                const bool align_corners, const bool align_mode,
                                const DataLayout data_layout) {
  auto input_t = EigenTensor<T, 3>::From(input);
  auto output_t = EigenTensor<T, 3>::From(*output);
  bool align_flag = (align_mode == 0 && !align_corners);

  std::vector<int> vx_w, vx_e;
  std::vector<float> vd_w, vd_e;
  vx_w.reserve(out_w);
  vx_e.reserve(out_w);
  vd_w.reserve(out_w);
  vd_e.reserve(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int l = 0; l < out_w; l++) {
    int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * l);
    x_w = (x_w > 0) ? x_w : 0;                       // w
    int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w;  // w_id
    float idx_src_x = ratio_w * (l + 0.5) - 0.5;
    idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
    float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;  // w1lambda
    float d_e = 1.f - d_w;                                         // w2lambda
    {
      vx_w[l] = x_w;
      vx_e[l] = x_e;
      vd_w[l] = d_w;
      vd_e[l] = d_e;
    }
  }

#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(3)
#endif
  for (int i = 0; i < n; i++) {    // loop for batches
    for (int j = 0; j < c; j++) {  // loop for channels
      for (int l = 0; l < out_w; l++) {
        // linear interpolation
        T out_t;
        if (data_layout == DataLayout::kNCHW) {
          out_t = input_t(i, j, vx_w[l]) * vd_e[l] +
                  input_t(i, j, vx_e[l]) * vd_w[l];
          output_t(i, j, l) = out_t;
        } else {
          out_t = input_t(i, vx_w[l], j) * vd_e[l] +
                  input_t(i, vx_e[l], j) * vd_w[l];
          output_t(i, l, j) = out_t;
        }
      }
    }
  }
}

// 1-D linear resize (backward): scatter the output gradient to the two
// contributing source columns, weighted by the interpolation lambdas.
template <typename T>
static void LinearInterpolationGrad(const Tensor& output_grad,
                                    Tensor* input_grad, const float ratio_w,
                                    const int in_w, const int n, const int c,
                                    const int out_w, const bool align_corners,
                                    const int align_mode,
                                    const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 3>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 3>::From(output_grad);
  bool align_flag = (align_mode == 0 && !align_corners);
  for (int l = 0; l < out_w; l++) {
    int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * l);
    x_w = (x_w > 0) ? x_w : 0;                       // w
    int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w;  // w_id
    float idx_src_x = ratio_w * (l + 0.5) - 0.5;
    idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
    float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;  // w1lambda
    float d_e = 1.f - d_w;                                         // w2lambda
    for (int i = 0; i < n; i++) {    // loop for batches
      for (int j = 0; j < c; j++) {  // loop for channels
        // linear interpolation grad
        if (data_layout == DataLayout::kNCHW) {
          const T grad = output_grad_t(i, j, l);
          input_grad_t(i, j, x_w) += static_cast<T>(grad * d_e);
          input_grad_t(i, j, x_e) += static_cast<T>(grad * d_w);
        } else {
          const T grad = output_grad_t(i, l, j);
          input_grad_t(i, x_w, j) += static_cast<T>(grad * d_e);
          input_grad_t(i, x_e, j) += static_cast<T>(grad * d_w);
        }
      }
    }
  }
}

// 2-D bilinear resize (forward).  Same reserve()/operator[] caveat as
// LinearInterpolation above.
template <typename T>
static void BilinearInterpolation(const Tensor& input, Tensor* output,
                                  const float ratio_h, const float ratio_w,
                                  const int in_h, const int in_w, const int n,
                                  const int c, const int out_h, const int out_w,
                                  const bool align_corners,
                                  const bool align_mode,
                                  const DataLayout data_layout) {
  auto input_t = EigenTensor<T, 4>::From(input);
  auto output_t = EigenTensor<T, 4>::From(*output);
  bool align_flag = (align_mode == 0 && !align_corners);

  std::vector<int> vy_n, vy_s;
  std::vector<float> vd_n, vd_s;
  vy_n.reserve(out_h);
  vy_s.reserve(out_h);
  vd_n.reserve(out_h);
  vd_s.reserve(out_h);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int k = 0; k < out_h; k++) {
    int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * k);
    y_n = (y_n > 0) ? y_n : 0;
    int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
    float idx_src_y = ratio_h * (k + 0.5) - 0.5;
    idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
    float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
    float d_s = 1.f - d_n;
    {
      vy_n[k] = y_n;
      vy_s[k] = y_s;
      vd_n[k] = d_n;
      vd_s[k] = d_s;
    }
  }

  std::vector<int> vx_w, vx_e;
  std::vector<float> vd_w, vd_e;
  vx_w.reserve(out_w);
  vx_e.reserve(out_w);
  vd_w.reserve(out_w);
  vd_e.reserve(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int l = 0; l < out_w; l++) {
    int x_w = (align_mode == 0 && !align_corners)
                  ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                  : static_cast<int>(ratio_w * l);
    x_w = (x_w > 0) ? x_w : 0;
    int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
    float idx_src_x = ratio_w * (l + 0.5) - 0.5;
    idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
    float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
    float d_e = 1.f - d_w;
    {
      vx_w[l] = x_w;
      vx_e[l] = x_e;
      vd_w[l] = d_w;
      vd_e[l] = d_e;
    }
  }

#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
  for (int i = 0; i < n; i++) {          // loop for batches
    for (int j = 0; j < c; j++) {        // loop for channels
      for (int k = 0; k < out_h; k++) {  // loop for images
        for (int l = 0; l < out_w; l++) {
          // bilinear interpolation: blend the 4 neighbouring pixels
          T out_t;
          if (data_layout == DataLayout::kNCHW) {
            out_t = input_t(i, j, vy_n[k], vx_w[l]) * vd_s[k] * vd_e[l] +
                    input_t(i, j, vy_s[k], vx_w[l]) * vd_n[k] * vd_e[l] +
                    input_t(i, j, vy_n[k], vx_e[l]) * vd_s[k] * vd_w[l] +
                    input_t(i, j, vy_s[k], vx_e[l]) * vd_n[k] * vd_w[l];
            output_t(i, j, k, l) = out_t;
          } else {
            out_t = input_t(i, vy_n[k], vx_w[l], j) * vd_s[k] * vd_e[l] +
                    input_t(i, vy_s[k], vx_w[l], j) * vd_n[k] * vd_e[l] +
                    input_t(i, vy_n[k], vx_e[l], j) * vd_s[k] * vd_w[l] +
                    input_t(i, vy_s[k], vx_e[l], j) * vd_n[k] * vd_w[l];
            output_t(i, k, l, j) = out_t;
          }
        }
      }
    }
  }
}

// 3-D trilinear resize (forward).  Same reserve()/operator[] caveat as
// LinearInterpolation above.
template <typename T>
static void TrilinearInterpolation(
    const Tensor& input, Tensor* output, const float ratio_d,
    const float ratio_h, const float ratio_w, const int in_d, const int in_h,
    const int in_w, const int n, const int c, const int out_d, const int out_h,
    const int out_w, const bool align_corners, const bool align_mode,
    const DataLayout& data_layout) {
  auto input_t = EigenTensor<T, 5>::From(input);
  auto output_t = EigenTensor<T, 5>::From(*output);
  bool align_flag = (align_mode == 0 && !align_corners);

  std::vector<int> vt_f, vt_b;
  std::vector<float> vd_f, vd_b;
  vt_f.reserve(out_d);
  vt_b.reserve(out_d);
  vd_f.reserve(out_d);
  vd_b.reserve(out_d);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int j = 0; j < out_d; j++) {
    int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5)
                         : static_cast<int>(ratio_d * j);
    t_f = (t_f > 0) ? t_f : 0;
    int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1);
    float idx_src_t = ratio_d * (j + 0.5) - 0.5;
    idx_src_t = (idx_src_t > 0) ? idx_src_t : 0;
    float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f;
    float d_b = 1.f - d_f;
    {
      vt_f[j] = t_f;
      vt_b[j] = t_b;
      vd_f[j] = d_f;
      vd_b[j] = d_b;
    }
  }

  std::vector<int> vy_n, vy_s;
  std::vector<float> vd_n, vd_s;
  vy_n.reserve(out_h);
  vy_s.reserve(out_h);
  vd_n.reserve(out_h);
  vd_s.reserve(out_h);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int k = 0; k < out_h; k++) {
    int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * k);
    y_n = (y_n > 0) ? y_n : 0;
    int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
    float idx_src_y = ratio_h * (k + 0.5) - 0.5;
    idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
    float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
    float d_s = 1.f - d_n;
    {
      vy_n[k] = y_n;
      vy_s[k] = y_s;
      vd_n[k] = d_n;
      vd_s[k] = d_s;
    }
  }

  std::vector<int> vx_w, vx_e;
  std::vector<float> vd_w, vd_e;
  vx_w.reserve(out_w);
  vx_e.reserve(out_w);
  vd_w.reserve(out_w);
  vd_e.reserve(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int l = 0; l < out_w; l++) {
    int x_w = (align_mode == 0 && !align_corners)
                  ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                  : static_cast<int>(ratio_w * l);
    x_w = (x_w > 0) ? x_w : 0;
    int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
    float idx_src_x = ratio_w * (l + 0.5) - 0.5;
    idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
    float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
    float d_e = 1.f - d_w;
    {
      vx_w[l] = x_w;
      vx_e[l] = x_e;
      vd_w[l] = d_w;
      vd_e[l] = d_e;
    }
  }

#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(5)
#endif
  for (int b = 0; b < n; b++) {          // loop for batches
    for (int i = 0; i < c; i++) {        // loop for channels
      for (int j = 0; j < out_d; j++) {  // loop for D, H, W
        for (int k = 0; k < out_h; k++) {
          for (int l = 0; l < out_w; l++) {
            // trilinear interpolation: blend the 8 neighbouring voxels
            if (data_layout == DataLayout::kNCHW) {
              T out_t = input_t(b, i, vt_f[j], vy_n[k], vx_w[l]) * vd_b[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, i, vt_f[j], vy_n[k], vx_e[l]) * vd_b[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, i, vt_f[j], vy_s[k], vx_w[l]) * vd_b[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, i, vt_f[j], vy_s[k], vx_e[l]) * vd_b[j] *
                            vd_n[k] * vd_w[l] +
                        input_t(b, i, vt_b[j], vy_n[k], vx_w[l]) * vd_f[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, i, vt_b[j], vy_n[k], vx_e[l]) * vd_f[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, i, vt_b[j], vy_s[k], vx_w[l]) * vd_f[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, i, vt_b[j], vy_s[k], vx_e[l]) * vd_f[j] *
                            vd_n[k] * vd_w[l];
              output_t(b, i, j, k, l) = out_t;
            } else {
              T out_t = input_t(b, vt_f[j], vy_n[k], vx_w[l], i) * vd_b[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, vt_f[j], vy_n[k], vx_e[l], i) * vd_b[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, vt_f[j], vy_s[k], vx_w[l], i) * vd_b[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, vt_f[j], vy_s[k], vx_e[l], i) * vd_b[j] *
                            vd_n[k] * vd_w[l] +
                        input_t(b, vt_b[j], vy_n[k], vx_w[l], i) * vd_f[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, vt_b[j], vy_n[k], vx_e[l], i) * vd_f[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, vt_b[j], vy_s[k], vx_w[l], i) * vd_f[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, vt_b[j], vy_s[k], vx_e[l], i) * vd_f[j] *
                            vd_n[k] * vd_w[l];
              output_t(b, j, k, l, i) = out_t;
            }
          }
        }
      }
    }
  }
}

// First keys cubic-convolution kernel segment (|x| <= 1); A is the
// bicubic coefficient.
template <typename T>
HOSTDEVICE inline T cubic_convolution1(T x, T A) {
  return ((A + 2) * x - (A + 3)) * x * x + 1;
}

// NOTE(review): definition below is truncated in this chunk; tail kept
// verbatim.
template <typename T>
HOSTDEVICE inline T cubic_convolution2(T x, T A) {
  return ((A * x - 5 * A) * x + 8 * A) * x - 4 *
A;
}

// Fills coeffs[0..3] with the 4 Keys bicubic weights for fractional offset t
// in [0, 1), using A = -0.75 (the common bicubic sharpness constant).
template <typename T>
HOSTDEVICE inline void get_cubic_upsample_coefficients(T coeffs[4], T t) {
  T A = -0.75;

  T x1 = t;
  coeffs[0] = cubic_convolution2<T>(x1 + 1.0, A);
  coeffs[1] = cubic_convolution1<T>(x1, A);

  // opposite coefficients
  T x2 = 1.0 - t;
  coeffs[2] = cubic_convolution1<T>(x2, A);
  coeffs[3] = cubic_convolution2<T>(x2 + 1.0, A);
}

// 1-D cubic interpolation of four neighboring samples x0..x3 at fractional
// position t.
template <typename T>
static inline T cubic_interp(T x0, T x1, T x2, T x3, T t) {
  T coeffs[4];
  get_cubic_upsample_coefficients<T>(coeffs, t);

  return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3];
}

// Bicubic interpolation forward over a 4-D tensor (NCHW or NHWC): for each
// output pixel, interpolates 4 rows in x, then interpolates those results in
// y. Border samples are clamped into [0, in_h-1] / [0, in_w-1].
template <typename T>
static void BicubicInterpolation(const Tensor& input, Tensor* output,
                                 const float ratio_h, const float ratio_w,
                                 const int in_h, const int in_w, const int n,
                                 const int c, const int out_h, const int out_w,
                                 const bool align_corners,
                                 const DataLayout data_layout) {
  auto input_t = EigenTensor<T, 4>::From(input);
  auto output_t = EigenTensor<T, 4>::From(*output);

  for (int k = 0; k < out_h; k++) {  // loop for images
    // source y coordinate (half-pixel offset unless align_corners)
    T y_n = align_corners ? static_cast<T>(ratio_h * k)
                          : static_cast<T>(ratio_h * (k + 0.5) - 0.5);
    int input_y = floorf(y_n);
    const T y_t = y_n - input_y;

    for (int l = 0; l < out_w; l++) {
      T x_n = align_corners ? static_cast<T>(ratio_w * l)
                            : static_cast<T>(ratio_w * (l + 0.5) - 0.5);
      int input_x = floorf(x_n);
      const T x_t = x_n - input_x;

      for (int i = 0; i < n; i++) {    // loop for batches
        for (int j = 0; j < c; j++) {  // loop for channels
          T coefficients[4];
          // interp 4 times in x direction
          for (int ii = 0; ii < 4; ii++) {
            // clamp the 4x4 access window to the image bounds
            int access_y = std::max(std::min(input_y - 1 + ii, in_h - 1),
                                    static_cast<int>(0));
            int access_x_0 =
                std::max(std::min(input_x - 1, in_w - 1), static_cast<int>(0));
            int access_x_1 =
                std::max(std::min(input_x + 0, in_w - 1), static_cast<int>(0));
            int access_x_2 =
                std::max(std::min(input_x + 1, in_w - 1), static_cast<int>(0));
            int access_x_3 =
                std::max(std::min(input_x + 2, in_w - 1), static_cast<int>(0));
            if (data_layout == DataLayout::kNCHW) {
              coefficients[ii] =
                  cubic_interp<T>(input_t(i, j, access_y, access_x_0),
                                  input_t(i, j, access_y, access_x_1),
                                  input_t(i, j, access_y, access_x_2),
                                  input_t(i, j, access_y, access_x_3), x_t);
            } else {
              coefficients[ii] =
                  cubic_interp<T>(input_t(i, access_y, access_x_0, j),
                                  input_t(i, access_y, access_x_1, j),
                                  input_t(i, access_y, access_x_2, j),
                                  input_t(i, access_y, access_x_3, j), x_t);
            }
          }

          // interp y direction
          if (data_layout == DataLayout::kNCHW) {
            output_t(i, j, k, l) =
                cubic_interp<T>(coefficients[0], coefficients[1],
                                coefficients[2], coefficients[3], y_t);
          } else {
            output_t(i, k, l, j) =
                cubic_interp<T>(coefficients[0], coefficients[1],
                                coefficients[2], coefficients[3], y_t);
          }
        }
      }
    }
  }
}

// Backward of nearest-neighbor interpolation (4-D): each output gradient is
// accumulated into the single input cell it was sampled from. Definition
// continues on the next chunk.
template <typename T>
static void NearestNeighborInterpolateGrad(
    const Tensor& output_grad, Tensor* input_grad, const float ratio_h,
    const float ratio_w, const int n, const int c, const int out_h,
    const int out_w, const bool align_corners, const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 4>::From(output_grad);

  for (int k = 0; k < out_h; k++) {  // loop for images
    int in_k = (align_corners) ?
                               static_cast<int>(ratio_h * k + 0.5)
                             : static_cast<int>(ratio_h * k);
    for (int l = 0; l < out_w; l++) {
      // align_corners rounds to nearest; otherwise truncates (floor for >= 0)
      int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5)
                                 : static_cast<int>(ratio_w * l);
      for (int i = 0; i < n; i++) {    // loop for batches
        for (int j = 0; j < c; j++) {  // loop for channels
          if (data_layout == DataLayout::kNCHW) {
            input_grad_t(i, j, in_k, in_l) += output_grad_t(i, j, k, l);
          } else {
            input_grad_t(i, in_k, in_l, j) += output_grad_t(i, k, l, j);
          }
        }
      }
    }
  }
}

// Backward of 3-D nearest-neighbor interpolation (NCDHW or NDHWC): scatters
// each output gradient into the nearest input cell along depth, height and
// width.
template <typename T>
static void NearestNeighbor3DInterpolateGrad(
    const Tensor& output_grad, Tensor* input_grad, const float ratio_d,
    const float ratio_h, const float ratio_w, const int n, const int c,
    const int out_d, const int out_h, const int out_w,
    const bool align_corners, const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 5>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 5>::From(output_grad);

  for (int d = 0; d < out_d; d++) {
    int in_d = (align_corners) ? static_cast<int>(ratio_d * d + 0.5)
                               : static_cast<int>(ratio_d * d);
    for (int k = 0; k < out_h; k++) {  // loop for images
      int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5)
                                 : static_cast<int>(ratio_h * k);
      for (int l = 0; l < out_w; l++) {
        int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5)
                                   : static_cast<int>(ratio_w * l);
        for (int i = 0; i < n; i++) {    // loop for batches
          for (int j = 0; j < c; j++) {  // loop for channels
            if (data_layout == DataLayout::kNCHW) {
              input_grad_t(i, j, in_d, in_k, in_l) +=
                  output_grad_t(i, j, d, k, l);
            } else {
              input_grad_t(i, in_d, in_k, in_l, j) +=
                  output_grad_t(i, d, k, l, j);
            }
          }
        }
      }
    }
  }
}

// Backward of bilinear interpolation (4-D): distributes each output gradient
// to the 4 surrounding input cells with the same weights used in the forward
// pass. Definition continues on the next chunk.
template <typename T>
static void BilinearInterpolationGrad(
    const Tensor& output_grad, Tensor* input_grad, const float ratio_h,
    const float ratio_w, const int in_h, const int in_w, const int n,
    const int c, const int out_h, const int out_w, const bool align_corners,
    const int align_mode, const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 4>::From(output_grad);
  bool align_flag = (align_mode == 0 && !align_corners);

  for (int k = 0; k < out_h; k++) {  // loop for images
    // north/south source rows and their fractional weights
    int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * k);
    y_n = (y_n > 0) ? y_n : 0;
    int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
    float idx_src_y = ratio_h * (k + 0.5) - 0.5;
    idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
    float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
    float d_s = 1.f - d_n;

    for (int l = 0; l < out_w; l++) {
      // west/east source columns and their fractional weights
      int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                           : static_cast<int>(ratio_w * l);
      x_w = (x_w > 0) ? x_w : 0;
      int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
      float idx_src_x = ratio_w * (l + 0.5) - 0.5;
      idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
      float d_w = align_flag ?
                              idx_src_x - x_w : ratio_w * l - x_w;
      float d_e = 1.f - d_w;

      for (int i = 0; i < n; i++) {    // loop for batches
        for (int j = 0; j < c; j++) {  // loop for channels
          // bilinear interpolation grad
          if (data_layout == DataLayout::kNCHW) {
            const T grad = output_grad_t(i, j, k, l);
            input_grad_t(i, j, y_n, x_w) += static_cast<T>(grad * d_s * d_e);
            input_grad_t(i, j, y_s, x_w) += static_cast<T>(grad * d_n * d_e);
            input_grad_t(i, j, y_n, x_e) += static_cast<T>(grad * d_s * d_w);
            input_grad_t(i, j, y_s, x_e) += static_cast<T>(grad * d_n * d_w);
          } else {
            const T grad = output_grad_t(i, k, l, j);
            input_grad_t(i, y_n, x_w, j) += static_cast<T>(grad * d_s * d_e);
            input_grad_t(i, y_s, x_w, j) += static_cast<T>(grad * d_n * d_e);
            input_grad_t(i, y_n, x_e, j) += static_cast<T>(grad * d_s * d_w);
            input_grad_t(i, y_s, x_e, j) += static_cast<T>(grad * d_n * d_w);
          }
        }
      }
    }
  }
}

// Backward of trilinear interpolation (5-D, NCDHW or NDHWC): distributes each
// output gradient to the 8 surrounding input cells with the forward-pass
// weights.
template <typename T>
static void TrilinearInterpolationGrad(
    const Tensor& output_grad, Tensor* input_grad, const float ratio_d,
    const float ratio_h, const float ratio_w, const int in_d, const int in_h,
    const int in_w, const int n, const int c, const int out_d, const int out_h,
    const int out_w, const bool align_corners, const int align_mode,
    const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 5>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 5>::From(output_grad);
  bool align_flag = (align_mode == 0 && !align_corners);

  for (int j = 0; j < out_d; j++) {  // loop for D
    // front/back source slices and their fractional weights
    int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5)
                         : static_cast<int>(ratio_d * j);
    t_f = (t_f > 0) ? t_f : 0;
    int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1);
    float idx_src_t = ratio_d * (j + 0.5) - 0.5;
    idx_src_t = (idx_src_t > 0) ? idx_src_t : 0;
    float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f;
    float d_b = 1.f - d_f;

    for (int k = 0; k < out_h; k++) {  // loop for H
      int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                           : static_cast<int>(ratio_h * k);
      y_n = (y_n > 0) ? y_n : 0;
      int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
      float idx_src_y = ratio_h * (k + 0.5) - 0.5;
      idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
      float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
      float d_s = 1.f - d_n;

      for (int l = 0; l < out_w; l++) {  // loop for W
        int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                             : static_cast<int>(ratio_w * l);
        x_w = (x_w > 0) ? x_w : 0;
        int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
        float idx_src_x = ratio_w * (l + 0.5) - 0.5;
        idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
        float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
        float d_e = 1.f - d_w;

        for (int b = 0; b < n; b++) {    // loop for batches
          for (int i = 0; i < c; i++) {  // loop for channels
            // trilinear interpolation grad
            if (data_layout == DataLayout::kNCHW) {
              const T grad = output_grad_t(b, i, j, k, l);
              input_grad_t(b, i, t_f, y_n, x_w) +=
                  static_cast<T>(grad * d_b * d_s * d_e);
              input_grad_t(b, i, t_f, y_n, x_e) +=
                  static_cast<T>(grad * d_b * d_s * d_w);
              input_grad_t(b, i, t_f, y_s, x_w) +=
                  static_cast<T>(grad * d_b * d_n * d_e);
              input_grad_t(b, i, t_f, y_s, x_e) +=
                  static_cast<T>(grad * d_b * d_n * d_w);
              input_grad_t(b, i, t_b, y_n, x_w) +=
                  static_cast<T>(grad * d_f * d_s * d_e);
              input_grad_t(b, i, t_b, y_n, x_e) +=
                  static_cast<T>(grad * d_f * d_s * d_w);
              input_grad_t(b, i, t_b, y_s, x_w) +=
                  static_cast<T>(grad * d_f * d_n * d_e);
              input_grad_t(b, i, t_b, y_s, x_e) +=
                  static_cast<T>(grad * d_f * d_n * d_w);
            } else {
              const T grad = output_grad_t(b, j, k, l, i);
              input_grad_t(b, t_f, y_n, x_w, i) +=
                  static_cast<T>(grad * d_b * d_s * d_e);
              input_grad_t(b, t_f, y_n, x_e, i) +=
                  static_cast<T>(grad * d_b * d_s * d_w);
              input_grad_t(b, t_f, y_s, x_w, i) +=
                  static_cast<T>(grad * d_b * d_n * d_e);
              input_grad_t(b, t_f, y_s, x_e, i) +=
                  static_cast<T>(grad * d_b * d_n * d_w);
              input_grad_t(b, t_b, y_n, x_w, i) +=
                  static_cast<T>(grad * d_f * d_s * d_e);
              input_grad_t(b, t_b, y_n, x_e, i) +=
                  static_cast<T>(grad * d_f * d_s * d_w);
              input_grad_t(b, t_b, y_s, x_w, i) +=
                  static_cast<T>(grad * d_f * d_n * d_e);
              input_grad_t(b, t_b, y_s, x_e, i) +=
                  static_cast<T>(grad * d_f * d_n * d_w);
            }
          }
        }
      }
    }
  }
}

// Backward of bicubic interpolation (4-D): each output gradient is spread
// over the clamped 4x4 input window, weighted by the separable x/y cubic
// coefficients.
template <typename T>
static void BicubicInterpolationGrad(const Tensor& output_grad,
                                     Tensor* input_grad, const float ratio_h,
                                     const float ratio_w, const int in_h,
                                     const int in_w, const int n, const int c,
                                     const int out_h, const int out_w,
                                     const bool align_corners,
                                     const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 4>::From(output_grad);

  for (int k = 0; k < out_h; k++) {  // loop for images
    T y_n = align_corners ? static_cast<T>(ratio_h * k)
                          : static_cast<T>(ratio_h * (k + 0.5) - 0.5);
    int input_y = floorf(y_n);
    T y_t = y_n - input_y;

    for (int l = 0; l < out_w; l++) {
      T x_n = align_corners ? static_cast<T>(ratio_w * l)
                            : static_cast<T>(ratio_w * (l + 0.5) - 0.5);
      int input_x = floorf(x_n);
      T x_t = x_n - input_x;

      T x_coeffs[4];
      T y_coeffs[4];
      get_cubic_upsample_coefficients<T>(x_coeffs, x_t);
      get_cubic_upsample_coefficients<T>(y_coeffs, y_t);

      for (int i = 0; i < n; i++) {    // loop for batches
        for (int j = 0; j < c; j++) {  // loop for channels
          // bicubic interpolation grad
          for (int ii = 0; ii < 4; ii++) {
            for (int jj = 0; jj < 4; jj++) {
              int access_x = std::max(std::min(input_x - 1 + ii, in_w - 1),
                                      static_cast<int>(0));
              int access_y = std::max(std::min(input_y - 1 + jj, in_h - 1),
                                      static_cast<int>(0));
              if (data_layout == DataLayout::kNCHW) {
                T grad = output_grad_t(i, j, k, l);
                input_grad_t(i, j, access_y, access_x) +=
                    grad * y_coeffs[jj] * x_coeffs[ii];
              } else {
                T grad = output_grad_t(i, k, l, j);
                input_grad_t(i, access_y, access_x, j) +=
                    grad * y_coeffs[jj] * x_coeffs[ii];
              }
            }
          }
        }
      }
    }
  }
}

// Forward driver for 1-D "linear" interpolation on CPU: resolves out_w from
// SizeTensor / Scale / attrs, allocates the output, and dispatches.
// Definition continues on the next chunk.
template <typename T>
static void Interpolate1DCPUFwd(const framework::ExecutionContext& ctx,
                                const Tensor& input, Tensor* output) {
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout =
      framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");

  int out_w = ctx.Attr<int>("out_w");
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  float scale_w = -1.;
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_w = new_size[0];
  } else {
    // float scale_w = -1;
    auto scale_tensor = ctx.Input<Tensor>("Scale");
    auto scale = ctx.Attr<std::vector<float>>("scale");
    if (scale_tensor != nullptr) {
      auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
      scale_w = scale_data[0];
      // NOTE(review): "%d" formats a float scale value here (and in the
      // sibling drivers) — presumably should be "%f"; confirm before fixing.
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %d.",
              scale_w));
    } else {
      if (scale.size() > 0) {
        scale_w = scale[0];
        PADDLE_ENFORCE_EQ(
            scale_w > 0, true,
            platform::errors::InvalidArgument(
                "The scale_w in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %d.",
                scale_w));
      }
    }
    if (scale_w > 0.) {
      out_w = static_cast<int>(in_w * scale_w);
    }
    // OutSize (if present) takes priority over the scale-derived size
    auto out_size = ctx.Input<Tensor>("OutSize");
    if (out_size != nullptr) {
      auto out_size_data = get_new_data_from_tensor<int>(out_size);
      out_w = out_size_data[0];
    }
  }
  PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument(
                                  "out_w in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  framework::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_w};
  } else {
    dim_out = {n, out_w, c};
  }
  output->mutable_data<T>(dim_out, ctx.GetPlace());

  // identity resize: plain copy
  if (in_w == out_w) {
    framework::TensorCopy(input, ctx.GetPlace(), output);
    return;
  }

  float ratio_w = 0.f;
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  if ("linear" == interp_method) {
    LinearInterpolation<T>(input, output, ratio_w, in_w, n, c, out_w,
                           align_corners, align_mode, data_layout);
  }
}

// Forward driver for 2-D interpolation ("bilinear"/"nearest"/"bicubic") on
// CPU: resolves out_h/out_w, allocates the output, and dispatches.
// Definition continues on the next chunk.
template <typename T>
static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx,
                                const Tensor& input, Tensor* output) {
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout =
      framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");

  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  float scale_h = -1;
  float scale_w = -1;

  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_h = new_size[0];
    out_w = new_size[1];
  } else {
    auto scale_tensor = ctx.Input<Tensor>("Scale");
    auto scale =
        ctx.Attr<std::vector<float>>("scale");
    if (scale_tensor != nullptr) {
      auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
      if (scale_data.size() > 1) {
        scale_h = scale_data[0];
        scale_w = scale_data[1];
      } else {
        // single scale broadcasts to both axes
        scale_h = scale_data[0];
        scale_w = scale_data[0];
      }
      // NOTE(review): "%d" formats a float scale value in these messages —
      // presumably should be "%f"; confirm before changing.
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %d.",
              scale_w));
      PADDLE_ENFORCE_EQ(
          scale_h > 0, true,
          platform::errors::InvalidArgument(
              "The scale_h in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %d.",
              scale_h));
    } else {
      if (scale.size() > 1) {
        scale_h = scale[0];
        scale_w = scale[1];
        PADDLE_ENFORCE_EQ(
            scale_w > 0, true,
            platform::errors::InvalidArgument(
                "The scale_w in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %d.",
                scale_w));
        PADDLE_ENFORCE_EQ(
            scale_h > 0, true,
            platform::errors::InvalidArgument(
                "The scale_h in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %d.",
                scale_h));
      }
    }
    if (scale_h > 0. && scale_w > 0.) {
      out_h = static_cast<int>(in_h * scale_h);
      out_w = static_cast<int>(in_w * scale_w);
    }
    // OutSize (if present) takes priority over the scale-derived sizes
    auto out_size = ctx.Input<Tensor>("OutSize");
    if (out_size != nullptr) {
      auto out_size_data = get_new_data_from_tensor<int>(out_size);
      out_h = out_size_data[0];
      out_w = out_size_data[1];
    }
  }
  PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument(
                                  "out_h in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument(
                                  "out_w in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  framework::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_h, out_w};
  } else {
    dim_out = {n, out_h, out_w, c};
  }
  output->mutable_data<T>(dim_out, ctx.GetPlace());

  // identity resize: plain copy
  if (in_h == out_h && in_w == out_w) {
    framework::TensorCopy(input, ctx.GetPlace(), output);
    return;
  }

  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_h > 1) {
    float new_scale_h = 0.f;
    new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
                                : static_cast<float>(in_h) / out_h;
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(new_scale_h);
  }
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }

  if ("bilinear" == interp_method) {
    BilinearInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c,
                             out_h, out_w, align_corners, align_mode,
                             data_layout);
  } else if ("nearest" == interp_method) {
    NearestNeighborInterpolate<T>(input, output, ratio_h, ratio_w, n, c, out_h,
                                  out_w, align_corners, data_layout);
  } else if ("bicubic" == interp_method) {
    BicubicInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c,
                            out_h, out_w, align_corners, data_layout);
  }
}

// Forward driver for 3-D interpolation ("trilinear"/"nearest") on CPU:
// resolves out_d/out_h/out_w from SizeTensor / Scale / attrs, allocates the
// output, and dispatches.
template <typename T>
static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx,
                                const Tensor& input, Tensor* output) {
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout =
      framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");

  int out_d = ctx.Attr<int>("out_d");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  float scale_d = -1;
  float scale_h = -1;
  float scale_w = -1;

  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_d = new_size[0];
    out_h = new_size[1];
    out_w = new_size[2];
  } else {
    auto scale_tensor = ctx.Input<Tensor>("Scale");
    auto scale = ctx.Attr<std::vector<float>>("scale");
    if (scale_tensor != nullptr) {
      auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
      if (scale_data.size() > 1) {
        scale_d = scale_data[0];
        scale_h = scale_data[1];
        scale_w = scale_data[2];
      } else {
        // single scale broadcasts to all three axes
        scale_d = scale_data[0];
        scale_h = scale_data[0];
        scale_w = scale_data[0];
      }
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %d.",
              scale_w));
      PADDLE_ENFORCE_EQ(
          scale_h > 0, true,
          platform::errors::InvalidArgument(
              "The scale_h in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %d.",
              scale_h));
      PADDLE_ENFORCE_EQ(
          scale_d > 0, true,
          platform::errors::InvalidArgument(
              "The scale_d in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %d.",
              scale_d));
    } else {
      if (scale.size() > 1) {
        scale_d = scale[0];
        scale_h = scale[1];
        scale_w = scale[2];
        PADDLE_ENFORCE_EQ(
            scale_w > 0, true,
            platform::errors::InvalidArgument(
                "The scale_w in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %d.",
                scale_w));
        PADDLE_ENFORCE_EQ(
            scale_h > 0, true,
            platform::errors::InvalidArgument(
                "The scale_h in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %d.",
                scale_h));
        PADDLE_ENFORCE_EQ(
            scale_d > 0, true,
            platform::errors::InvalidArgument(
                "The scale_d in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %d.",
                scale_d));
      }
    }
    if (scale_w > 0. && scale_h > 0. && scale_d > 0.) {
      out_d = static_cast<int>(in_d * scale_d);
      out_h = static_cast<int>(in_h * scale_h);
      out_w = static_cast<int>(in_w * scale_w);
    }
    // OutSize (if present) takes priority over the scale-derived sizes
    auto out_size = ctx.Input<Tensor>("OutSize");
    if (out_size != nullptr) {
      auto out_size_data = get_new_data_from_tensor<int>(out_size);
      out_d = out_size_data[0];
      out_h = out_size_data[1];
      out_w = out_size_data[2];
    }
  }
  PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument(
                                  "out_d in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument(
                                  "out_h in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument(
                                  "out_w in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  framework::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_d, out_h, out_w};
  } else {
    dim_out = {n, out_d, out_h, out_w, c};
  }
  output->mutable_data<T>(dim_out, ctx.GetPlace());

  // identity resize: plain copy
  if (in_d == out_d && in_h == out_h && in_w == out_w) {
    framework::TensorCopy(input, ctx.GetPlace(), output);
    return;
  }

  float ratio_d = 0.f;
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_d > 1) {
    float new_scale_d = 0.f;
    new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d)
                                : static_cast<float>(in_d) / out_d;
    ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
                              : static_cast<float>(new_scale_d);
  }
  if (out_h > 1) {
    float new_scale_h = 0.f;
    new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
                                : static_cast<float>(in_h) / out_h;
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(new_scale_h);
  }
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }

  if ("trilinear" == interp_method) {
    TrilinearInterpolation<T>(input, output, ratio_d, ratio_h, ratio_w, in_d,
                              in_h, in_w, n, c, out_d, out_h, out_w,
                              align_corners, align_mode, data_layout);
  } else if ("nearest" == interp_method) {
    NearestNeighbor3DInterpolate<T>(input, output, ratio_d, ratio_h, ratio_w,
                                    n, c, out_d, out_h, out_w, align_corners,
                                    data_layout);
  }
}

// Backward driver for 1-D "linear" interpolation on CPU: resolves out_w the
// same way as the forward pass, zero-fills input_grad, and dispatches.
// Definition continues on the next chunk.
template <typename T>
static void Interpolate1DCPUBwd(const framework::ExecutionContext& ctx,
                                Tensor* input_grad, const Tensor& output_grad) {
  auto* input = ctx.Input<Tensor>("X");
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout =
      framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");

  int out_w = ctx.Attr<int>("out_w");
  float scale_w = -1.0;
  auto scale_tensor = ctx.Input<Tensor>("Scale");
  auto scale = ctx.Attr<std::vector<float>>("scale");
  if (scale_tensor != nullptr) {
    auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
    scale_w = scale_data[0];
    PADDLE_ENFORCE_EQ(
        scale_w > 0, true,
        platform::errors::InvalidArgument(
            "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %d.",
            scale_w));
  } else {
    if (scale.size() > 0) {
      scale_w = scale[0];
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %d.",
              scale_w));
    }
  }
  if (scale_w > 0.)
  {
    out_w = static_cast<int>(in_w * scale_w);
  }
  // OutSize / SizeTensor (if present) override the scale-derived size
  auto out_size = ctx.Input<Tensor>("OutSize");
  if (out_size != nullptr) {
    auto out_size_data = get_new_data_from_tensor<int>(out_size);
    out_w = out_size_data[0];
  }
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_w = new_size[0];
  }

  framework::DDim dim_grad;
  if (data_layout == DataLayout::kNCHW) {
    dim_grad = {n, c, in_w};
  } else {
    dim_grad = {n, in_w, c};
  }
  input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());

  auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>();
  phi::funcs::SetConstant<platform::CPUDeviceContext, T> zero;
  // gradients are accumulated with +=, so start from zero
  zero(device_ctx, input_grad, static_cast<T>(0.0));

  // identity resize: gradient passes through unchanged
  if (in_w == out_w) {
    framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
    return;
  }

  float ratio_w = 0.f;
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  if ("linear" == interp_method) {
    LinearInterpolationGrad<T>(output_grad, input_grad, ratio_w, in_w, n, c,
                               out_w, align_corners, align_mode, data_layout);
  }
}

// Backward driver for 2-D interpolation ("bilinear"/"nearest"/"bicubic") on
// CPU: resolves out_h/out_w, zero-fills input_grad, and dispatches.
// Definition continues on the next chunk.
template <typename T>
static void Interpolate2DCPUBwd(const framework::ExecutionContext& ctx,
                                Tensor* input_grad, const Tensor& output_grad) {
  auto* input = ctx.Input<Tensor>("X");
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout =
      framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");

  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  float scale_h = -1;
  float scale_w = -1;
  auto scale_tensor = ctx.Input<Tensor>("Scale");
  auto scale = ctx.Attr<std::vector<float>>("scale");
  if (scale_tensor != nullptr) {
    auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
    if (scale_data.size() > 1) {
      scale_h = scale_data[0];
      scale_w = scale_data[1];
    } else {
      // single scale broadcasts to both axes
      scale_w = scale_data[0];
      scale_h = scale_data[0];
    }
    // NOTE(review): "%d" formats a float scale value here — presumably
    // should be "%f"; confirm before changing.
    PADDLE_ENFORCE_EQ(
        scale_w > 0, true,
        platform::errors::InvalidArgument(
            "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %d.",
            scale_w));
    PADDLE_ENFORCE_EQ(
        scale_h > 0, true,
        platform::errors::InvalidArgument(
            "The scale_h in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %d.",
            scale_h));
  } else {
    if (scale.size() > 1) {
      scale_h = scale[0];
      scale_w = scale[1];
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %d.",
              scale_w));
      PADDLE_ENFORCE_EQ(
          scale_h > 0, true,
platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } } if (scale_h > 0. && scale_w > 0.) { out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_h, in_w}; } else { dim_grad = {n, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); phi::funcs::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("bilinear" == interp_method) { BilinearInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighborInterpolateGrad<T>(output_grad, input_grad, ratio_h, ratio_w, n, c, out_h, out_w, align_corners, data_layout); } else if ("bicubic" == interp_method) { BicubicInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_d = -1; float scale_h = -1; float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_d = scale_data[0]; scale_h = scale_data[1]; scale_w = scale_data[2]; } else { scale_d = scale_data[0]; scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, 
platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } else { if (scale.size() > 1) { scale_d = scale[0]; scale_h = scale[1]; scale_w = scale[2]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } } if (scale_d > 0. && scale_h > 0. && scale_w > 0.) 
{ out_d = static_cast<int>(in_d * scale_d); out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_d, in_h, in_w}; } else { dim_grad = {n, in_d, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); phi::funcs::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { float new_scale_d = 0.f; new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d) : static_cast<float>(in_d) / out_d; ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(new_scale_d); } if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("trilinear" == interp_method) { TrilinearInterpolationGrad<T>( output_grad, input_grad, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighbor3DInterpolateGrad<T>(output_grad, input_grad, ratio_d, ratio_h, ratio_w, n, c, out_d, out_h, out_w, align_corners, data_layout); } } template <typename T> class InterpolateV2Kernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); auto input_dims = input->dims(); if (input_dims.size() == 3) { // 1D interpolation Interpolate1DCPUFwd<T>(ctx, *input, output); } else if (input_dims.size() == 4) { // 2D interpolation Interpolate2DCPUFwd<T>(ctx, *input, output); } else if (input_dims.size() == 5) { // 3D interpolation Interpolate3DCPUFwd<T>(ctx, *input, output); } } }; template <typename T> class InterpolateV2GradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto output_grad_dims = output_grad->dims(); if (output_grad_dims.size() == 3) { // 1D interpolation grad Interpolate1DCPUBwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 4) { // 2D interpolation grad Interpolate2DCPUBwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 5) { // 3D interpolation grad Interpolate3DCPUBwd<T>(ctx, input_grad, *output_grad); } } }; } // namespace operators } // namespace paddle
c-tree.h
/* Definitions for C parsing and type checking. Copyright (C) 1987-2019 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_C_TREE_H #define GCC_C_TREE_H #include "c-family/c-common.h" #include "diagnostic.h" /* struct lang_identifier is private to c-decl.c, but langhooks.c needs to know how big it is. This is sanity-checked in c-decl.c. */ #define C_SIZEOF_STRUCT_LANG_IDENTIFIER \ (sizeof(struct c_common_identifier) + 3 * sizeof(void*)) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ #define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */ #define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2(TYPE) /* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE nonzero if the definition of the type has already started. */ #define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0(TYPE) /* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable declarations whose type would be completed by completing that type. */ #define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD(TYPE) /* In an IDENTIFIER_NODE, nonzero if this identifier is actually a keyword. C_RID_CODE (node) is then the RID_* value of the keyword. */ #define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0(ID) /* Record whether a type or decl was written with nonconstant size. Note that TYPE_SIZE may have simplified to a constant. 
*/ #define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1(TYPE) #define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0(TYPE) /* Record whether a type is defined inside a struct or union type. This is used for -Wc++-compat. */ #define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2(TYPE) /* Record whether an "incomplete type" error was given for the type. */ #define C_TYPE_ERROR_REPORTED(TYPE) TYPE_LANG_FLAG_3(TYPE) /* Record whether a typedef for type `int' was actually `signed int'. */ #define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1(EXP) /* For a FUNCTION_DECL, nonzero if it was defined without an explicit return type. */ #define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1(EXP) /* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */ #define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2(EXP) /* For a PARM_DECL, nonzero if it was declared as an array. */ #define C_ARRAY_PARAMETER(NODE) DECL_LANG_FLAG_0(NODE) /* For FUNCTION_DECLs, evaluates true if the decl is built-in but has been declared. */ #define C_DECL_DECLARED_BUILTIN(EXP) DECL_LANG_FLAG_3(FUNCTION_DECL_CHECK(EXP)) /* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a built-in prototype and does not have a non-built-in prototype. */ #define C_DECL_BUILTIN_PROTOTYPE(EXP) DECL_LANG_FLAG_6(FUNCTION_DECL_CHECK(EXP)) /* Record whether a decl was declared register. This is strictly a front-end flag, whereas DECL_REGISTER is used for code generation; they may differ for structures with volatile fields. */ #define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4(EXP) /* Record whether a decl was used in an expression anywhere except an unevaluated operand of sizeof / typeof / alignof. This is only used for functions declared static but not defined, though outside sizeof and typeof it is set for other function decls as well. */ #define C_DECL_USED(EXP) DECL_LANG_FLAG_5(FUNCTION_DECL_CHECK(EXP)) /* Record whether a variable has been declared threadprivate by #pragma omp threadprivate. 
*/ #define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3(VAR_DECL_CHECK(DECL)) /* Set on VAR_DECLs for compound literals. */ #define C_DECL_COMPOUND_LITERAL_P(DECL) DECL_LANG_FLAG_5(VAR_DECL_CHECK(DECL)) /* Nonzero for a decl which either doesn't exist or isn't a prototype. N.B. Could be simplified if all built-in decls had complete prototypes (but this is presently difficult because some of them need FILE*). */ #define C_DECL_ISNT_PROTOTYPE(EXP) \ (EXP == 0 || (!prototype_p(TREE_TYPE(EXP)) && !fndecl_built_in_p(EXP))) /* For FUNCTION_TYPE, a hidden list of types of arguments. The same as TYPE_ARG_TYPES for functions with prototypes, but created for functions without prototypes. */ #define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1(NODE) /* For a CONSTRUCTOR, whether some initializer contains a subexpression meaning it is not a constant expression. */ #define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1(CONSTRUCTOR_CHECK(EXPR)) /* For a SAVE_EXPR, nonzero if the operand of the SAVE_EXPR has already been folded. */ #define SAVE_EXPR_FOLDED_P(EXP) TREE_LANG_FLAG_1(SAVE_EXPR_CHECK(EXP)) /* Record parser information about an expression that is irrelevant for code generation alongside a tree representing its value. */ struct c_expr { /* The value of the expression. */ tree value; /* Record the original unary/binary operator of an expression, which may have been changed by fold, STRING_CST for unparenthesized string constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls (even if parenthesized), for subexpressions, and for non-constant initializers, or ERROR_MARK for other expressions (including parenthesized expressions). */ enum tree_code original_code; /* If not NULL, the original type of an expression. This will differ from the type of the value field for an enum constant. The type of an enum constant is a plain integer type, but this field will be the enum type. */ tree original_type; /* The source range of this expression. 
This is redundant for node values that have locations, but not all node kinds have locations (e.g. constants, and references to params, locals, etc), so we stash a copy here. */ source_range src_range; /* Access to the first and last locations within the source spelling of this expression. */ location_t get_start() const { return src_range.m_start; } location_t get_finish() const { return src_range.m_finish; } location_t get_location() const { if (EXPR_HAS_LOCATION(value)) return EXPR_LOCATION(value); else return make_location(get_start(), get_start(), get_finish()); } /* Set the value to error_mark_node whilst ensuring that src_range is initialized. */ void set_error() { value = error_mark_node; src_range.m_start = UNKNOWN_LOCATION; src_range.m_finish = UNKNOWN_LOCATION; } }; /* Type alias for struct c_expr. This allows to use the structure inside the VEC types. */ typedef struct c_expr c_expr_t; /* A kind of type specifier. Note that this information is currently only used to distinguish tag definitions, tag references and typeof uses. */ enum c_typespec_kind { /* No typespec. This appears only in struct c_declspec. */ ctsk_none, /* A reserved keyword type specifier. */ ctsk_resword, /* A reference to a tag, previously declared, such as "struct foo". This includes where the previous declaration was as a different kind of tag, in which case this is only valid if shadowing that tag in an inner scope. */ ctsk_tagref, /* A reference to a tag, not previously declared in a visible scope. */ ctsk_tagfirstref, /* A definition of a tag such as "struct foo { int a; }". */ ctsk_tagdef, /* A typedef name. */ ctsk_typedef, /* An ObjC-specific kind of type specifier. */ ctsk_objc, /* A typeof specifier, or _Atomic ( type-name ). */ ctsk_typeof }; /* A type specifier: this structure is created in the parser and passed to declspecs_add_type only. */ struct c_typespec { /* What kind of type specifier this is. 
*/ enum c_typespec_kind kind; /* Whether the expression has operands suitable for use in constant expressions. */ bool expr_const_operands; /* The specifier itself. */ tree spec; /* An expression to be evaluated before the type specifier, in the case of typeof specifiers, or NULL otherwise or if no such expression is required for a particular typeof specifier. In particular, when typeof is applied to an expression of variably modified type, that expression must be evaluated in order to determine array sizes that form part of the type, but the expression itself (as opposed to the array sizes) forms no part of the type and so needs to be recorded separately. */ tree expr; }; /* A storage class specifier. */ enum c_storage_class { csc_none, csc_auto, csc_extern, csc_register, csc_static, csc_typedef }; /* A type specifier keyword "void", "_Bool", "char", "int", "float", "double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum", or none of these. */ enum c_typespec_keyword { cts_none, cts_void, cts_bool, cts_char, cts_int, cts_float, cts_int_n, cts_double, cts_dfloat32, cts_dfloat64, cts_dfloat128, cts_floatn_nx, cts_fract, cts_accum, cts_auto_type }; /* This enum lists all the possible declarator specifiers, storage class or attribute that a user can write. There is at least one enumerator per possible declarator specifier in the struct c_declspecs below. It is used to index the array of declspec locations in struct c_declspecs. */ enum c_declspec_word { cdw_typespec /* A catch-all for a typespec. */, cdw_storage_class /* A catch-all for a storage class */, cdw_attributes, cdw_typedef, cdw_explicit_signed, cdw_deprecated, cdw_default_int, cdw_long, cdw_long_long, cdw_short, cdw_signed, cdw_unsigned, cdw_complex, cdw_inline, cdw_noreturn, cdw_thread, cdw_const, cdw_volatile, cdw_restrict, cdw_atomic, cdw_saturating, cdw_alignas, cdw_address_space, cdw_gimple, cdw_rtl, cdw_number_of_elements /* This one must always be the last enumerator. 
*/ }; enum c_declspec_il { cdil_none, cdil_gimple, /* __GIMPLE */ cdil_gimple_cfg, /* __GIMPLE(cfg) */ cdil_gimple_ssa, /* __GIMPLE(ssa) */ cdil_rtl /* __RTL */ }; /* A sequence of declaration specifiers in C. When a new declaration specifier is added, please update the enum c_declspec_word above accordingly. */ struct c_declspecs { location_t locations[cdw_number_of_elements]; /* The type specified, if a single type specifier such as a struct, union or enum specifier, typedef name or typeof specifies the whole type, or NULL_TREE if none or a keyword such as "void" or "char" is used. Does not include qualifiers. */ tree type; /* Any expression to be evaluated before the type, from a typeof specifier. */ tree expr; /* The attributes from a typedef decl. */ tree decl_attr; /* When parsing, the attributes. Outside the parser, this will be NULL; attributes (possibly from multiple lists) will be passed separately. */ tree attrs; /* The pass to start compiling a __GIMPLE or __RTL function with. */ char* gimple_or_rtl_pass; /* The base-2 log of the greatest alignment required by an _Alignas specifier, in bytes, or -1 if no such specifiers with nonzero alignment. */ int align_log; /* For the __intN declspec, this stores the index into the int_n_* arrays. */ int int_n_idx; /* For the _FloatN and _FloatNx declspec, this stores the index into the floatn_nx_types array. */ int floatn_nx_idx; /* The storage class specifier, or csc_none if none. */ enum c_storage_class storage_class; /* Any type specifier keyword used such as "int", not reflecting modifiers such as "short", or cts_none if none. */ ENUM_BITFIELD(c_typespec_keyword) typespec_word : 8; /* The kind of type specifier if one has been seen, ctsk_none otherwise. */ ENUM_BITFIELD(c_typespec_kind) typespec_kind : 3; ENUM_BITFIELD(c_declspec_il) declspec_il : 3; /* Whether any expressions in typeof specifiers may appear in constant expressions. 
*/ BOOL_BITFIELD expr_const_operands : 1; /* Whether any declaration specifiers have been seen at all. */ BOOL_BITFIELD declspecs_seen_p : 1; /* Whether something other than a storage class specifier or attribute has been seen. This is used to warn for the obsolescent usage of storage class specifiers other than at the start of the list. (Doing this properly would require function specifiers to be handled separately from storage class specifiers.) */ BOOL_BITFIELD non_sc_seen_p : 1; /* Whether the type is specified by a typedef or typeof name. */ BOOL_BITFIELD typedef_p : 1; /* Whether the type is explicitly "signed" or specified by a typedef whose type is explicitly "signed". */ BOOL_BITFIELD explicit_signed_p : 1; /* Whether the specifiers include a deprecated typedef. */ BOOL_BITFIELD deprecated_p : 1; /* Whether the type defaulted to "int" because there were no type specifiers. */ BOOL_BITFIELD default_int_p : 1; /* Whether "long" was specified. */ BOOL_BITFIELD long_p : 1; /* Whether "long" was specified more than once. */ BOOL_BITFIELD long_long_p : 1; /* Whether "short" was specified. */ BOOL_BITFIELD short_p : 1; /* Whether "signed" was specified. */ BOOL_BITFIELD signed_p : 1; /* Whether "unsigned" was specified. */ BOOL_BITFIELD unsigned_p : 1; /* Whether "complex" was specified. */ BOOL_BITFIELD complex_p : 1; /* Whether "inline" was specified. */ BOOL_BITFIELD inline_p : 1; /* Whether "_Noreturn" was speciied. */ BOOL_BITFIELD noreturn_p : 1; /* Whether "__thread" or "_Thread_local" was specified. */ BOOL_BITFIELD thread_p : 1; /* Whether "__thread" rather than "_Thread_local" was specified. */ BOOL_BITFIELD thread_gnu_p : 1; /* Whether "const" was specified. */ BOOL_BITFIELD const_p : 1; /* Whether "volatile" was specified. */ BOOL_BITFIELD volatile_p : 1; /* Whether "restrict" was specified. */ BOOL_BITFIELD restrict_p : 1; /* Whether "_Atomic" was specified. */ BOOL_BITFIELD atomic_p : 1; /* Whether "_Sat" was specified. 
*/ BOOL_BITFIELD saturating_p : 1; /* Whether any alignment specifier (even with zero alignment) was specified. */ BOOL_BITFIELD alignas_p : 1; /* The address space that the declaration belongs to. */ addr_space_t address_space; }; /* The various kinds of declarators in C. */ enum c_declarator_kind { /* An identifier. */ cdk_id, /* A function. */ cdk_function, /* An array. */ cdk_array, /* A pointer. */ cdk_pointer, /* Parenthesized declarator with nested attributes. */ cdk_attrs }; struct c_arg_tag { /* The argument name. */ tree id; /* The type of the argument. */ tree type; }; /* Information about the parameters in a function declarator. */ struct c_arg_info { /* A list of parameter decls. */ tree parms; /* A list of structure, union and enum tags defined. */ vec<c_arg_tag, va_gc>* tags; /* A list of argument types to go in the FUNCTION_TYPE. */ tree types; /* A list of non-parameter decls (notably enumeration constants) defined with the parameters. */ tree others; /* A compound expression of VLA sizes from the parameters, or NULL. In a function definition, these are used to ensure that side-effects in sizes of arrays converted to pointers (such as a parameter int i[n++]) take place; otherwise, they are ignored. */ tree pending_sizes; /* True when these arguments had [*]. */ BOOL_BITFIELD had_vla_unspec : 1; }; /* A declarator. */ struct c_declarator { /* The kind of declarator. */ enum c_declarator_kind kind; location_t id_loc; /* Currently only set for cdk_id, cdk_array. */ /* Except for cdk_id, the contained declarator. For cdk_id, NULL. */ struct c_declarator* declarator; union { /* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract declarator. */ tree id; /* For functions. */ struct c_arg_info* arg_info; /* For arrays. */ struct { /* The array dimension, or NULL for [] and [*]. */ tree dimen; /* The qualifiers inside []. */ int quals; /* The attributes (currently ignored) inside []. */ tree attrs; /* Whether [static] was used. 
*/ BOOL_BITFIELD static_p : 1; /* Whether [*] was used. */ BOOL_BITFIELD vla_unspec_p : 1; } array; /* For pointers, the qualifiers on the pointer type. */ int pointer_quals; /* For attributes. */ tree attrs; } u; }; /* A type name. */ struct c_type_name { /* The declaration specifiers. */ struct c_declspecs* specs; /* The declarator. */ struct c_declarator* declarator; }; /* A parameter. */ struct c_parm { /* The declaration specifiers, minus any prefix attributes. */ struct c_declspecs* specs; /* The attributes. */ tree attrs; /* The declarator. */ struct c_declarator* declarator; /* The location of the parameter. */ location_t loc; }; /* Used when parsing an enum. Initialized by start_enum. */ struct c_enum_contents { /* While defining an enum type, this is 1 plus the last enumerator constant value. */ tree enum_next_value; /* Nonzero means that there was overflow computing enum_next_value. */ int enum_overflow; }; /* A type of reference to a static identifier in an inline function. */ enum c_inline_static_type { /* Identifier with internal linkage used in function that may be an inline definition (i.e., file-scope static). */ csi_internal, /* Modifiable object with static storage duration defined in function that may be an inline definition (i.e., local static). 
*/ csi_modifiable }; /* in c-parser.c */ extern void c_parse_init(void); extern bool c_keyword_starts_typename(enum rid keyword); /* in c-aux-info.c */ extern void gen_aux_info_record(tree, int, int, int); /* in c-decl.c */ struct c_spot_bindings; struct c_struct_parse_info; extern struct obstack parser_obstack; extern tree c_break_label; extern tree c_cont_label; extern bool global_bindings_p(void); extern tree pushdecl(tree); extern void push_scope(void); extern tree pop_scope(void); extern void c_bindings_start_stmt_expr(struct c_spot_bindings*); extern void c_bindings_end_stmt_expr(struct c_spot_bindings*); extern void record_inline_static(location_t, tree, tree, enum c_inline_static_type); extern void c_init_decl_processing(void); extern void c_print_identifier(FILE*, tree, int); extern int quals_from_declspecs(const struct c_declspecs*); extern struct c_declarator* build_array_declarator(location_t, tree, struct c_declspecs*, bool, bool); extern tree build_enumerator(location_t, location_t, struct c_enum_contents*, tree, tree); extern tree check_for_loop_decls(location_t, bool); extern void mark_forward_parm_decls(void); extern void declare_parm_level(void); extern void undeclared_variable(location_t, tree); extern tree lookup_label_for_goto(location_t, tree); extern tree declare_label(tree); extern tree define_label(location_t, tree); extern struct c_spot_bindings* c_get_switch_bindings(void); extern void c_release_switch_bindings(struct c_spot_bindings*); extern bool c_check_switch_jump_warnings(struct c_spot_bindings*, location_t, location_t); extern void finish_decl(tree, location_t, tree, tree, tree); extern tree finish_enum(tree, tree, tree); extern void finish_function(void); extern tree finish_struct(location_t, tree, tree, tree, struct c_struct_parse_info*); extern struct c_arg_info* build_arg_info(void); extern struct c_arg_info* get_parm_info(bool, tree); extern tree grokfield(location_t, struct c_declarator*, struct c_declspecs*, tree, tree*); 
extern tree groktypename(struct c_type_name*, tree*, bool*); extern tree grokparm(const struct c_parm*, tree*); extern tree implicitly_declare(location_t, tree); extern void keep_next_level(void); extern void pending_xref_error(void); extern void c_push_function_context(void); extern void c_pop_function_context(void); extern void push_parm_decl(const struct c_parm*, tree*); extern struct c_declarator* set_array_declarator_inner(struct c_declarator*, struct c_declarator*); extern tree c_builtin_function(tree); extern tree c_builtin_function_ext_scope(tree); extern void shadow_tag(const struct c_declspecs*); extern void shadow_tag_warned(const struct c_declspecs*, int); extern tree start_enum(location_t, struct c_enum_contents*, tree); extern bool start_function(struct c_declspecs*, struct c_declarator*, tree); extern tree start_decl(struct c_declarator*, struct c_declspecs*, bool, tree); extern tree start_struct(location_t, enum tree_code, tree, struct c_struct_parse_info**); extern void store_parm_decls(void); extern void store_parm_decls_from(struct c_arg_info*); extern void temp_store_parm_decls(tree, tree); extern void temp_pop_parm_decls(void); extern tree xref_tag(enum tree_code, tree); extern struct c_typespec parser_xref_tag(location_t, enum tree_code, tree); extern struct c_parm* build_c_parm(struct c_declspecs*, tree, struct c_declarator*, location_t); extern struct c_declarator* build_attrs_declarator(tree, struct c_declarator*); extern struct c_declarator* build_function_declarator(struct c_arg_info*, struct c_declarator*); extern struct c_declarator* build_id_declarator(tree); extern struct c_declarator* make_pointer_declarator(struct c_declspecs*, struct c_declarator*); extern struct c_declspecs* build_null_declspecs(void); extern struct c_declspecs* declspecs_add_qual(location_t, struct c_declspecs*, tree); extern struct c_declspecs* declspecs_add_type(location_t, struct c_declspecs*, struct c_typespec); extern struct c_declspecs* 
declspecs_add_scspec(location_t, struct c_declspecs*, tree); extern struct c_declspecs* declspecs_add_attrs(location_t, struct c_declspecs*, tree); extern struct c_declspecs* declspecs_add_addrspace(location_t, struct c_declspecs*, addr_space_t); extern struct c_declspecs* declspecs_add_alignas(location_t, struct c_declspecs*, tree); extern struct c_declspecs* finish_declspecs(struct c_declspecs*); /* in c-objc-common.c */ extern bool c_objc_common_init(void); extern bool c_missing_noreturn_ok_p(tree); extern bool c_warn_unused_global_decl(const_tree); extern void c_initialize_diagnostics(diagnostic_context*); extern bool c_vla_unspec_p(tree x, tree fn); extern alias_set_type c_get_alias_set(tree); /* in c-typeck.c */ extern int in_alignof; extern int in_sizeof; extern int in_typeof; extern tree c_last_sizeof_arg; extern location_t c_last_sizeof_loc; extern struct c_switch* c_switch_stack; extern tree c_objc_common_truthvalue_conversion(location_t, tree); extern tree require_complete_type(location_t, tree); extern bool same_translation_unit_p(const_tree, const_tree); extern int comptypes(tree, tree); extern int comptypes_check_different_types(tree, tree, bool*); extern bool c_vla_type_p(const_tree); extern bool c_mark_addressable(tree, bool = false); extern void c_incomplete_type_error(location_t, const_tree, const_tree); extern tree c_type_promotes_to(tree); extern struct c_expr default_function_array_conversion(location_t, struct c_expr); extern struct c_expr default_function_array_read_conversion(location_t, struct c_expr); extern struct c_expr convert_lvalue_to_rvalue(location_t, struct c_expr, bool, bool); extern tree decl_constant_value_1(tree, bool); extern void mark_exp_read(tree); extern tree composite_type(tree, tree); extern tree build_component_ref(location_t, tree, tree, location_t); extern tree build_array_ref(location_t, tree, tree); extern tree build_external_ref(location_t, tree, bool, tree*); extern void pop_maybe_used(bool); extern struct c_expr 
c_expr_sizeof_expr(location_t, struct c_expr); extern struct c_expr c_expr_sizeof_type(location_t, struct c_type_name*); extern struct c_expr parser_build_unary_op(location_t, enum tree_code, struct c_expr); extern struct c_expr parser_build_binary_op(location_t, enum tree_code, struct c_expr, struct c_expr); extern tree build_conditional_expr(location_t, tree, bool, tree, tree, location_t, tree, tree, location_t); extern tree build_compound_expr(location_t, tree, tree); extern tree c_cast_expr(location_t, struct c_type_name*, tree); extern tree build_c_cast(location_t, tree, tree); extern void store_init_value(location_t, tree, tree, tree); extern void maybe_warn_string_init(location_t, tree, struct c_expr); extern void start_init(tree, tree, int, rich_location*); extern void finish_init(void); extern void really_start_incremental_init(tree); extern void finish_implicit_inits(location_t, struct obstack*); extern void push_init_level(location_t, int, struct obstack*); extern struct c_expr pop_init_level(location_t, int, struct obstack*, location_t); extern void set_init_index(location_t, tree, tree, struct obstack*); extern void set_init_label(location_t, tree, location_t, struct obstack*); extern void process_init_element(location_t, struct c_expr, bool, struct obstack*); extern tree build_compound_literal(location_t, tree, tree, bool, unsigned int); extern void check_compound_literal_type(location_t, struct c_type_name*); extern tree c_start_case(location_t, location_t, tree, bool); extern void c_finish_case(tree, tree); extern tree build_asm_expr(location_t, tree, tree, tree, tree, tree, bool, bool); extern tree build_asm_stmt(bool, tree); extern int c_types_compatible_p(tree, tree); extern tree c_begin_compound_stmt(bool); extern tree c_end_compound_stmt(location_t, tree, bool); extern void c_finish_if_stmt(location_t, tree, tree, tree); extern void c_finish_loop(location_t, tree, tree, tree, tree, tree, bool); extern tree c_begin_stmt_expr(void); extern tree 
c_finish_stmt_expr(location_t, tree); extern tree c_process_expr_stmt(location_t, tree); extern tree c_finish_expr_stmt(location_t, tree); extern tree c_finish_return(location_t, tree, tree); extern tree c_finish_bc_stmt(location_t, tree*, bool); extern tree c_finish_goto_label(location_t, tree); extern tree c_finish_goto_ptr(location_t, tree); extern tree c_expr_to_decl(tree, bool*, bool*); extern tree c_finish_omp_construct(location_t, enum tree_code, tree, tree); extern tree c_finish_oacc_data(location_t, tree, tree); extern tree c_finish_oacc_host_data(location_t, tree, tree); extern tree c_begin_omp_parallel(void); extern tree c_finish_omp_parallel(location_t, tree, tree); extern tree c_begin_omp_task(void); extern tree c_finish_omp_task(location_t, tree, tree); extern void c_finish_omp_cancel(location_t, tree); extern void c_finish_omp_cancellation_point(location_t, tree); extern tree c_finish_omp_clauses(tree, enum c_omp_region_type); extern tree c_build_va_arg(location_t, tree, location_t, tree); extern tree c_finish_transaction(location_t, tree, int); extern bool c_tree_equal(tree, tree); extern tree c_build_function_call_vec(location_t, vec<location_t>, tree, vec<tree, va_gc>*, vec<tree, va_gc>*); extern tree c_omp_clause_copy_ctor(tree, tree, tree); /* Set to 0 at beginning of a function definition, set to 1 if a return statement that specifies a return value is seen. */ extern int current_function_returns_value; /* Set to 0 at beginning of a function definition, set to 1 if a return statement with no argument is seen. */ extern int current_function_returns_null; /* Set to 0 at beginning of a function definition, set to 1 if a call to a noreturn function is seen. */ extern int current_function_returns_abnormally; /* In c-decl.c */ /* Tell the binding oracle what kind of binding we are looking for. 
*/ enum c_oracle_request { C_ORACLE_SYMBOL, C_ORACLE_TAG, C_ORACLE_LABEL }; /* If this is non-NULL, then it is a "binding oracle" which can lazily create bindings when needed by the C compiler. The oracle is told the name and type of the binding to create. It can call pushdecl or the like to ensure the binding is visible; or do nothing, leaving the binding untouched. c-decl.c takes note of when the oracle has been called and will not call it again if it fails to create a given binding. */ typedef void c_binding_oracle_function(enum c_oracle_request, tree identifier); extern c_binding_oracle_function* c_binding_oracle; extern void c_finish_incomplete_decl(tree); extern tree c_omp_reduction_id(enum tree_code, tree); extern tree c_omp_reduction_decl(tree); extern tree c_omp_reduction_lookup(tree, tree); extern tree c_check_omp_declare_reduction_r(tree*, int*, void*); extern void c_pushtag(location_t, tree, tree); extern void c_bind(location_t, tree, bool); extern bool tag_exists_p(enum tree_code, tree); /* In c-errors.c */ extern bool pedwarn_c90(location_t, int opt, const char*, ...) ATTRIBUTE_GCC_DIAG(3, 4); extern bool pedwarn_c99(location_t, int opt, const char*, ...) ATTRIBUTE_GCC_DIAG(3, 4); extern bool pedwarn_c11(location_t, int opt, const char*, ...) ATTRIBUTE_GCC_DIAG(3, 4); extern void set_c_expr_source_range(c_expr* expr, location_t start, location_t finish); extern void set_c_expr_source_range(c_expr* expr, source_range src_range); /* In c-fold.c */ extern vec<tree> incomplete_record_decls; #if CHECKING_P namespace selftest { extern void run_c_tests(void); } // namespace selftest #endif /* #if CHECKING_P */ #endif /* ! GCC_C_TREE_H */
Example_array_sections.4.c
/*
 * @@name:       array_sections.4c
 * @@type:       C
 * @@compilable: yes
 * @@linkable:   no
 * @@expect:     success
 * @@version:    omp_4.0
 */
/* Demonstrates mapping of overlapping array sections: the enclosing
 * `target data` region maps A[0:10]; the inner `target` region maps the
 * pointer-based section p[3:7], which lies inside the already-mapped range. */
void foo ()
{
   int A[30], *p;
   #pragma omp target data map( A[0:10] )
   {
      /* p aliases the start of the mapped section of A */
      p = &A[0];
      #pragma omp target map( p[3:7] )
      {
         A[2] = 0;  /* inside A[0:10], mapped by the outer region */
         p[8] = 0;  /* p[8] is the same storage as A[8] */
         A[8] = 1;
      }
   }
}
evaluation.c
#include "common.h"
#ifdef _OPENMP
/* One level of top-down BFS expansion (OpenMP version).
 * Scans the current `frontier` (num_frontier vertices); every neighbour
 * still marked NOT_VISITED in `bitmap` is marked VISITED, gets `level`
 * written to `distance`, and is appended to `next`.
 * `adj` is a flattened nodes x degree adjacency table.
 * Returns the number of vertices placed in `next`.
 * NOTE(review): bitmap[n] is tested and set by concurrent threads without
 * atomics, so two threads can both claim the same vertex and enqueue it
 * twice -- presumably accepted here as a benign race (distance value is
 * the same either way); confirm this is intended. */
static int top_down_step(const int level, const int nodes, const int num_frontier,
                         const int degree, const int* restrict adj,
                         int* restrict frontier, int* restrict next,
                         int* restrict distance, char* restrict bitmap)
{
  int count = 0;
  int local_frontier[nodes]; /* per-thread scratch; VLA of `nodes` ints per thread via private() */
#pragma omp parallel private(local_frontier)
  {
    int local_count = 0;
#pragma omp for nowait
    for(int i=0;i<num_frontier;i++){
      int v = frontier[i];
      for(int j=0;j<degree;j++){
        int n = *(adj + v * degree + j); // adj[v][j];
        if(bitmap[n] == NOT_VISITED){
          bitmap[n] = VISITED;
          distance[n] = level;
          local_frontier[local_count++] = n;
        }
      }
    } // end for i
#pragma omp critical
    {
      /* append this thread's private frontier to the shared `next` array */
      memcpy(&next[count], local_frontier, local_count*sizeof(int));
      count += local_count;
    }
  }
  return count;
}
#else
/* One level of top-down BFS expansion (serial version); same contract as above. */
static int top_down_step(const int level, const int nodes, const int num_frontier,
                         const int degree, const int* restrict adj,
                         int* restrict frontier, int* restrict next,
                         int* restrict distance, char* restrict bitmap)
{
  int count = 0;
  for(int i=0;i<num_frontier;i++){
    int v = frontier[i];
    for(int j=0;j<degree;j++){
      int n = *(adj + v * degree + j); // int n = adj[v][j];
      if(bitmap[n] == NOT_VISITED){
        bitmap[n] = VISITED;
        distance[n] = level;
        next[count++] = n;
      }
    }
  }
  return count;
}
#endif
/* APSP metrics via repeated BFS. Each MPI rank runs BFS from the sources
 * s = rank, rank+procs, ...; per-source distance sums and the diameter are
 * combined across ranks with MPI_Allreduce further down in this function.
 * On success *diam/*ASPL receive the diameter and average shortest path
 * length; returns false if some vertex is unreachable (graph disconnected).
 * NOTE(review): `distance` entries are only meaningful where
 * bitmap == VISITED. */
static bool bfs(const int nodes, int based_nodes, const int groups, const int lines,
                const int degree, const int* restrict adj, int* restrict diam,
                double* restrict ASPL, const int added_centers)
{
  char *bitmap  = malloc(sizeof(char) * nodes);
  int *frontier = malloc(sizeof(int) * nodes);
  int *distance = malloc(sizeof(int) * nodes);
  int *next     = malloc(sizeof(int) * nodes);
  bool reached = true;
  double sum = 0.0;
  *diam = 0;

  /* sources are distributed round-robin over the MPI ranks */
  for(int s=rank;s<based_nodes;s+=procs){
    int num_frontier = 1, level = 0;
    for(int i=0;i<nodes;i++)
      bitmap[i] = NOT_VISITED;
    frontier[0] = s;
    distance[s] = level;
    bitmap[s] = VISITED;

    while(1){
      num_frontier = top_down_step(level++, nodes, num_frontier, degree,
                                   adj, frontier, next, distance, bitmap);
if(num_frontier == 0) break; int *tmp = frontier; frontier = next; next = tmp; } *diam = MAX(*diam, level-1); for(int i=s+1;i<nodes;i++){ if(bitmap[i] == NOT_VISITED) reached = false; if(i < groups*based_nodes) sum += (distance[i] + 1) * (groups - i/based_nodes); else sum += (distance[i] + 1) * groups; // for added_centers } } if(added_centers){ int start_rank = based_nodes % procs; int start_node = based_nodes*groups+rank-start_rank; if(start_node < based_nodes*groups) start_node += procs; for(int s=start_node;s<nodes;s+=procs){ int num_frontier = 1, level = 0; for(int i=0;i<nodes;i++) bitmap[i] = NOT_VISITED; frontier[0] = s; distance[s] = level; bitmap[s] = VISITED; while(1){ num_frontier = top_down_step(level++, nodes, num_frontier, degree, adj, frontier, next, distance, bitmap); if(num_frontier == 0) break; int *tmp = frontier; frontier = next; next = tmp; } *diam = MAX(*diam, level-1); for(int i=s+1;i<nodes;i++){ if(bitmap[i] == NOT_VISITED) reached = false; sum += distance[i] + 1; } } } free(bitmap); free(frontier); free(distance); free(next); MPI_Allreduce(MPI_IN_PLACE, &reached, 1, MPI_C_BOOL, MPI_LAND, MPI_COMM_WORLD); if(reached){ MPI_Allreduce(MPI_IN_PLACE, diam, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); *ASPL = sum / ((((double)nodes-1)*nodes)/2); } else{ *diam = INT_MAX; *ASPL = DBL_MAX; timer_stop(TIMER_APSP); } return reached; } static bool matrix_op(const int nodes, const int based_nodes, const int degree, const int* restrict adj, const int groups, int* restrict diam, double* restrict ASPL, const int added_centers) { unsigned int elements = (based_nodes+(UINT64_BITS-1))/UINT64_BITS; unsigned int chunk = (elements+(procs-1))/procs; size_t s = nodes*chunk*sizeof(uint64_t); uint64_t* A = malloc(s); // uint64_t A[nodes][chunk]; uint64_t* B = malloc(s); // uint64_t B[nodes][chunk]; int parsize = (elements+(chunk-1))/chunk; double sum = 0.0; *diam = 1; for(int 
t=rank;t<parsize;t+=procs){ uint64_t kk, l; clear_buffers(A, B, nodes*chunk); for(l=0; l<UINT64_BITS*chunk && UINT64_BITS*t*chunk+l<based_nodes; l++){ unsigned int offset = (UINT64_BITS*t*chunk+l)*chunk+l/UINT64_BITS; A[offset] = B[offset] = (0x1ULL<<(l%UINT64_BITS)); } for(kk=0;kk<nodes;kk++){ #pragma omp parallel for for(int i=0;i<nodes;i++) for(int j=0;j<degree;j++){ int n = *(adj + i * degree + j); // int n = adj[i][j]; for(int k=0;k<chunk;k++) B[i*chunk+k] |= A[n*chunk+k]; } uint64_t num1 = 0, num2 = 0; #pragma omp parallel for reduction(+:num1) for(int i=0;i<based_nodes*groups*chunk;i++) num1 += POPCNT(B[i]); #pragma omp parallel for reduction(+:num2) for(int i=based_nodes*groups*chunk;i<nodes*chunk;i++) num2 += POPCNT(B[i]); if(num1+num2 == (uint64_t)nodes*l) break; // swap A <-> B uint64_t* tmp = A; A = B; B = tmp; sum += ((double)based_nodes*groups * l - num1) * groups; sum += ((double)added_centers * l - num2) * groups * 2; } *diam = MAX(*diam, kk+1); } if(added_centers){ elements = (added_centers+(UINT64_BITS-1))/UINT64_BITS; chunk = (elements+(procs-1))/procs; parsize = (elements+(chunk-1))/chunk; int s = based_nodes % procs; int new_rank = (rank - s >= 0)? 
rank-s : rank-s+procs; for(int t=new_rank;t<parsize;t+=procs){ uint64_t kk, l; clear_buffers(A, B, nodes*chunk); for(l=0; l<UINT64_BITS*chunk && UINT64_BITS*t*chunk+l<added_centers; l++){ unsigned int offset = (UINT64_BITS*t*chunk+l+(nodes-added_centers))*chunk+l/UINT64_BITS; A[offset] = B[offset] = (0x1ULL<<(l%UINT64_BITS)); } for(kk=0;kk<nodes;kk++){ #pragma omp parallel for for(int i=0;i<nodes;i++) for(int j=0;j<degree;j++){ int n = *(adj + i * degree + j); // int n = adj[i][j]; for(int k=0;k<chunk;k++) B[i*chunk+k] |= A[n*chunk+k]; } uint64_t num = 0; #pragma omp parallel for reduction(+:num) for(int i=based_nodes*groups*chunk;i<nodes*chunk;i++) num += POPCNT(B[i]); if(num == (uint64_t)added_centers*l) break; // swap A <-> B uint64_t* tmp = A; A = B; B = tmp; sum += ((double)added_centers * l - num); } *diam = MAX(*diam, kk+1); } } MPI_Allreduce(MPI_IN_PLACE, diam, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); sum += (double)nodes * (nodes - 1); free(A); free(B); if(*diam < nodes){ *ASPL = sum / (((double)nodes-1)*nodes); return true; } else{ *diam = INT_MAX; *ASPL = DBL_MAX; return false; } } static bool matrix_op_mem_saving(const int nodes, const int based_nodes, const int degree, const int* restrict adj, const int groups, int* restrict diam, double* restrict ASPL, const int added_centers) { unsigned int elements = (based_nodes+(UINT64_BITS-1))/UINT64_BITS; size_t s = nodes*CHUNK*sizeof(uint64_t); uint64_t* A = malloc(s); // uint64_t A[nodes][CHUNK]; uint64_t* B = malloc(s); // uint64_t B[nodes][CHUNK]; int parsize = (elements+(CHUNK-1))/CHUNK; double sum = 0.0; *diam = 1; for(int t=rank;t<parsize;t+=procs){ unsigned int kk, l; clear_buffers(A, B, nodes*CHUNK); for(l=0; l<UINT64_BITS*CHUNK && UINT64_BITS*t*CHUNK+l<based_nodes; l++){ unsigned int offset = (UINT64_BITS*t*CHUNK+l)*CHUNK+l/UINT64_BITS; A[offset] = B[offset] = (0x1ULL<<(l%UINT64_BITS)); } for(kk=0;kk<nodes;kk++){ #pragma omp 
parallel for for(int i=0;i<nodes;i++) for(int j=0;j<degree;j++){ int n = *(adj + i * degree + j); // int n = adj[i][j]; for(int k=0;k<CHUNK;k++) B[i*CHUNK+k] |= A[n*CHUNK+k]; } uint64_t num1 = 0, num2 = 0; #pragma omp parallel for reduction(+:num1) for(int i=0;i<based_nodes*groups*CHUNK;i++) num1 += POPCNT(B[i]); #pragma omp parallel for reduction(+:num2) for(int i=based_nodes*groups*CHUNK;i<nodes*CHUNK;i++) num2 += POPCNT(B[i]); if(num1+num2 == (uint64_t)nodes*l) break; // swap A <-> B uint64_t* tmp = A; A = B; B = tmp; sum += ((double)based_nodes*groups * l - num1) * groups; sum += ((double)added_centers * l - num2) * groups * 2; } *diam = MAX(*diam, kk+1); } if(added_centers){ elements = (added_centers+(UINT64_BITS-1))/UINT64_BITS; parsize = (elements+(CHUNK-1))/CHUNK; int s = based_nodes % procs; int new_rank = (rank - s >= 0)? rank-s : rank-s+procs; for(int t=new_rank;t<parsize;t+=procs){ unsigned int kk, l; clear_buffers(A, B, nodes*CHUNK); for(l=0; l<UINT64_BITS*CHUNK && UINT64_BITS*t*CHUNK+l<added_centers; l++){ unsigned int offset = (UINT64_BITS*t*CHUNK+l+(nodes-added_centers))*CHUNK+l/UINT64_BITS; A[offset] = B[offset] = (0x1ULL<<(l%UINT64_BITS)); } for(kk=0;kk<nodes;kk++){ #pragma omp parallel for for(int i=0;i<nodes;i++) for(int j=0;j<degree;j++){ int n = *(adj + i * degree + j); // int n = adj[i][j]; for(int k=0;k<CHUNK;k++) B[i*CHUNK+k] |= A[n*CHUNK+k]; } uint64_t num = 0; #pragma omp parallel for reduction(+:num) for(int i=based_nodes*groups*CHUNK;i<nodes*CHUNK;i++) num += POPCNT(B[i]); if(num == (uint64_t)added_centers*l) break; // swap A <-> B uint64_t* tmp = A; A = B; B = tmp; sum += ((double)added_centers * l - num); } *diam = MAX(*diam, kk+1); } } MPI_Allreduce(MPI_IN_PLACE, diam, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); sum += (double)nodes * (nodes - 1); free(A); free(B); if(*diam < nodes){ *ASPL = sum / (((double)nodes-1)*nodes); return true; } else{ *diam = INT_MAX; 
    *ASPL = DBL_MAX;
    return false;
  }
}

/* Entry point: computes diameter (*diam) and average shortest path length
 * (*ASPL) of the graph given by the flattened adjacency table `adj`, using
 * the algorithm selected by `algo` (BFS, MATRIX_OP, or the memory-saving
 * matrix variant). The whole computation is bracketed by the APSP timer.
 * Returns true when the graph is connected and the metrics are valid. */
bool evaluation(const int nodes, int based_nodes, const int groups, const int lines,
                const int degree, int* restrict adj, int* restrict diam,
                double* restrict ASPL, const int added_centers, const int algo)
{
  timer_start(TIMER_APSP);
  bool ret;
  if(algo == BFS)
    ret = bfs(nodes, based_nodes, groups, lines, degree, adj, diam, ASPL, added_centers);
  else if(algo == MATRIX_OP)
    ret = matrix_op(nodes, based_nodes, degree, adj, groups, diam, ASPL, added_centers);
  else // (algo == MATRIX_OP_MEM_SAVING)
    ret = matrix_op_mem_saving(nodes, based_nodes, degree, adj, groups, diam, ASPL, added_centers);
  timer_stop(TIMER_APSP);
  return ret;
}
psgeswp.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pzgeswp.c, normal z -> s, Fri Sep 28 17:38:11 2018
 *
 **/

#include "plasma_async.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include <plasma_core_blas.h>

#define A(m, n) (float*)plasma_tile_addr(A, m, n)

/******************************************************************************/
/* Parallel application of row/column swaps (pivoting) to a tiled
 * single-precision matrix, scheduled with OpenMP tasks.
 *
 * colrow   - PlasmaRowwise: swap rows, one column of tiles (panel) at a
 *            time; otherwise columnwise: swap columns, one row of tiles
 *            at a time.
 * A        - tiled matrix descriptor.
 * ipiv     - pivot index vector, applied by plasma_core_sgeswp.
 * incx     - stride/direction of the pivot application.
 * sequence/request - async error tracking; returns immediately if the
 *            sequence has already failed.
 *
 * The `{ int l = 1; l++; }` tasks do no work: they only funnel a
 * dependence on every tile of a panel through the single representative
 * pointer a00 (the "multidependency" mentioned below), since a depend
 * clause cannot name a variable number of tiles. */
void plasma_psgeswp(plasma_enum_t colrow, plasma_desc_t A,
                    int *ipiv, int incx,
                    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Return if failed sequence.
    if (sequence->status != PlasmaSuccess)
        return;

    if (colrow == PlasmaRowwise) {
        for (int n = 0; n < A.nt; n++) {
            float *a00, *a10;
            a00 = A(0, n);
            a10 = A(A.mt-1, n);

            // Multidependency of the whole panel on its individual tiles.
            for (int m = 1; m < A.mt-1; m++) {
                float *amn = A(m, n);
                #pragma omp task depend (in:amn[0]) \
                                 depend (inout:a00[0])
                {
                    int l = 1; l++; // no-op body: dependency aggregation only
                }
            }
            int ma00 = (A.mt-1)*A.mb;
            int na00 = plasma_tile_nmain(A, n);
            int lda10 = plasma_tile_mmain(A, A.mt-1);
            int nva10 = plasma_tile_nview(A, n);
            #pragma omp task depend (in:ipiv[0:A.m]) \
                             depend (inout:a00[0:ma00*na00]) \
                             depend (inout:a10[0:lda10*nva10])
            {
                // Apply the row swaps across the full height of panel n.
                int nvan = plasma_tile_nview(A, n);
                plasma_desc_t view = plasma_desc_view(A, 0, n*A.nb, A.m, nvan);
                plasma_core_sgeswp(colrow, view, 1, A.m, ipiv, incx);
            }
            // Multidependency of individual tiles on the whole panel.
            for (int m = 1; m < A.mt-1; m++) {
                float *amn = A(m, n);
                #pragma omp task depend (in:a00[0]) \
                                 depend (inout:amn[0])
                {
                    int l = 1; l++; // no-op body: dependency aggregation only
                }
            }
        }
    }
    else { // PlasmaColumnwise
        for (int m = 0; m < A.mt; m++) {
            float *a00, *a01;
            a00 = A(m, 0);
            a01 = A(m, A.nt-1);

            // Multidependency of the whole (row) panel on its individual tiles.
            for (int n = 1; n < A.nt-1; n++) {
                float *amn = A(m, n);
                #pragma omp task depend (in:amn[0]) \
                                 depend (inout:a00[0])
                {
                    int l = 1; l++; // no-op body: dependency aggregation only
                }
            }
            #pragma omp task depend (in:ipiv[0:A.n]) \
                             depend (inout:a00[0]) \
                             depend (inout:a01[0])
            {
                // Apply the column swaps across the full width of tile row m.
                int mvam = plasma_tile_mview(A, m);
                plasma_desc_t view = plasma_desc_view(A, m*A.mb, 0, mvam, A.n);
                plasma_core_sgeswp(colrow, view, 1, A.n, ipiv, incx);
            }
            // Multidependency of individual tiles on the whole (row) panel.
            for (int n = 1; n < A.nt-1; n++) {
                float *amn = A(m, n);
                #pragma omp task depend (in:a00[0]) \
                                 depend (inout:amn[0])
                {
                    int l = 1; l++; // no-op body: dependency aggregation only
                }
            }
        }
    }
}
SimpleEmptySpaceSkipping.h
#ifndef SIMPLE_ESS #define SIMPLE_ESS /** @brief Simple Empty Space Skipping for SimpleVR * @author Andre Aichert */ #include "SimpleVR.h" #include <vector> namespace SimpleVR { /// Render ray entry and exit textures for ray caster. Subdivides cube and turns sub-cubes on and off. class ProxySubCubes: public ProxyGeomSetup { float *vertices; // x y z min max size is n_x+1 x n_y+1 x n_z+1 std::vector<int> indices; float range_min; float range_max; int n_x; int n_y; int n_z; public: ProxySubCubes(const MHD::Image& vol, int subsample=16) : ProxyGeomSetup() , vertices(0x0) , indices(0x0) { range_min=range_max=0; // compute number of sub-cubdes n_x=vol.getData()->dim(0)/subsample; n_y=vol.getData()->dim(1)/subsample; n_z=vol.getData()->dim(2)/subsample; // loop over all vertices (there are n+1 vertices in each direction) vertices=new float[(n_x+1)*(n_y+1)*(n_z+1)*5]; #pragma omp parallel for for (int z=0;z<n_z+1;z++) for (int y=0;y<n_y+1;y++) for (int x=0;x<n_x+1;x++) { // compute location int idx=(n_x+1)*(n_y+1)*z+(n_x+1)*y+x; vertices[idx*5+0]=(float)x/n_x; vertices[idx*5+1]=(float)y/n_y; vertices[idx*5+2]=(float)z/n_z; // find min and max in subcube (for min corner of subcube only) if (x!=n_x&&y!=n_y&&z!=n_z) { unsigned char *data=(unsigned char *)vol.getData()->raw(); unsigned char rmin=255; unsigned char rmax=0; for (int dz=-1;dz<subsample+1;dz++) for (int dy=-1;dy<subsample+1;dy++) for (int dx=-1;dx<subsample+1;dx++) { int vx=(subsample*x+dx); int vy=(subsample*y+dy); int vz=(subsample*z+dz); if (vx<0||vx>=vol.getData()->dim(0)) continue; if (vy<0||vy>=vol.getData()->dim(1)) continue; if (vz<0||vz>=vol.getData()->dim(2)) continue; unsigned char v=data[vz*vol.getData()->dim(0)*vol.getData()->dim(1)+vy*vol.getData()->dim(0)+vx]; if (rmin>v) rmin=v; else if (rmax<v) rmax=v; } vertices[idx*5+3]=rmin/255.0f; vertices[idx*5+4]=rmax/255.0f; } else { // invalid because this vertex is not a min corner of any cube vertices[idx*5+3]=-1; vertices[idx*5+4]=-1; } } std::cout << 
n_x*n_y*n_z << " cubes.\n"; } ~ProxySubCubes() { delete vertices; } virtual void prolog() { glPushAttrib(GL_ALL_ATTRIB_BITS); glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT); glEnableClientState(GL_VERTEX_ARRAY); glEnableClientState(GL_COLOR_ARRAY); glVertexPointer(3,GL_FLOAT,5*sizeof(float),vertices); glColorPointer(3,GL_FLOAT,5*sizeof(float),vertices); } virtual void renderGeometry() { if (!indices.empty()) glDrawElements(GL_QUADS,(int)indices.size(),GL_UNSIGNED_INT,&indices[0]); } virtual void epilog() { glPopClientAttrib(); glPopAttrib(); } /// Define intensity range guaranteed by empty space skipping virtual void setIntensityRange(float min=0, float max=1) { if (range_min==min && range_max==max) return; // nothing to be done indices.clear(); range_min=min; range_max=max; std::cout << "Intensity range (normalized) " << min << " to " << max << std::endl; unsigned char *active=new unsigned char [n_x*n_y*n_z]; int n_active=0; for (int z=0;z<n_z;z++) for (int y=0;y<n_y;y++) for (int x=0;x<n_x;x++) { int idx_vertices=z*(n_x+1)*(n_y+1)+y*(n_x+1)+x; int idx_active=z*n_x*n_y+y*n_x+x; float range_cube_min=vertices[5*idx_vertices+3]; float range_cube_max=vertices[5*idx_vertices+4]; if (range_cube_max<min||range_cube_min>max) active[idx_active]=0; else { active[idx_active]=1; n_active++; } } std::cout << n_active << " active cells.\n"; // Check for active faces and add face indices for (int z=0;z<n_z;z++) for (int y=0;y<n_y;y++) for (int x=0;x<n_x;x++) { int idx_vertices=z*(n_x+1)*(n_y+1)+y*(n_x+1)+x; int idx_active=z*n_x*n_y+y*n_x+x; if (!active[idx_active]) continue; if (x==0||active[z*n_x*n_y+y*n_x+x-1]==0) { indices.push_back((z+0)*(n_x+1)*(n_y+1)+(y+0)*(n_x+1)+(x+0)); indices.push_back((z+1)*(n_x+1)*(n_y+1)+(y+0)*(n_x+1)+(x+0)); indices.push_back((z+1)*(n_x+1)*(n_y+1)+(y+1)*(n_x+1)+(x+0)); indices.push_back((z+0)*(n_x+1)*(n_y+1)+(y+1)*(n_x+1)+(x+0)); } if (x==n_x-1||active[z*n_x*n_y+y*n_x+x+1]==0) { indices.push_back((z+0)*(n_x+1)*(n_y+1)+(y+0)*(n_x+1)+(x+1)); 
indices.push_back((z+1)*(n_x+1)*(n_y+1)+(y+0)*(n_x+1)+(x+1)); indices.push_back((z+1)*(n_x+1)*(n_y+1)+(y+1)*(n_x+1)+(x+1)); indices.push_back((z+0)*(n_x+1)*(n_y+1)+(y+1)*(n_x+1)+(x+1)); } if (y==0||active[z*n_x*n_y+(y-1)*n_x+x]==0) { indices.push_back((z+0)*(n_x+1)*(n_y+1)+(y+0)*(n_x+1)+(x+0)); indices.push_back((z+0)*(n_x+1)*(n_y+1)+(y+0)*(n_x+1)+(x+1)); indices.push_back((z+1)*(n_x+1)*(n_y+1)+(y+0)*(n_x+1)+(x+1)); indices.push_back((z+1)*(n_x+1)*(n_y+1)+(y+0)*(n_x+1)+(x+0)); } if (y==n_y-1||active[z*n_x*n_y+(y+1)*n_x+x]==0) { indices.push_back((z+0)*(n_x+1)*(n_y+1)+(y+1)*(n_x+1)+(x+0)); indices.push_back((z+0)*(n_x+1)*(n_y+1)+(y+1)*(n_x+1)+(x+1)); indices.push_back((z+1)*(n_x+1)*(n_y+1)+(y+1)*(n_x+1)+(x+1)); indices.push_back((z+1)*(n_x+1)*(n_y+1)+(y+1)*(n_x+1)+(x+0)); } if (z==0||active[(z-1)*n_x*n_y+y*n_x+x]==0) { indices.push_back((z+0)*(n_x+1)*(n_y+1)+(y+0)*(n_x+1)+(x+1)); indices.push_back((z+0)*(n_x+1)*(n_y+1)+(y+1)*(n_x+1)+(x+1)); indices.push_back((z+0)*(n_x+1)*(n_y+1)+(y+1)*(n_x+1)+(x+0)); indices.push_back((z+0)*(n_x+1)*(n_y+1)+(y+0)*(n_x+1)+(x+0)); } if (z==n_z-1||active[(z+1)*n_x*n_y+y*n_x+x]==0) { indices.push_back((z+1)*(n_x+1)*(n_y+1)+(y+0)*(n_x+1)+(x+1)); indices.push_back((z+1)*(n_x+1)*(n_y+1)+(y+1)*(n_x+1)+(x+1)); indices.push_back((z+1)*(n_x+1)*(n_y+1)+(y+1)*(n_x+1)+(x+0)); indices.push_back((z+1)*(n_x+1)*(n_y+1)+(y+0)*(n_x+1)+(x+0)); } } std::cout << indices.size() << " active faces.\n"; delete active; } }; } // namespace SimpleVR #endif // SIMPLE_ESS
3d25pt_var.lbpar.c
#include <omp.h>
#include <math.h>
/* Ceiling/floor of an integer division, evaluated in double (CLooG helpers). */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 * NOTE(review): normalizes by modifying *y in place.
 * Return 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative.
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 16; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1-2,4),ceild(8*t2-Nz-3,16));t3<=min(floord(4*Nt+Ny-9,16),floord(4*t1+Ny-1,16));t3++) { for (t4=max(max(ceild(t1-510,512),ceild(8*t2-Nz-2035,2048)),ceild(16*t3-Ny-2035,2048));t4<=min(min(floord(4*Nt+Nx-9,2048),floord(4*t1+Nx-1,2048)),floord(16*t3+Nx+3,2048));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(2048*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),4*t3+2),512*t4+510);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) { lbv=max(2048*t4,4*t5+4); ubv=min(2048*t4+2047,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ 
(-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
mediancut.c
/*
** © 2009-2018 by Kornel Lesiński.
** © 1989, 1991 by Jef Poskanzer.
** © 1997, 2000, 2002 by Greg Roelofs; based on an idea by Stefan Schneider.
**
** See COPYRIGHT file for license.
*/

#include <stdlib.h>
#include <stddef.h>

#include "libimagequant.h"
#include "pam.h"
#include "mediancut.h"

/* Maps a named f_pixel channel (a/r/g/b) to its float index inside the struct. */
#define index_of_channel(ch) (offsetof(f_pixel,ch)/sizeof(float))

static f_pixel averagepixels(unsigned int clrs, const hist_item achv[]);

/* One median-cut box: a contiguous run of `colors` histogram entries
   starting at achv[ind], plus cached statistics about that run. */
struct box {
    f_pixel color;          // weighted average color of the box
    f_pixel variance;       // weighted per-channel variance (see box_variance)
    double sum;             // sum of adjusted_weight over the box
    double total_error;     // lazily computed; -1 means "not computed yet"
    double max_error;       // worst colordifference from the box's average
    unsigned int ind;       // first histogram index belonging to this box
    unsigned int colors;    // number of histogram entries in this box
};

/* Squares the channel difference; differences below `good_enough` are
   deliberately discounted (x0.25) so tiny deviations don't drive splits. */
ALWAYS_INLINE static double variance_diff(double val, const double good_enough);
inline static double variance_diff(double val, const double good_enough)
{
    val *= val;
    if (val < good_enough*good_enough) return val*0.25;
    return val;
}

/** Weighted per-channel variance of the box. It's used to decide which channel to split by */
static f_pixel box_variance(const hist_item achv[], const struct box *box)
{
    f_pixel mean = box->color;
    double variancea=0, variancer=0, varianceg=0, varianceb=0;

    for(unsigned int i = 0; i < box->colors; ++i) {
        const f_pixel px = achv[box->ind + i].acolor;
        double weight = achv[box->ind + i].adjusted_weight;
        variancea += variance_diff(mean.a - px.a, 2.0/256.0)*weight;
        variancer += variance_diff(mean.r - px.r, 1.0/256.0)*weight;
        varianceg += variance_diff(mean.g - px.g, 1.0/256.0)*weight;
        varianceb += variance_diff(mean.b - px.b, 1.0/256.0)*weight;
    }

    // Per-channel scaling factors weight the channels' perceptual importance
    // (green heaviest, alpha lightest).
    return (f_pixel){
        .a = variancea*(4.0/16.0),
        .r = variancer*(7.0/16.0),
        .g = varianceg*(9.0/16.0),
        .b = varianceb*(5.0/16.0),
    };
}

/* Largest colordifference between the box's average color and any member. */
static double box_max_error(const hist_item achv[], const struct box *box)
{
    f_pixel mean = box->color;
    double max_error = 0;

    for(unsigned int i = 0; i < box->colors; ++i) {
        const double diff = colordifference(mean, achv[box->ind + i].acolor);
        if (diff > max_error) {
            max_error = diff;
        }
    }
    return max_error;
}

ALWAYS_INLINE static double color_weight(f_pixel median, hist_item h);

/* Swap two histogram items in place (no-op when both point at the same slot). */
static inline void hist_item_swap(hist_item *l, hist_item *r) {
    if (l != r) {
        hist_item t = *l;
        *l = *r;
        *r = t;
    }
}

/* Median-of-three pivot selection on tmp.sort_value; for short arrays
   the midpoint is good enough. */
ALWAYS_INLINE static unsigned int qsort_pivot(const hist_item *const base, const unsigned int len);
inline static unsigned int qsort_pivot(const hist_item *const base, const unsigned int len)
{
    if (len < 32) {
        return len/2;
    }

    const unsigned int aidx=8, bidx=len/2, cidx=len-1;
    const unsigned int a=base[aidx].tmp.sort_value, b=base[bidx].tmp.sort_value, c=base[cidx].tmp.sort_value;
    // Return index of the median of the three sampled sort values.
    return (a < b) ? ((b < c) ? bidx : ((a < c) ? cidx : aidx ))
                   : ((b > c) ? bidx : ((a < c) ? aidx : cidx ));
}

/* Hoare-style partition around base[0] (descending order: values >= pivot
   end up on the left). Returns the pivot's final index. */
ALWAYS_INLINE static unsigned int qsort_partition(hist_item *const base, const unsigned int len);
inline static unsigned int qsort_partition(hist_item *const base, const unsigned int len)
{
    unsigned int l = 1, r = len;
    if (len >= 8) {
        hist_item_swap(&base[0], &base[qsort_pivot(base,len)]);
    }

    const unsigned int pivot_value = base[0].tmp.sort_value;
    while (l < r) {
        if (base[l].tmp.sort_value >= pivot_value) {
            l++;
        } else {
            while(l < --r && base[r].tmp.sort_value <= pivot_value) {}
            hist_item_swap(&base[l], &base[r]);
        }
    }
    l--;
    hist_item_swap(&base[0], &base[l]);

    return l;
}

/** quick select algorithm */
static void hist_item_sort_range(hist_item base[], unsigned int len, unsigned int sort_start)
{
    // Iterative quickselect: only the partition containing sort_start is
    // refined further, so the element at sort_start ends up in sorted position.
    for(;;) {
        const unsigned int l = qsort_partition(base, len), r = l+1;

        if (l > 0 && sort_start < l) {
            len = l;
        }
        else if (r < len && sort_start > r) {
            base += r; len -= r; sort_start -= r;
        }
        else break;
    }
}

/** sorts array to make sum of weights lower than halfvar one side, returns edge between <halfvar and >halfvar parts of the set */
static hist_item *hist_item_sort_halfvar(hist_item base[], unsigned int len, double *const lowervar, const double halfvar)
{
    do {
        const unsigned int l = qsort_partition(base, len), r = l+1;

        // check if sum of left side is smaller than half,
        // if it is, then it doesn't need to be sorted
        unsigned int t = 0; double tmpsum = *lowervar;
        while (t <= l && tmpsum < halfvar) tmpsum += base[t++].color_weight;

        if (tmpsum < halfvar) {
            // Whole left partition fits below halfvar — skip sorting it.
            *lowervar = tmpsum;
        } else {
            if (l > 0) {
                // The halfvar boundary lies inside the left partition; recurse.
                hist_item *res = hist_item_sort_halfvar(base, l, lowervar, halfvar);
                if (res) return res;
            } else {
                // End of left recursion. This will be executed in order from the first element.
                *lowervar += base[0].color_weight;
                if (*lowervar > halfvar) return &base[0];
            }
        }

        if (len > r) {
            base += r; len -= r; // tail-recursive "call"
        } else {
            *lowervar += base[r].color_weight;
            return (*lowervar > halfvar) ? &base[r] : NULL;
        }
    } while(1);
}

static f_pixel get_median(const struct box *b, hist_item achv[]);

/* Pairs a channel index with its variance so channels can be ranked by qsort. */
typedef struct {
    unsigned int chan; float variance;
} channelvariance;

/* qsort comparator: sorts channelvariance entries by descending variance. */
static int comparevariance(const void *ch1, const void *ch2)
{
    return ((const channelvariance*)ch1)->variance > ((const channelvariance*)ch2)->variance ? -1 :
          (((const channelvariance*)ch1)->variance < ((const channelvariance*)ch2)->variance ? 1 : 0);
}

/** Finds which channels need to be sorted first and preproceses achv for fast sort */
static double prepare_sort(struct box *b, hist_item achv[])
{
    /*
    ** Sort dimensions by their variance, and then sort colors first by dimension with highest variance
    */
    channelvariance channels[4] = {
        {index_of_channel(a), b->variance.a},
        {index_of_channel(r), b->variance.r},
        {index_of_channel(g), b->variance.g},
        {index_of_channel(b), b->variance.b},
    };

    qsort(channels, 4, sizeof(channels[0]), comparevariance);

    const unsigned int ind1 = b->ind;
    const unsigned int colors = b->colors;
    #pragma omp parallel for if (colors > 25000) \
        schedule(static) default(none) shared(achv, channels)
    for(unsigned int i=0; i < colors; i++) {
        const float *chans = (const float *)&achv[ind1 + i].acolor;
        // Only the first channel really matters. When trying median cut many times
        // with different histogram weights, I don't want sort randomness to influence outcome.
        // Highest-variance channel goes in the top 16 bits; remaining channels
        // form a deterministic tie-breaker in the low 16 bits.
        achv[ind1 + i].tmp.sort_value = ((unsigned int)(chans[channels[0].chan]*65535.0)<<16) |
                                         (unsigned int)((chans[channels[2].chan] + chans[channels[1].chan]/2.0 + chans[channels[3].chan]/4.0)*65535.0);
    }

    const f_pixel median = get_median(b, achv);

    // box will be split to make color_weight of each side even
    const unsigned int ind = b->ind, end = ind+b->colors;
    double totalvar = 0;
    #pragma omp parallel for if (end - ind > 15000) \
        schedule(static) default(shared) reduction(+:totalvar)
    for(unsigned int j=ind; j < end; j++) totalvar += (achv[j].color_weight = color_weight(median, achv[j]));
    return totalvar / 2.0;
}

/** finds median in unsorted set by sorting only minimum required */
static f_pixel get_median(const struct box *b, hist_item achv[])
{
    const unsigned int median_start = (b->colors-1)/2;

    hist_item_sort_range(&(achv[b->ind]), b->colors, median_start);

    if (b->colors&1) return achv[b->ind + median_start].acolor;

    // technically the second color is not guaranteed to be sorted correctly
    // but most of the time it is good enough to be useful
    return averagepixels(2, &achv[b->ind + median_start]);
}

/*
** Find the best splittable box. -1 if no boxes are splittable.
*/
static int best_splittable_box(struct box bv[], unsigned int boxes, const double max_mse)
{
    int bi=-1; double maxsum=0;
    for(unsigned int i=0; i < boxes; i++) {
        if (bv[i].colors < 2) {
            continue;
        }

        // looks only at max variance, because it's only going to split by it
        const double cv = MAX(bv[i].variance.r, MAX(bv[i].variance.g,bv[i].variance.b));
        double thissum = bv[i].sum * MAX(bv[i].variance.a, cv);

        // Boxes already exceeding the quality limit get a boosted score so
        // they are split first.
        if (bv[i].max_error > max_mse) {
            thissum = thissum* bv[i].max_error/max_mse;
        }

        if (thissum > maxsum) {
            maxsum = thissum;
            bi = i;
        }
    }
    return bi;
}

/* Split metric for one histogram entry: distance from the box median,
   damped by sqrt so popular colors don't dominate completely. */
inline static double color_weight(f_pixel median, hist_item h)
{
    float diff = colordifference(median, h.acolor);
    return sqrt(diff) * (sqrt(1.0+h.adjusted_weight)-1.0);
}

static void set_colormap_from_boxes(colormap *map, struct box bv[], unsigned int boxes, hist_item *achv);
static void adjust_histogram(hist_item *achv, const struct box bv[], unsigned int boxes);

/* Perceptually-weighted total squared error of a box against its average color. */
static double box_error(const struct box *box, const hist_item achv[])
{
    f_pixel avg = box->color;

    double total_error=0;
    for (unsigned int i = 0; i < box->colors; ++i) {
        total_error += colordifference(avg, achv[box->ind + i].acolor) * achv[box->ind + i].perceptual_weight;
    }

    return total_error;
}

/* True when the summed (lazily computed) per-box error is under target_mse.
   First pass uses only cached errors for an early exit; second pass fills in
   the missing ones. */
static bool total_box_error_below_target(double target_mse, struct box bv[], unsigned int boxes, const histogram *hist)
{
    target_mse *= hist->total_perceptual_weight;
    double total_error=0;

    for(unsigned int i=0; i < boxes; i++) {
        // error is (re)calculated lazily
        if (bv[i].total_error >= 0) {
            total_error += bv[i].total_error;
        }
        if (total_error > target_mse) return false;
    }

    for(unsigned int i=0; i < boxes; i++) {
        if (bv[i].total_error < 0) {
            bv[i].total_error = box_error(&bv[i], hist->achv);
            total_error += bv[i].total_error;
        }
        if (total_error > target_mse) return false;
    }

    return true;
}

/* Fill in a box over achv[ind .. ind+colors-1] and (possibly asynchronously,
   via OpenMP tasks) compute its variance and max_error statistics.
   total_error is reset to -1 = "not computed" for the lazy path above. */
static void box_init(struct box *box, const hist_item *achv, const unsigned int ind, const unsigned int colors, const double sum)
{
    box->ind = ind;
    box->colors = colors;
    box->sum = sum;
    box->total_error = -1;

    box->color = averagepixels(colors, &achv[ind]);
    #pragma omp task if (colors > 5000)
    box->variance = box_variance(achv, box);
    #pragma omp task if (colors > 8000)
    box->max_error = box_max_error(achv, box);
}

/*
** Here is the fun part, the median-cut colormap generator. This is based
** on Paul Heckbert's paper, "Color Image Quantization for Frame Buffer
** Display," SIGGRAPH 1982 Proceedings, page 297.
*/
LIQ_PRIVATE colormap *mediancut(histogram *hist, unsigned int newcolors, const double target_mse, const double max_mse, void* (*malloc)(size_t), void (*free)(void*))
{
    hist_item *achv = hist->achv;
    LIQ_ARRAY(struct box, bv, newcolors);
    unsigned int boxes = 1;

    /*
    ** Set up the initial box.
    */
    // Single thread drives the loop; box_init spawns tasks, and the taskgroup
    // below guarantees its statistics are complete before they are read.
    #pragma omp parallel
    #pragma omp single
    {
    double sum = 0;
    for(unsigned int i=0; i < hist->size; i++) {
        sum += achv[i].adjusted_weight;
    }
    #pragma omp taskgroup
    {
        box_init(&bv[0], achv, 0, hist->size, sum);
    }

    /*
    ** Main loop: split boxes until we have enough.
    */
    while (boxes < newcolors) {

        // first splits boxes that exceed quality limit (to have colors for things like odd green pixel),
        // later raises the limit to allow large smooth areas/gradients get colors.
        const double current_max_mse = max_mse + (boxes/(double)newcolors)*16.0*max_mse;
        const int bi = best_splittable_box(bv, boxes, current_max_mse);
        if (bi < 0) {
            break;        /* ran out of colors! */
        }

        unsigned int indx = bv[bi].ind;
        unsigned int clrs = bv[bi].colors;

        /*
         Classic implementation tries to get even number of colors or pixels in each subdivision.

         Here, instead of popularity I use (sqrt(popularity)*variance) metric.
         Each subdivision balances number of pixels (popular colors) and low variance -
         boxes can be large if they have similar colors. Later boxes with high variance
         will be more likely to be split.

         Median used as expected value gives much better results than mean.
        */

        const double halfvar = prepare_sort(&bv[bi], achv);
        double lowervar=0;

        // hist_item_sort_halfvar sorts and sums lowervar at the same time
        // returns item to break at …minus one, which does smell like an off-by-one error.
        hist_item *break_p = hist_item_sort_halfvar(&achv[indx], clrs, &lowervar, halfvar);
        // MIN() keeps both halves non-empty even at the array's edge.
        unsigned int break_at = MIN(clrs-1, break_p - &achv[indx] + 1);

        /*
        ** Split the box.
        */
        double sm = bv[bi].sum;
        double lowersum = 0;
        for(unsigned int i=0; i < break_at; i++) lowersum += achv[indx + i].adjusted_weight;

        #pragma omp taskgroup
        {
            box_init(&bv[bi], achv, indx, break_at, lowersum);
            box_init(&bv[boxes], achv, indx + break_at, clrs - break_at, sm - lowersum);
        }

        ++boxes;

        if (total_box_error_below_target(target_mse, bv, boxes, hist)) {
            break;
        }
    }
    }

    colormap *map = pam_colormap(boxes, malloc, free);
    set_colormap_from_boxes(map, bv, boxes, achv);

    adjust_histogram(achv, bv, boxes);

    return map;
}

static void set_colormap_from_boxes(colormap *map, struct box* bv, unsigned int boxes, hist_item *achv)
{
    /*
    ** Ok, we've got enough boxes.  Now choose a representative color for
    ** each box.  There are a number of possible ways to make this choice.
    ** One would be to choose the center of the box; this ignores any structure
    ** within the boxes.  Another method would be to average all the colors in
    ** the box - this is the method specified in Heckbert's paper.
    */

    for(unsigned int bi = 0; bi < boxes; ++bi) {
        map->palette[bi].acolor = bv[bi].color;

        /* store total color popularity (perceptual_weight is approximation of it) */
        map->palette[bi].popularity = 0;
        for(unsigned int i=bv[bi].ind; i < bv[bi].ind+bv[bi].colors; i++) {
            map->palette[bi].popularity += achv[i].perceptual_weight;
        }
    }
}

/* increase histogram popularity by difference from the final color (this is used as part of feedback loop) */
static void adjust_histogram(hist_item *achv, const struct box* bv, unsigned int boxes)
{
    // Record, for every histogram entry, which palette index its box became.
    for(unsigned int bi = 0; bi < boxes; ++bi) {
        for(unsigned int i=bv[bi].ind; i < bv[bi].ind+bv[bi].colors; i++) {
            achv[i].tmp.likely_colormap_index = bi;
        }
    }
}

/* Weighted average color of the first `clrs` histogram entries.
   Returns unnormalized zeros if the total weight is zero. */
static f_pixel averagepixels(unsigned int clrs, const hist_item achv[])
{
    double r = 0, g = 0, b = 0, a = 0, sum = 0;

    #pragma omp parallel for if (clrs > 25000) \
        schedule(static) default(shared) reduction(+:a) reduction(+:r) reduction(+:g) reduction(+:b) reduction(+:sum)
    for(unsigned int i = 0; i < clrs; i++) {
        const f_pixel px = achv[i].acolor;
        const double weight = achv[i].adjusted_weight;
        sum += weight;
        a += px.a * weight;
        r += px.r * weight;
        g += px.g * weight;
        b += px.b * weight;
    }

    if (sum) {
        a /= sum;
        r /= sum;
        g /= sum;
        b /= sum;
    }

    assert(!isnan(r) && !isnan(g) && !isnan(b) && !isnan(a));

    return (f_pixel){.r=r, .g=g, .b=b, .a=a};
}
rt_dtsmqr.c
/*
 * OmpSs/runtime wrappers for the LAPACK-style DTSMQR kernel: apply the
 * orthogonal matrix Q (from a TS QR factorization) to the tile pair (A1, A2).
 * Separate left- and right-side task variants exist because the OmpSs
 * `#pragma omp task` annotation attached to each definition fixes the
 * dependency footprint; the device (SMP vs CUDA vs hybrid) is chosen at
 * build time by the PLASMA_WITH_* macros.
 */
#include "runtime.h"
#include "core_blas-gpu.h"

//#pragma omp task inout([lda1*n1]A1, [lda2*n2]A2) in([ldt*nb]V, [ldt*nb]T) label(dtsmqr)
//CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);

#ifdef PLASMA_WITH_SMP
/* Left-side DTSMQR as an SMP task: thin forwarder to the CPU core kernel. */
#pragma omp target device (smp) copy_deps
#pragma omp task inout([lda1*nb]A1, [lda2*nb]A2) in([ldv*nb]V, [ldt*nb]T) out([ib*nb]WORK) label(ldtsmqr_smp)
void CORE_ldtsmqr_ompss(PLASMA_enum side, PLASMA_enum trans,
                        int m1, int n1, int m2, int n2, int k, int ib, int nb,
                        double *A1, int lda1,
                        double *A2, int lda2,
                        const double *V, int ldv,
                        const double *T, int ldt,
                        double *WORK, int ldwork)
{
    CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
}

/* Right-side DTSMQR as an SMP task: thin forwarder to the CPU core kernel. */
#pragma omp target device (smp) copy_deps
#pragma omp task inout([lda1*nb]A1, [lda2*nb]A2) in([ldv*nb]V, [ldt*nb]T) out([ib*nb]WORK) label(rdtsmqr_smp)
void CORE_rdtsmqr_ompss(PLASMA_enum side, PLASMA_enum trans,
                        int m1, int n1, int m2, int n2, int k, int ib, int nb,
                        double *A1, int lda1,
                        double *A2, int lda2,
                        const double *V, int ldv,
                        const double *T, int ldt,
                        double *WORK, int ldwork)
{
    CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
}
#endif

#ifdef PLASMA_WITH_CUDA_PURE
/* Left-side DTSMQR as a CUDA task, executed via cuBLAS on the task's stream.
   NOTE(review): the `side` parameter is ignored here — the side is hard-coded
   as CUBLAS_SIDE_LEFT; presumably intentional since this is the "l" variant. */
#pragma omp target device (cuda) copy_deps
#pragma omp task inout([lda1*nb]A1, [lda2*nb]A2) in([ldv*nb]V, [ldt*nb]T) out([ib*nb]WORK) label(ldtsmqr_cuda)
void CORE_ldtsmqr_ompss(PLASMA_enum side, PLASMA_enum trans,
                        int m1, int n1, int m2, int n2, int k, int ib, int nb,
                        double *A1, int lda1,
                        double *A2, int lda2,
                        const double *V, int ldv,
                        const double *T, int ldt,
                        double *WORK, int ldwork)
{
    /*
    CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
    */
    //printf("\n\n=============================> SALEM\n\n");

    // Translate PLASMA transpose flag to cuBLAS.
    cublasOperation_t cutrans;
    if ( trans == PlasmaNoTrans )
        cutrans = CUBLAS_OP_N;
    else
        cutrans = CUBLAS_OP_T;

    // Bind the runtime-provided cuBLAS handle to this task's CUDA stream.
    cublasHandle_t handle = nanos_get_cublas_handle();
    cudaStream_t stream = nanos_get_kernel_execution_stream();
    cublasSetStream(handle, stream);
    cublasDtsmqr(handle, CUBLAS_SIDE_LEFT, cutrans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
}

/* Right-side DTSMQR as a CUDA task (CUBLAS_SIDE_RIGHT hard-coded). */
#pragma omp target device (cuda) copy_deps
#pragma omp task inout([lda1*nb]A1, [lda2*nb]A2) in([ldv*nb]V, [ldt*nb]T) out([ib*nb]WORK) label(rdtsmqr_cuda)
void CORE_rdtsmqr_ompss(PLASMA_enum side, PLASMA_enum trans,
                        int m1, int n1, int m2, int n2, int k, int ib, int nb,
                        double *A1, int lda1,
                        double *A2, int lda2,
                        const double *V, int ldv,
                        const double *T, int ldt,
                        double *WORK, int ldwork)
{
    /*
    CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
    */

    cublasOperation_t cutrans;
    if ( trans == PlasmaNoTrans )
        cutrans = CUBLAS_OP_N;
    else
        cutrans = CUBLAS_OP_T;

    cublasHandle_t handle = nanos_get_cublas_handle();
    cudaStream_t stream = nanos_get_kernel_execution_stream();
    cublasSetStream(handle, stream);
    cublasDtsmqr(handle, CUBLAS_SIDE_RIGHT, cutrans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
}
#endif

#ifdef PLASMA_WITH_CUDA_HYBRID
/* Hybrid mode: SMP implementation is the canonical task; the CUDA versions
   below are registered as alternatives via `implements(...)` so the runtime
   scheduler can pick either device per task instance. */
#pragma omp target device (smp) copy_deps
#pragma omp task inout([lda1*nb]A1, [lda2*nb]A2) in([ldv*nb]V, [ldt*nb]T) out([ib*nb]WORK) label(ldtsmqr_hyb_smp)
void CORE_ldtsmqr_ompss(PLASMA_enum side, PLASMA_enum trans,
                        int m1, int n1, int m2, int n2, int k, int ib, int nb,
                        double *A1, int lda1,
                        double *A2, int lda2,
                        const double *V, int ldv,
                        const double *T, int ldt,
                        double *WORK, int ldwork)
{
    CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
}

#pragma omp target device (smp) copy_deps
#pragma omp task inout([lda1*nb]A1, [lda2*nb]A2) in([ldv*nb]V, [ldt*nb]T) out([ib*nb]WORK) label(rdtsmqr_hyb_smp)
void CORE_rdtsmqr_ompss(PLASMA_enum side, PLASMA_enum trans,
                        int m1, int n1, int m2, int n2, int k, int ib, int nb,
                        double *A1, int lda1,
                        double *A2, int lda2,
                        const double *V, int ldv,
                        const double *T, int ldt,
                        double *WORK, int ldwork)
{
    CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
}

//Alternative implementations
/* CUDA alternative for the left-side hybrid task. */
#pragma omp target device (cuda) copy_deps implements(CORE_ldtsmqr_ompss)
#pragma omp task inout([lda1*nb]A1, [lda2*nb]A2) in([ldv*nb]V, [ldt*nb]T) out([ib*nb]WORK) label(ldtsmqr_hyb_cuda)
void CORE_ldtsmqr_cuda(PLASMA_enum side, PLASMA_enum trans,
                       int m1, int n1, int m2, int n2, int k, int ib, int nb,
                       double *A1, int lda1,
                       double *A2, int lda2,
                       const double *V, int ldv,
                       const double *T, int ldt,
                       double *WORK, int ldwork)
{
    /*
    CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
    */
    //printf("\n\n=============================> SALEM\n\n");

    cublasOperation_t cutrans;
    if ( trans == PlasmaNoTrans )
        cutrans = CUBLAS_OP_N;
    else
        cutrans = CUBLAS_OP_T;

    cublasHandle_t handle = nanos_get_cublas_handle();
    cudaStream_t stream = nanos_get_kernel_execution_stream();
    cublasSetStream(handle, stream);
    cublasDtsmqr(handle, CUBLAS_SIDE_LEFT, cutrans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
}

/* CUDA alternative for the right-side hybrid task. */
#pragma omp target device (cuda) copy_deps implements(CORE_rdtsmqr_ompss)
#pragma omp task inout([lda1*nb]A1, [lda2*nb]A2) in([ldv*nb]V, [ldt*nb]T) out([ib*nb]WORK) label(rdtsmqr_hyb_cuda)
void CORE_rdtsmqr_cuda(PLASMA_enum side, PLASMA_enum trans,
                       int m1, int n1, int m2, int n2, int k, int ib, int nb,
                       double *A1, int lda1,
                       double *A2, int lda2,
                       const double *V, int ldv,
                       const double *T, int ldt,
                       double *WORK, int ldwork)
{
    /*
    CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
    */

    cublasOperation_t cutrans;
    if ( trans == PlasmaNoTrans )
        cutrans = CUBLAS_OP_N;
    else
        cutrans = CUBLAS_OP_T;

    cublasHandle_t handle = nanos_get_cublas_handle();
    cudaStream_t stream = nanos_get_kernel_execution_stream();
    cublasSetStream(handle, stream);
    cublasDtsmqr(handle, CUBLAS_SIDE_RIGHT, cutrans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
}
#endif

/*
 * Runtime dispatcher: route a DTSMQR tile update either to QUARK
 * (QUARK_CORE_dtsmqr) or to OmpSs (the task wrappers above), depending on
 * which runtime PLASMA was initialized with.
 */
void RT_CORE_dtsmqr(Quark *quark, Quark_Task_Flags *task_flags,
                    PLASMA_enum side, PLASMA_enum trans,
                    int m1, int n1, int m2, int n2, int k, int ib, int nb,
                    double *A1, int lda1,
                    double *A2, int lda2,
                    const double *V, int ldv,
                    const double *T, int ldt)
{
    plasma_context_t *plasma;
    plasma = plasma_context_self();

    if (plasma->runtime == PLASMA_QUARK) {
        QUARK_CORE_dtsmqr(
            quark, task_flags,
            side, trans,
            m1, n1, m2, n2, k, ib, nb,
            A1, lda1,
            A2, lda2,
            V, ldv,
            T, ldt);
    }
    else if (plasma->runtime == PLASMA_OMPSS) {
        /* */
        // NOTE(review): WORK is heap-allocated per call and registered with the
        // runtime but never freed here — presumably reclaimed elsewhere or
        // leaked; TODO confirm ownership with the OmpSs runtime semantics.
        double *WORK = malloc(ib*nb*sizeof(double));
        #pragma omp register ([ib*nb]WORK)
        // NOTE(review): this outer ldwork is immediately shadowed by the
        // identical per-branch declarations below; the outer one is unused.
        int ldwork = side == PlasmaLeft?ib:nb;

        //#pragma omp task inout([nb*nb]A1, [nb*nb]A2) in([nb*nb]V, [ib*nb]T) label(dtsmqr)
        //CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);

        if (side == PlasmaLeft){
            int ldwork = ib;  // leading dimension of WORK for the left-side kernel
            //printf("\n\n============> DTSMQR_L BEFORE m1 %d n1 %d m2 %d n2 %d k %d ib %d nb %d lda1 %d lda2 %d ldv %d ldt %d ldwork %d \n", m1, n1, m2, n2, k, ib, nb, lda1, lda2, ldv, ldt, ldwork);
            CORE_ldtsmqr_ompss(side, trans, m1, n1, m2, n2, k, ib, nb, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
            //#pragma omp task inout([nb*nb]A1, [nb*nb]A2) in([nb*nb]V, [ib*nb]T) label(dtsmqr)
            //#pragma omp task inout([lda1*nb]A1, [lda2*nb]A2) in([ldv*nb]V, [ldt*nb]T) label(ldtsmqr)
            //CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
            //printf("\n\n============> DTSMQR_L AFTER \n");
        } else {
            int ldwork = nb;  // leading dimension of WORK for the right-side kernel
            //printf("\n\n============> DTSMQR_R BEFORE m1 %d n1 %d m2 %d n2 %d k %d ib %d nb %d lda1 %d lda2 %d ldv %d ldt %d ldwork %d \n", m1, n1, m2, n2, k, ib, nb, lda1, lda2, ldv, ldt, ldwork);
            CORE_rdtsmqr_ompss(side, trans, m1, n1, m2, n2, k, ib, nb, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
            //#pragma omp task inout([nb*nb]A1, [nb*nb]A2) in([nb*nb]V, [ib*nb]T) label(dtsmqr)
            //#pragma omp task inout([lda1*nb]A1, [lda2*nb]A2) in([ldv*nb]V, [ldt*nb]T) label(rdtsmqr)
            //CORE_dtsmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork);
            //printf("\n\n============> DTSMQR_R AFTER \n");
        }
    }
}
gimple.h
/* Gimple IR definitions. Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc. Contributed by Aldy Hernandez <aldyh@redhat.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_GIMPLE_H #define GCC_GIMPLE_H #include "pointer-set.h" #include "vec.h" #include "vecprim.h" #include "vecir.h" #include "ggc.h" #include "basic-block.h" #include "tree-ssa-operands.h" #include "tree-ssa-alias.h" #include "internal-fn.h" struct gimple_seq_node_d; typedef struct gimple_seq_node_d *gimple_seq_node; typedef const struct gimple_seq_node_d *const_gimple_seq_node; /* For each block, the PHI nodes that need to be rewritten are stored into these vectors. */ typedef VEC(gimple, heap) *gimple_vec; DEF_VEC_P (gimple_vec); DEF_VEC_ALLOC_P (gimple_vec, heap); enum gimple_code { #define DEFGSCODE(SYM, STRING, STRUCT) SYM, #include "gimple.def" #undef DEFGSCODE LAST_AND_UNUSED_GIMPLE_CODE }; extern const char *const gimple_code_name[]; extern const unsigned char gimple_rhs_class_table[]; /* Error out if a gimple tuple is addressed incorrectly. 
*/ #if defined ENABLE_GIMPLE_CHECKING #define gcc_gimple_checking_assert(EXPR) gcc_assert (EXPR) extern void gimple_check_failed (const_gimple, const char *, int, \ const char *, enum gimple_code, \ enum tree_code) ATTRIBUTE_NORETURN; #define GIMPLE_CHECK(GS, CODE) \ do { \ const_gimple __gs = (GS); \ if (gimple_code (__gs) != (CODE)) \ gimple_check_failed (__gs, __FILE__, __LINE__, __FUNCTION__, \ (CODE), ERROR_MARK); \ } while (0) #else /* not ENABLE_GIMPLE_CHECKING */ #define gcc_gimple_checking_assert(EXPR) ((void)(0 && (EXPR))) #define GIMPLE_CHECK(GS, CODE) (void)0 #endif /* Class of GIMPLE expressions suitable for the RHS of assignments. See get_gimple_rhs_class. */ enum gimple_rhs_class { GIMPLE_INVALID_RHS, /* The expression cannot be used on the RHS. */ GIMPLE_TERNARY_RHS, /* The expression is a ternary operation. */ GIMPLE_BINARY_RHS, /* The expression is a binary operation. */ GIMPLE_UNARY_RHS, /* The expression is a unary operation. */ GIMPLE_SINGLE_RHS /* The expression is a single object (an SSA name, a _DECL, a _REF, etc. */ }; #define GF_CALL_INTERNAL_FN_SHIFT 8 /* Specific flags for individual GIMPLE statements. These flags are always stored in gimple_statement_base.subcode and they may only be defined for statement codes that do not use sub-codes. Values for the masks can overlap as long as the overlapping values are never used in the same statement class. The maximum mask value that can be defined is 1 << 15 (i.e., each statement code can hold up to 16 bitflags). Keep this list sorted. 
*/ enum gf_mask { GF_ASM_INPUT = 1 << 0, GF_ASM_VOLATILE = 1 << 1, GF_CALL_CANNOT_INLINE = 1 << 0, GF_CALL_FROM_THUNK = 1 << 1, GF_CALL_RETURN_SLOT_OPT = 1 << 2, GF_CALL_TAILCALL = 1 << 3, GF_CALL_VA_ARG_PACK = 1 << 4, GF_CALL_NOTHROW = 1 << 5, GF_CALL_INTERNAL = 1 << 6, GF_CALL_INTERNAL_FN = 0xff << GF_CALL_INTERNAL_FN_SHIFT, GF_OMP_PARALLEL_COMBINED = 1 << 0, /* True on an GIMPLE_OMP_RETURN statement if the return does not require a thread synchronization via some sort of barrier. The exact barrier that would otherwise be emitted is dependent on the OMP statement with which this return is associated. */ GF_OMP_RETURN_NOWAIT = 1 << 0, GF_OMP_SECTION_LAST = 1 << 0, GF_PREDICT_TAKEN = 1 << 15 }; /* Currently, there's only one type of gimple debug stmt. Others are envisioned, for example, to enable the generation of is_stmt notes in line number information, to mark sequence points, etc. This subcode is to be used to tell them apart. */ enum gimple_debug_subcode { GIMPLE_DEBUG_BIND = 0 }; /* Masks for selecting a pass local flag (PLF) to work on. These masks are used by gimple_set_plf and gimple_plf. */ enum plf_mask { GF_PLF_1 = 1 << 0, GF_PLF_2 = 1 << 1 }; /* A node in a gimple_seq_d. */ struct GTY((chain_next ("%h.next"), chain_prev ("%h.prev"))) gimple_seq_node_d { gimple stmt; struct gimple_seq_node_d *prev; struct gimple_seq_node_d *next; }; /* A double-linked sequence of gimple statements. */ struct GTY ((chain_next ("%h.next_free"))) gimple_seq_d { /* First and last statements in the sequence. */ gimple_seq_node first; gimple_seq_node last; /* Sequences are created/destroyed frequently. To minimize allocation activity, deallocated sequences are kept in a pool of available sequences. This is the pointer to the next free sequence in the pool. */ gimple_seq next_free; }; /* Return the first node in GIMPLE sequence S. */ static inline gimple_seq_node gimple_seq_first (const_gimple_seq s) { return s ? 
s->first : NULL; } /* Return the first statement in GIMPLE sequence S. */ static inline gimple gimple_seq_first_stmt (const_gimple_seq s) { gimple_seq_node n = gimple_seq_first (s); return (n) ? n->stmt : NULL; } /* Return the last node in GIMPLE sequence S. */ static inline gimple_seq_node gimple_seq_last (const_gimple_seq s) { return s ? s->last : NULL; } /* Return the last statement in GIMPLE sequence S. */ static inline gimple gimple_seq_last_stmt (const_gimple_seq s) { gimple_seq_node n = gimple_seq_last (s); return (n) ? n->stmt : NULL; } /* Set the last node in GIMPLE sequence S to LAST. */ static inline void gimple_seq_set_last (gimple_seq s, gimple_seq_node last) { s->last = last; } /* Set the first node in GIMPLE sequence S to FIRST. */ static inline void gimple_seq_set_first (gimple_seq s, gimple_seq_node first) { s->first = first; } /* Return true if GIMPLE sequence S is empty. */ static inline bool gimple_seq_empty_p (const_gimple_seq s) { return s == NULL || s->first == NULL; } void gimple_seq_add_stmt (gimple_seq *, gimple); /* Link gimple statement GS to the end of the sequence *SEQ_P. If *SEQ_P is NULL, a new sequence is allocated. This function is similar to gimple_seq_add_stmt, but does not scan the operands. During gimplification, we need to manipulate statement sequences before the def/use vectors have been constructed. */ void gimplify_seq_add_stmt (gimple_seq *, gimple); /* Allocate a new sequence and initialize its first element with STMT. */ static inline gimple_seq gimple_seq_alloc_with_stmt (gimple stmt) { gimple_seq seq = NULL; gimple_seq_add_stmt (&seq, stmt); return seq; } /* Returns the sequence of statements in BB. */ static inline gimple_seq bb_seq (const_basic_block bb) { return (!(bb->flags & BB_RTL) && bb->il.gimple) ? bb->il.gimple->seq : NULL; } /* Sets the sequence of statements in BB to SEQ. 
*/ static inline void set_bb_seq (basic_block bb, gimple_seq seq) { gcc_checking_assert (!(bb->flags & BB_RTL)); bb->il.gimple->seq = seq; } /* Iterator object for GIMPLE statement sequences. */ typedef struct { /* Sequence node holding the current statement. */ gimple_seq_node ptr; /* Sequence and basic block holding the statement. These fields are necessary to handle edge cases such as when statement is added to an empty basic block or when the last statement of a block/sequence is removed. */ gimple_seq seq; basic_block bb; } gimple_stmt_iterator; /* Data structure definitions for GIMPLE tuples. NOTE: word markers are for 64 bit hosts. */ struct GTY(()) gimple_statement_base { /* [ WORD 1 ] Main identifying code for a tuple. */ ENUM_BITFIELD(gimple_code) code : 8; /* Nonzero if a warning should not be emitted on this tuple. */ unsigned int no_warning : 1; /* Nonzero if this tuple has been visited. Passes are responsible for clearing this bit before using it. */ unsigned int visited : 1; /* Nonzero if this tuple represents a non-temporal move. */ unsigned int nontemporal_move : 1; /* Pass local flags. These flags are free for any pass to use as they see fit. Passes should not assume that these flags contain any useful value when the pass starts. Any initial state that the pass requires should be set on entry to the pass. See gimple_set_plf and gimple_plf for usage. */ unsigned int plf : 2; /* Nonzero if this statement has been modified and needs to have its operands rescanned. */ unsigned modified : 1; /* Nonzero if this statement contains volatile operands. */ unsigned has_volatile_ops : 1; /* Padding to get subcode to 16 bit alignment. */ unsigned pad : 1; /* The SUBCODE field can be used for tuple-specific flags for tuples that do not require subcodes. Note that SUBCODE should be at least as wide as tree codes, as several tuples store tree codes in there. */ unsigned int subcode : 16; /* UID of this statement. 
This is used by passes that want to assign IDs to statements. It must be assigned and used by each pass. By default it should be assumed to contain garbage. */ unsigned uid; /* [ WORD 2 ] Locus information for debug info. */ location_t location; /* Number of operands in this tuple. */ unsigned num_ops; /* [ WORD 3 ] Basic block holding this statement. */ struct basic_block_def *bb; /* [ WORD 4 ] Lexical block holding this statement. */ tree block; }; /* Base structure for tuples with operands. */ struct GTY(()) gimple_statement_with_ops_base { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5-6 ] SSA operand vectors. NOTE: It should be possible to amalgamate these vectors with the operand vector OP. However, the SSA operand vectors are organized differently and contain more information (like immediate use chaining). */ struct def_optype_d GTY((skip (""))) *def_ops; struct use_optype_d GTY((skip (""))) *use_ops; }; /* Statements that take register operands. */ struct GTY(()) gimple_statement_with_ops { /* [ WORD 1-6 ] */ struct gimple_statement_with_ops_base opbase; /* [ WORD 7 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.opbase.gsbase.num_ops"))) op[1]; }; /* Base for statements that take both memory and register operands. */ struct GTY(()) gimple_statement_with_memory_ops_base { /* [ WORD 1-6 ] */ struct gimple_statement_with_ops_base opbase; /* [ WORD 7-8 ] Virtual operands for this statement. The GC will pick them up via the ssa_names array. */ tree GTY((skip (""))) vdef; tree GTY((skip (""))) vuse; }; /* Statements that take both memory and register operands. */ struct GTY(()) gimple_statement_with_memory_ops { /* [ WORD 1-8 ] */ struct gimple_statement_with_memory_ops_base membase; /* [ WORD 9 ] Operand vector. NOTE! This must always be the last field of this structure. 
In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1]; }; /* Call statements that take both memory and register operands. */ struct GTY(()) gimple_statement_call { /* [ WORD 1-8 ] */ struct gimple_statement_with_memory_ops_base membase; /* [ WORD 9-12 ] */ struct pt_solution call_used; struct pt_solution call_clobbered; /* [ WORD 13 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1]; }; /* OpenMP statements (#pragma omp). */ struct GTY(()) gimple_statement_omp { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ gimple_seq body; }; /* GIMPLE_BIND */ struct GTY(()) gimple_statement_bind { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Variables declared in this scope. */ tree vars; /* [ WORD 6 ] This is different than the BLOCK field in gimple_statement_base, which is analogous to TREE_BLOCK (i.e., the lexical block holding this statement). This field is the equivalent of BIND_EXPR_BLOCK in tree land (i.e., the lexical scope defined by this bind). See gimple-low.c. */ tree block; /* [ WORD 7 ] */ gimple_seq body; }; /* GIMPLE_CATCH */ struct GTY(()) gimple_statement_catch { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ tree types; /* [ WORD 6 ] */ gimple_seq handler; }; /* GIMPLE_EH_FILTER */ struct GTY(()) gimple_statement_eh_filter { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Filter types. */ tree types; /* [ WORD 6 ] Failure actions. */ gimple_seq failure; }; /* GIMPLE_EH_MUST_NOT_THROW */ struct GTY(()) gimple_statement_eh_mnt { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Abort function decl. 
*/ tree fndecl; }; /* GIMPLE_PHI */ struct GTY(()) gimple_statement_phi { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ unsigned capacity; unsigned nargs; /* [ WORD 6 ] */ tree result; /* [ WORD 7 ] */ struct phi_arg_d GTY ((length ("%h.nargs"))) args[1]; }; /* GIMPLE_RESX, GIMPLE_EH_DISPATCH */ struct GTY(()) gimple_statement_eh_ctrl { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Exception region number. */ int region; }; /* GIMPLE_TRY */ struct GTY(()) gimple_statement_try { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Expression to evaluate. */ gimple_seq eval; /* [ WORD 6 ] Cleanup expression. */ gimple_seq cleanup; }; /* Kind of GIMPLE_TRY statements. */ enum gimple_try_flags { /* A try/catch. */ GIMPLE_TRY_CATCH = 1 << 0, /* A try/finally. */ GIMPLE_TRY_FINALLY = 1 << 1, GIMPLE_TRY_KIND = GIMPLE_TRY_CATCH | GIMPLE_TRY_FINALLY, /* Analogous to TRY_CATCH_IS_CLEANUP. */ GIMPLE_TRY_CATCH_IS_CLEANUP = 1 << 2 }; /* GIMPLE_WITH_CLEANUP_EXPR */ struct GTY(()) gimple_statement_wce { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* Subcode: CLEANUP_EH_ONLY. True if the cleanup should only be executed if an exception is thrown, not on normal exit of its scope. This flag is analogous to the CLEANUP_EH_ONLY flag in TARGET_EXPRs. */ /* [ WORD 5 ] Cleanup expression. */ gimple_seq cleanup; }; /* GIMPLE_ASM */ struct GTY(()) gimple_statement_asm { /* [ WORD 1-8 ] */ struct gimple_statement_with_memory_ops_base membase; /* [ WORD 9 ] __asm__ statement. */ const char *string; /* [ WORD 10 ] Number of inputs, outputs, clobbers, labels. */ unsigned char ni; unsigned char no; unsigned char nc; unsigned char nl; /* [ WORD 11 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. 
*/ tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1]; }; /* GIMPLE_OMP_CRITICAL */ struct GTY(()) gimple_statement_omp_critical { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] Critical section name. */ tree name; }; struct GTY(()) gimple_omp_for_iter { /* Condition code. */ enum tree_code cond; /* Index variable. */ tree index; /* Initial value. */ tree initial; /* Final value. */ tree final; /* Increment. */ tree incr; }; /* GIMPLE_OMP_FOR */ struct GTY(()) gimple_statement_omp_for { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] */ tree clauses; /* [ WORD 7 ] Number of elements in iter array. */ size_t collapse; /* [ WORD 8 ] */ struct gimple_omp_for_iter * GTY((length ("%h.collapse"))) iter; /* [ WORD 9 ] Pre-body evaluated before the loop body begins. */ gimple_seq pre_body; }; /* GIMPLE_OMP_PARALLEL */ struct GTY(()) gimple_statement_omp_parallel { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] Clauses. */ tree clauses; /* [ WORD 7 ] Child function holding the body of the parallel region. */ tree child_fn; /* [ WORD 8 ] Shared data argument. */ tree data_arg; }; /* GIMPLE_OMP_TASK */ struct GTY(()) gimple_statement_omp_task { /* [ WORD 1-8 ] */ struct gimple_statement_omp_parallel par; /* [ WORD 9 ] Child function holding firstprivate initialization if needed. */ tree copy_fn; /* [ WORD 10-11 ] Size and alignment in bytes of the argument data block. */ tree arg_size; tree arg_align; }; /* GIMPLE_OMP_SECTION */ /* Uses struct gimple_statement_omp. */ /* GIMPLE_OMP_SECTIONS */ struct GTY(()) gimple_statement_omp_sections { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] */ tree clauses; /* [ WORD 7 ] The control variable used for deciding which of the sections to execute. */ tree control; }; /* GIMPLE_OMP_CONTINUE. Note: This does not inherit from gimple_statement_omp, because we do not need the body field. 
 */

struct GTY(()) gimple_statement_omp_continue
{
  /* [ WORD 1-4 ] */
  struct gimple_statement_base gsbase;

  /* [ WORD 5 ] */
  tree control_def;

  /* [ WORD 6 ] */
  tree control_use;
};

/* GIMPLE_OMP_SINGLE */

struct GTY(()) gimple_statement_omp_single
{
  /* [ WORD 1-5 ] */
  struct gimple_statement_omp omp;

  /* [ WORD 6 ] */
  tree clauses;
};


/* GIMPLE_OMP_ATOMIC_LOAD.
   Note: This is based on gimple_statement_base, not g_s_omp, because g_s_omp
   contains a sequence, which we don't need here.  */

struct GTY(()) gimple_statement_omp_atomic_load
{
  /* [ WORD 1-4 ] */
  struct gimple_statement_base gsbase;

  /* [ WORD 5-6 ] */
  tree rhs, lhs;
};

/* GIMPLE_OMP_ATOMIC_STORE.
   See note on GIMPLE_OMP_ATOMIC_LOAD.  */

struct GTY(()) gimple_statement_omp_atomic_store
{
  /* [ WORD 1-4 ] */
  struct gimple_statement_base gsbase;

  /* [ WORD 5 ] */
  tree val;
};

/* Build the GSS_* enumerators from gsstruct.def via the x-macro below:
   each DEFGSSTRUCT invocation in that file expands to its SYM value,
   so the enum stays in sync with the structure list automatically.  */
#define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP)	SYM,
enum gimple_statement_structure_enum {
#include "gsstruct.def"
    LAST_GSS_ENUM
};
#undef DEFGSSTRUCT


/* Define the overall contents of a gimple tuple.  It may be any of the
   structures declared above for various types of tuples.
*/ union GTY ((desc ("gimple_statement_structure (&%h)"), variable_size)) gimple_statement_d { struct gimple_statement_base GTY ((tag ("GSS_BASE"))) gsbase; struct gimple_statement_with_ops GTY ((tag ("GSS_WITH_OPS"))) gsops; struct gimple_statement_with_memory_ops_base GTY ((tag ("GSS_WITH_MEM_OPS_BASE"))) gsmembase; struct gimple_statement_with_memory_ops GTY ((tag ("GSS_WITH_MEM_OPS"))) gsmem; struct gimple_statement_call GTY ((tag ("GSS_CALL"))) gimple_call; struct gimple_statement_omp GTY ((tag ("GSS_OMP"))) omp; struct gimple_statement_bind GTY ((tag ("GSS_BIND"))) gimple_bind; struct gimple_statement_catch GTY ((tag ("GSS_CATCH"))) gimple_catch; struct gimple_statement_eh_filter GTY ((tag ("GSS_EH_FILTER"))) gimple_eh_filter; struct gimple_statement_eh_mnt GTY ((tag ("GSS_EH_MNT"))) gimple_eh_mnt; struct gimple_statement_phi GTY ((tag ("GSS_PHI"))) gimple_phi; struct gimple_statement_eh_ctrl GTY ((tag ("GSS_EH_CTRL"))) gimple_eh_ctrl; struct gimple_statement_try GTY ((tag ("GSS_TRY"))) gimple_try; struct gimple_statement_wce GTY ((tag ("GSS_WCE"))) gimple_wce; struct gimple_statement_asm GTY ((tag ("GSS_ASM"))) gimple_asm; struct gimple_statement_omp_critical GTY ((tag ("GSS_OMP_CRITICAL"))) gimple_omp_critical; struct gimple_statement_omp_for GTY ((tag ("GSS_OMP_FOR"))) gimple_omp_for; struct gimple_statement_omp_parallel GTY ((tag ("GSS_OMP_PARALLEL"))) gimple_omp_parallel; struct gimple_statement_omp_task GTY ((tag ("GSS_OMP_TASK"))) gimple_omp_task; struct gimple_statement_omp_sections GTY ((tag ("GSS_OMP_SECTIONS"))) gimple_omp_sections; struct gimple_statement_omp_single GTY ((tag ("GSS_OMP_SINGLE"))) gimple_omp_single; struct gimple_statement_omp_continue GTY ((tag ("GSS_OMP_CONTINUE"))) gimple_omp_continue; struct gimple_statement_omp_atomic_load GTY ((tag ("GSS_OMP_ATOMIC_LOAD"))) gimple_omp_atomic_load; struct gimple_statement_omp_atomic_store GTY ((tag ("GSS_OMP_ATOMIC_STORE"))) gimple_omp_atomic_store; }; /* In gimple.c. 
 */

/* Offset in bytes to the location of the operand vector.
   Zero if there is no operand vector for this tuple structure.  */
extern size_t const gimple_ops_offset_[];

/* Map GIMPLE codes to GSS codes.  */
extern enum gimple_statement_structure_enum const gss_for_code_[];

/* This variable holds the currently expanded gimple statement for purposes
   of communicating the profile info to the builtin expanders.  */
extern gimple currently_expanding_gimple_stmt;

gimple gimple_build_return (tree);
gimple gimple_build_assign_stat (tree, tree MEM_STAT_DECL);
#define gimple_build_assign(l,r) gimple_build_assign_stat (l, r MEM_STAT_INFO)

void extract_ops_from_tree_1 (tree, enum tree_code *, tree *, tree *, tree *);

gimple gimple_build_assign_with_ops_stat (enum tree_code, tree, tree,
					  tree, tree MEM_STAT_DECL);
#define gimple_build_assign_with_ops(c,o1,o2,o3) \
  gimple_build_assign_with_ops_stat (c, o1, o2, o3, NULL_TREE MEM_STAT_INFO)
#define gimple_build_assign_with_ops3(c,o1,o2,o3,o4) \
  gimple_build_assign_with_ops_stat (c, o1, o2, o3, o4 MEM_STAT_INFO)

gimple gimple_build_debug_bind_stat (tree, tree, gimple MEM_STAT_DECL);
#define gimple_build_debug_bind(var,val,stmt) \
  gimple_build_debug_bind_stat ((var), (val), (stmt) MEM_STAT_INFO)

gimple gimple_build_call_vec (tree, VEC(tree, heap) *);
gimple gimple_build_call (tree, unsigned, ...);
gimple gimple_build_call_internal (enum internal_fn, unsigned, ...);
gimple gimple_build_call_internal_vec (enum internal_fn, VEC(tree, heap) *);
gimple gimple_build_call_from_tree (tree);
gimple gimplify_assign (tree, tree, gimple_seq *);
gimple gimple_build_cond (enum tree_code, tree, tree, tree, tree);
gimple gimple_build_label (tree label);
gimple gimple_build_goto (tree dest);
gimple gimple_build_nop (void);
gimple gimple_build_bind (tree, gimple_seq, tree);
gimple gimple_build_asm_vec (const char *, VEC(tree,gc) *, VEC(tree,gc) *,
			     VEC(tree,gc) *, VEC(tree,gc) *);
gimple gimple_build_catch (tree, gimple_seq);
gimple gimple_build_eh_filter
(tree, gimple_seq); gimple gimple_build_eh_must_not_throw (tree); gimple gimple_build_try (gimple_seq, gimple_seq, enum gimple_try_flags); gimple gimple_build_wce (gimple_seq); gimple gimple_build_resx (int); gimple gimple_build_eh_dispatch (int); gimple gimple_build_switch_nlabels (unsigned, tree, tree); gimple gimple_build_switch (unsigned, tree, tree, ...); gimple gimple_build_switch_vec (tree, tree, VEC(tree,heap) *); gimple gimple_build_omp_parallel (gimple_seq, tree, tree, tree); gimple gimple_build_omp_task (gimple_seq, tree, tree, tree, tree, tree, tree); gimple gimple_build_omp_for (gimple_seq, tree, size_t, gimple_seq); gimple gimple_build_omp_critical (gimple_seq, tree); gimple gimple_build_omp_section (gimple_seq); gimple gimple_build_omp_continue (tree, tree); gimple gimple_build_omp_master (gimple_seq); gimple gimple_build_omp_return (bool); gimple gimple_build_omp_ordered (gimple_seq); gimple gimple_build_omp_sections (gimple_seq, tree); gimple gimple_build_omp_sections_switch (void); gimple gimple_build_omp_single (gimple_seq, tree); gimple gimple_build_cdt (tree, tree); gimple gimple_build_omp_atomic_load (tree, tree); gimple gimple_build_omp_atomic_store (tree); gimple gimple_build_predict (enum br_predictor, enum prediction); enum gimple_statement_structure_enum gss_for_assign (enum tree_code); void sort_case_labels (VEC(tree,heap) *); void gimple_set_body (tree, gimple_seq); gimple_seq gimple_body (tree); bool gimple_has_body_p (tree); gimple_seq gimple_seq_alloc (void); void gimple_seq_free (gimple_seq); void gimple_seq_add_seq (gimple_seq *, gimple_seq); gimple_seq gimple_seq_copy (gimple_seq); bool gimple_call_same_target_p (const_gimple, const_gimple); int gimple_call_flags (const_gimple); int gimple_call_return_flags (const_gimple); int gimple_call_arg_flags (const_gimple, unsigned); void gimple_call_reset_alias_info (gimple); bool gimple_assign_copy_p (gimple); bool gimple_assign_ssa_name_copy_p (gimple); bool gimple_assign_unary_nop_p 
(gimple); void gimple_set_bb (gimple, struct basic_block_def *); void gimple_assign_set_rhs_from_tree (gimple_stmt_iterator *, tree); void gimple_assign_set_rhs_with_ops_1 (gimple_stmt_iterator *, enum tree_code, tree, tree, tree); tree gimple_get_lhs (const_gimple); void gimple_set_lhs (gimple, tree); void gimple_replace_lhs (gimple, tree); gimple gimple_copy (gimple); void gimple_set_modified (gimple, bool); void gimple_cond_get_ops_from_tree (tree, enum tree_code *, tree *, tree *); gimple gimple_build_cond_from_tree (tree, tree, tree); void gimple_cond_set_condition_from_tree (gimple, tree); bool gimple_has_side_effects (const_gimple); bool gimple_rhs_has_side_effects (const_gimple); bool gimple_could_trap_p (gimple); bool gimple_could_trap_p_1 (gimple, bool, bool); bool gimple_assign_rhs_could_trap_p (gimple); void gimple_regimplify_operands (gimple, gimple_stmt_iterator *); bool empty_body_p (gimple_seq); unsigned get_gimple_rhs_num_ops (enum tree_code); #define gimple_alloc(c, n) gimple_alloc_stat (c, n MEM_STAT_INFO) gimple gimple_alloc_stat (enum gimple_code, unsigned MEM_STAT_DECL); const char *gimple_decl_printable_name (tree, int); bool gimple_fold_call (gimple_stmt_iterator *gsi, bool inplace); tree gimple_get_virt_method_for_binfo (HOST_WIDE_INT, tree, tree *, bool); void gimple_adjust_this_by_delta (gimple_stmt_iterator *, tree); /* Returns true iff T is a valid GIMPLE statement. */ extern bool is_gimple_stmt (tree); /* Returns true iff TYPE is a valid type for a scalar register variable. */ extern bool is_gimple_reg_type (tree); /* Returns true iff T is a scalar register variable. */ extern bool is_gimple_reg (tree); /* Returns true iff T is any sort of variable. */ extern bool is_gimple_variable (tree); /* Returns true iff T is any sort of symbol. */ extern bool is_gimple_id (tree); /* Returns true iff T is a variable or an INDIRECT_REF (of a variable). 
 */
extern bool is_gimple_min_lval (tree);

/* Returns true iff T is something whose address can be taken.  */
extern bool is_gimple_addressable (tree);

/* Returns true iff T is any valid GIMPLE lvalue.  */
extern bool is_gimple_lvalue (tree);

/* Returns true iff T is a GIMPLE address.  */
bool is_gimple_address (const_tree);

/* Returns true iff T is a GIMPLE invariant address.  */
bool is_gimple_invariant_address (const_tree);

/* Returns true iff T is a GIMPLE invariant address at interprocedural
   level.  */
bool is_gimple_ip_invariant_address (const_tree);

/* Returns true iff T is a valid GIMPLE constant.  */
bool is_gimple_constant (const_tree);

/* Returns true iff T is a GIMPLE restricted function invariant.  */
extern bool is_gimple_min_invariant (const_tree);

/* Returns true iff T is a GIMPLE restricted interprocedural invariant.  */
extern bool is_gimple_ip_invariant (const_tree);

/* Returns true iff T is a GIMPLE rvalue.  */
extern bool is_gimple_val (tree);

/* Returns true iff T is a GIMPLE asm statement input.  */
extern bool is_gimple_asm_val (tree);

/* Returns true iff T is a valid address operand of a MEM_REF.  */
bool is_gimple_mem_ref_addr (tree);

/* Returns true iff T is a valid rhs for a MODIFY_EXPR where the LHS is a
   GIMPLE temporary, a renamed user variable, or something else,
   respectively.  */
extern bool is_gimple_reg_rhs (tree);
extern bool is_gimple_mem_rhs (tree);

/* Returns true iff T is a valid if-statement condition.  */
extern bool is_gimple_condexpr (tree);

/* Returns true iff T is a variable that does not need to live in memory.  */
extern bool is_gimple_non_addressable (tree t);

/* Returns true iff T is a valid call address expression.  */
extern bool is_gimple_call_addr (tree);

/* If T makes a function call, returns the CALL_EXPR operand.
*/ extern tree get_call_expr_in (tree t); extern void recalculate_side_effects (tree); extern bool gimple_compare_field_offset (tree, tree); extern tree gimple_register_type (tree); extern tree gimple_register_canonical_type (tree); enum gtc_mode { GTC_MERGE = 0, GTC_DIAG = 1 }; extern bool gimple_types_compatible_p (tree, tree, enum gtc_mode); extern void print_gimple_types_stats (void); extern void free_gimple_type_tables (void); extern tree gimple_unsigned_type (tree); extern tree gimple_signed_type (tree); extern alias_set_type gimple_get_alias_set (tree); extern void count_uses_and_derefs (tree, gimple, unsigned *, unsigned *, unsigned *); extern bool walk_stmt_load_store_addr_ops (gimple, void *, bool (*)(gimple, tree, void *), bool (*)(gimple, tree, void *), bool (*)(gimple, tree, void *)); extern bool walk_stmt_load_store_ops (gimple, void *, bool (*)(gimple, tree, void *), bool (*)(gimple, tree, void *)); extern bool gimple_ior_addresses_taken (bitmap, gimple); extern bool gimple_call_builtin_p (gimple, enum built_in_function); /* In gimplify.c */ extern tree create_tmp_var_raw (tree, const char *); extern tree create_tmp_var_name (const char *); extern tree create_tmp_var (tree, const char *); extern tree create_tmp_reg (tree, const char *); extern tree get_initialized_tmp_var (tree, gimple_seq *, gimple_seq *); extern tree get_formal_tmp_var (tree, gimple_seq *); extern void declare_vars (tree, gimple, bool); extern void annotate_all_with_location (gimple_seq, location_t); /* Validation of GIMPLE expressions. Note that these predicates only check the basic form of the expression, they don't recurse to make sure that underlying nodes are also of the right form. */ typedef bool (*gimple_predicate)(tree); /* FIXME we should deduce this from the predicate. */ enum fallback { fb_none = 0, /* Do not generate a temporary. */ fb_rvalue = 1, /* Generate an rvalue to hold the result of a gimplified expression. 
*/ fb_lvalue = 2, /* Generate an lvalue to hold the result of a gimplified expression. */ fb_mayfail = 4, /* Gimplification may fail. Error issued afterwards. */ fb_either= fb_rvalue | fb_lvalue }; typedef int fallback_t; enum gimplify_status { GS_ERROR = -2, /* Something Bad Seen. */ GS_UNHANDLED = -1, /* A langhook result for "I dunno". */ GS_OK = 0, /* We did something, maybe more to do. */ GS_ALL_DONE = 1 /* The expression is fully gimplified. */ }; struct gimplify_ctx { struct gimplify_ctx *prev_context; VEC(gimple,heap) *bind_expr_stack; tree temps; gimple_seq conditional_cleanups; tree exit_label; tree return_temp; VEC(tree,heap) *case_labels; /* The formal temporary table. Should this be persistent? */ htab_t temp_htab; int conditions; bool save_stack; bool into_ssa; bool allow_rhs_cond_expr; }; extern enum gimplify_status gimplify_expr (tree *, gimple_seq *, gimple_seq *, bool (*) (tree), fallback_t); extern void gimplify_type_sizes (tree, gimple_seq *); extern void gimplify_one_sizepos (tree *, gimple_seq *); extern bool gimplify_stmt (tree *, gimple_seq *); extern gimple gimplify_body (tree *, tree, bool); extern void push_gimplify_context (struct gimplify_ctx *); extern void pop_gimplify_context (gimple); extern void gimplify_and_add (tree, gimple_seq *); /* Miscellaneous helpers. */ extern void gimple_add_tmp_var (tree); extern gimple gimple_current_bind_expr (void); extern VEC(gimple, heap) *gimple_bind_expr_stack (void); extern tree voidify_wrapper_expr (tree, tree); extern tree build_and_jump (tree *); extern tree force_labels_r (tree *, int *, void *); extern enum gimplify_status gimplify_va_arg_expr (tree *, gimple_seq *, gimple_seq *); struct gimplify_omp_ctx; extern void omp_firstprivatize_variable (struct gimplify_omp_ctx *, tree); extern tree gimple_boolify (tree); extern gimple_predicate rhs_predicate_for (tree); extern tree canonicalize_cond_expr_cond (tree); /* In omp-low.c. 
*/ extern tree omp_reduction_init (tree, tree); /* In tree-nested.c. */ extern void lower_nested_functions (tree); extern void insert_field_into_struct (tree, tree); /* In gimplify.c. */ extern void gimplify_function_tree (tree); /* In cfgexpand.c. */ extern tree gimple_assign_rhs_to_tree (gimple); /* In builtins.c */ extern bool validate_gimple_arglist (const_gimple, ...); /* In tree-ssa.c */ extern bool tree_ssa_useless_type_conversion (tree); extern tree tree_ssa_strip_useless_type_conversions (tree); extern bool useless_type_conversion_p (tree, tree); extern bool types_compatible_p (tree, tree); /* Return the code for GIMPLE statement G. */ static inline enum gimple_code gimple_code (const_gimple g) { return g->gsbase.code; } /* Return the GSS code used by a GIMPLE code. */ static inline enum gimple_statement_structure_enum gss_for_code (enum gimple_code code) { gcc_gimple_checking_assert ((unsigned int)code < LAST_AND_UNUSED_GIMPLE_CODE); return gss_for_code_[code]; } /* Return which GSS code is used by GS. */ static inline enum gimple_statement_structure_enum gimple_statement_structure (gimple gs) { return gss_for_code (gimple_code (gs)); } /* Return true if statement G has sub-statements. This is only true for High GIMPLE statements. */ static inline bool gimple_has_substatements (gimple g) { switch (gimple_code (g)) { case GIMPLE_BIND: case GIMPLE_CATCH: case GIMPLE_EH_FILTER: case GIMPLE_TRY: case GIMPLE_OMP_FOR: case GIMPLE_OMP_MASTER: case GIMPLE_OMP_ORDERED: case GIMPLE_OMP_SECTION: case GIMPLE_OMP_PARALLEL: case GIMPLE_OMP_TASK: case GIMPLE_OMP_SECTIONS: case GIMPLE_OMP_SINGLE: case GIMPLE_OMP_CRITICAL: case GIMPLE_WITH_CLEANUP_EXPR: return true; default: return false; } } /* Return the basic block holding statement G. */ static inline struct basic_block_def * gimple_bb (const_gimple g) { return g->gsbase.bb; } /* Return the lexical scope block holding statement G. 
*/ static inline tree gimple_block (const_gimple g) { return g->gsbase.block; } /* Set BLOCK to be the lexical scope block holding statement G. */ static inline void gimple_set_block (gimple g, tree block) { g->gsbase.block = block; } /* Return location information for statement G. */ static inline location_t gimple_location (const_gimple g) { return g->gsbase.location; } /* Return pointer to location information for statement G. */ static inline const location_t * gimple_location_ptr (const_gimple g) { return &g->gsbase.location; } /* Set location information for statement G. */ static inline void gimple_set_location (gimple g, location_t location) { g->gsbase.location = location; } /* Return true if G contains location information. */ static inline bool gimple_has_location (const_gimple g) { return gimple_location (g) != UNKNOWN_LOCATION; } /* Return the file name of the location of STMT. */ static inline const char * gimple_filename (const_gimple stmt) { return LOCATION_FILE (gimple_location (stmt)); } /* Return the line number of the location of STMT. */ static inline int gimple_lineno (const_gimple stmt) { return LOCATION_LINE (gimple_location (stmt)); } /* Determine whether SEQ is a singleton. */ static inline bool gimple_seq_singleton_p (gimple_seq seq) { return ((gimple_seq_first (seq) != NULL) && (gimple_seq_first (seq) == gimple_seq_last (seq))); } /* Return true if no warnings should be emitted for statement STMT. */ static inline bool gimple_no_warning_p (const_gimple stmt) { return stmt->gsbase.no_warning; } /* Set the no_warning flag of STMT to NO_WARNING. */ static inline void gimple_set_no_warning (gimple stmt, bool no_warning) { stmt->gsbase.no_warning = (unsigned) no_warning; } /* Set the visited status on statement STMT to VISITED_P. */ static inline void gimple_set_visited (gimple stmt, bool visited_p) { stmt->gsbase.visited = (unsigned) visited_p; } /* Return the visited status for statement STMT. 
*/ static inline bool gimple_visited_p (gimple stmt) { return stmt->gsbase.visited; } /* Set pass local flag PLF on statement STMT to VAL_P. */ static inline void gimple_set_plf (gimple stmt, enum plf_mask plf, bool val_p) { if (val_p) stmt->gsbase.plf |= (unsigned int) plf; else stmt->gsbase.plf &= ~((unsigned int) plf); } /* Return the value of pass local flag PLF on statement STMT. */ static inline unsigned int gimple_plf (gimple stmt, enum plf_mask plf) { return stmt->gsbase.plf & ((unsigned int) plf); } /* Set the UID of statement. */ static inline void gimple_set_uid (gimple g, unsigned uid) { g->gsbase.uid = uid; } /* Return the UID of statement. */ static inline unsigned gimple_uid (const_gimple g) { return g->gsbase.uid; } /* Return true if GIMPLE statement G has register or memory operands. */ static inline bool gimple_has_ops (const_gimple g) { return gimple_code (g) >= GIMPLE_COND && gimple_code (g) <= GIMPLE_RETURN; } /* Return true if GIMPLE statement G has memory operands. */ static inline bool gimple_has_mem_ops (const_gimple g) { return gimple_code (g) >= GIMPLE_ASSIGN && gimple_code (g) <= GIMPLE_RETURN; } /* Return the set of DEF operands for statement G. */ static inline struct def_optype_d * gimple_def_ops (const_gimple g) { if (!gimple_has_ops (g)) return NULL; return g->gsops.opbase.def_ops; } /* Set DEF to be the set of DEF operands for statement G. */ static inline void gimple_set_def_ops (gimple g, struct def_optype_d *def) { gcc_gimple_checking_assert (gimple_has_ops (g)); g->gsops.opbase.def_ops = def; } /* Return the set of USE operands for statement G. */ static inline struct use_optype_d * gimple_use_ops (const_gimple g) { if (!gimple_has_ops (g)) return NULL; return g->gsops.opbase.use_ops; } /* Set USE to be the set of USE operands for statement G. 
*/ static inline void gimple_set_use_ops (gimple g, struct use_optype_d *use) { gcc_gimple_checking_assert (gimple_has_ops (g)); g->gsops.opbase.use_ops = use; } /* Return the set of VUSE operand for statement G. */ static inline use_operand_p gimple_vuse_op (const_gimple g) { struct use_optype_d *ops; if (!gimple_has_mem_ops (g)) return NULL_USE_OPERAND_P; ops = g->gsops.opbase.use_ops; if (ops && USE_OP_PTR (ops)->use == &g->gsmembase.vuse) return USE_OP_PTR (ops); return NULL_USE_OPERAND_P; } /* Return the set of VDEF operand for statement G. */ static inline def_operand_p gimple_vdef_op (const_gimple g) { struct def_optype_d *ops; if (!gimple_has_mem_ops (g)) return NULL_DEF_OPERAND_P; ops = g->gsops.opbase.def_ops; if (ops && DEF_OP_PTR (ops) == &g->gsmembase.vdef) return DEF_OP_PTR (ops); return NULL_DEF_OPERAND_P; } /* Return the single VUSE operand of the statement G. */ static inline tree gimple_vuse (const_gimple g) { if (!gimple_has_mem_ops (g)) return NULL_TREE; return g->gsmembase.vuse; } /* Return the single VDEF operand of the statement G. */ static inline tree gimple_vdef (const_gimple g) { if (!gimple_has_mem_ops (g)) return NULL_TREE; return g->gsmembase.vdef; } /* Return the single VUSE operand of the statement G. */ static inline tree * gimple_vuse_ptr (gimple g) { if (!gimple_has_mem_ops (g)) return NULL; return &g->gsmembase.vuse; } /* Return the single VDEF operand of the statement G. */ static inline tree * gimple_vdef_ptr (gimple g) { if (!gimple_has_mem_ops (g)) return NULL; return &g->gsmembase.vdef; } /* Set the single VUSE operand of the statement G. */ static inline void gimple_set_vuse (gimple g, tree vuse) { gcc_gimple_checking_assert (gimple_has_mem_ops (g)); g->gsmembase.vuse = vuse; } /* Set the single VDEF operand of the statement G. 
*/ static inline void gimple_set_vdef (gimple g, tree vdef) { gcc_gimple_checking_assert (gimple_has_mem_ops (g)); g->gsmembase.vdef = vdef; } /* Return true if statement G has operands and the modified field has been set. */ static inline bool gimple_modified_p (const_gimple g) { return (gimple_has_ops (g)) ? (bool) g->gsbase.modified : false; } /* Return the tree code for the expression computed by STMT. This is only valid for GIMPLE_COND, GIMPLE_CALL and GIMPLE_ASSIGN. For GIMPLE_CALL, return CALL_EXPR as the expression code for consistency. This is useful when the caller needs to deal with the three kinds of computation that GIMPLE supports. */ static inline enum tree_code gimple_expr_code (const_gimple stmt) { enum gimple_code code = gimple_code (stmt); if (code == GIMPLE_ASSIGN || code == GIMPLE_COND) return (enum tree_code) stmt->gsbase.subcode; else { gcc_gimple_checking_assert (code == GIMPLE_CALL); return CALL_EXPR; } } /* Mark statement S as modified, and update it. */ static inline void update_stmt (gimple s) { if (gimple_has_ops (s)) { gimple_set_modified (s, true); update_stmt_operands (s); } } /* Update statement S if it has been optimized. */ static inline void update_stmt_if_modified (gimple s) { if (gimple_modified_p (s)) update_stmt_operands (s); } /* Return true if statement STMT contains volatile operands. */ static inline bool gimple_has_volatile_ops (const_gimple stmt) { if (gimple_has_mem_ops (stmt)) return stmt->gsbase.has_volatile_ops; else return false; } /* Set the HAS_VOLATILE_OPS flag to VOLATILEP. */ static inline void gimple_set_has_volatile_ops (gimple stmt, bool volatilep) { if (gimple_has_mem_ops (stmt)) stmt->gsbase.has_volatile_ops = (unsigned) volatilep; } /* Return true if statement STMT may access memory. */ static inline bool gimple_references_memory_p (gimple stmt) { return gimple_has_mem_ops (stmt) && gimple_vuse (stmt); } /* Return the subcode for OMP statement S. 
*/ static inline unsigned gimple_omp_subcode (const_gimple s) { gcc_gimple_checking_assert (gimple_code (s) >= GIMPLE_OMP_ATOMIC_LOAD && gimple_code (s) <= GIMPLE_OMP_SINGLE); return s->gsbase.subcode; } /* Set the subcode for OMP statement S to SUBCODE. */ static inline void gimple_omp_set_subcode (gimple s, unsigned int subcode) { /* We only have 16 bits for the subcode. Assert that we are not overflowing it. */ gcc_gimple_checking_assert (subcode < (1 << 16)); s->gsbase.subcode = subcode; } /* Set the nowait flag on OMP_RETURN statement S. */ static inline void gimple_omp_return_set_nowait (gimple s) { GIMPLE_CHECK (s, GIMPLE_OMP_RETURN); s->gsbase.subcode |= GF_OMP_RETURN_NOWAIT; } /* Return true if OMP return statement G has the GF_OMP_RETURN_NOWAIT flag set. */ static inline bool gimple_omp_return_nowait_p (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_RETURN); return (gimple_omp_subcode (g) & GF_OMP_RETURN_NOWAIT) != 0; } /* Return true if OMP section statement G has the GF_OMP_SECTION_LAST flag set. */ static inline bool gimple_omp_section_last_p (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_SECTION); return (gimple_omp_subcode (g) & GF_OMP_SECTION_LAST) != 0; } /* Set the GF_OMP_SECTION_LAST flag on G. */ static inline void gimple_omp_section_set_last (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_SECTION); g->gsbase.subcode |= GF_OMP_SECTION_LAST; } /* Return true if OMP parallel statement G has the GF_OMP_PARALLEL_COMBINED flag set. */ static inline bool gimple_omp_parallel_combined_p (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL); return (gimple_omp_subcode (g) & GF_OMP_PARALLEL_COMBINED) != 0; } /* Set the GF_OMP_PARALLEL_COMBINED field in G depending on the boolean value of COMBINED_P. 
*/ static inline void gimple_omp_parallel_set_combined_p (gimple g, bool combined_p) { GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL); if (combined_p) g->gsbase.subcode |= GF_OMP_PARALLEL_COMBINED; else g->gsbase.subcode &= ~GF_OMP_PARALLEL_COMBINED; } /* Return the number of operands for statement GS. */ static inline unsigned gimple_num_ops (const_gimple gs) { return gs->gsbase.num_ops; } /* Set the number of operands for statement GS. */ static inline void gimple_set_num_ops (gimple gs, unsigned num_ops) { gs->gsbase.num_ops = num_ops; } /* Return the array of operands for statement GS. */ static inline tree * gimple_ops (gimple gs) { size_t off; /* All the tuples have their operand vector at the very bottom of the structure. Note that those structures that do not have an operand vector have a zero offset. */ off = gimple_ops_offset_[gimple_statement_structure (gs)]; gcc_gimple_checking_assert (off != 0); return (tree *) ((char *) gs + off); } /* Return operand I for statement GS. */ static inline tree gimple_op (const_gimple gs, unsigned i) { if (gimple_has_ops (gs)) { gcc_gimple_checking_assert (i < gimple_num_ops (gs)); return gimple_ops (CONST_CAST_GIMPLE (gs))[i]; } else return NULL_TREE; } /* Return a pointer to operand I for statement GS. */ static inline tree * gimple_op_ptr (const_gimple gs, unsigned i) { if (gimple_has_ops (gs)) { gcc_gimple_checking_assert (i < gimple_num_ops (gs)); return gimple_ops (CONST_CAST_GIMPLE (gs)) + i; } else return NULL; } /* Set operand I of statement GS to OP. */ static inline void gimple_set_op (gimple gs, unsigned i, tree op) { gcc_gimple_checking_assert (gimple_has_ops (gs) && i < gimple_num_ops (gs)); /* Note. It may be tempting to assert that OP matches is_gimple_operand, but that would be wrong. Different tuples accept slightly different sets of tree operands. Each caller should perform its own validation. */ gimple_ops (gs)[i] = op; } /* Return true if GS is a GIMPLE_ASSIGN. 
*/ static inline bool is_gimple_assign (const_gimple gs) { return gimple_code (gs) == GIMPLE_ASSIGN; } /* Determine if expression CODE is one of the valid expressions that can be used on the RHS of GIMPLE assignments. */ static inline enum gimple_rhs_class get_gimple_rhs_class (enum tree_code code) { return (enum gimple_rhs_class) gimple_rhs_class_table[(int) code]; } /* Return the LHS of assignment statement GS. */ static inline tree gimple_assign_lhs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op (gs, 0); } /* Return a pointer to the LHS of assignment statement GS. */ static inline tree * gimple_assign_lhs_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op_ptr (gs, 0); } /* Set LHS to be the LHS operand of assignment statement GS. */ static inline void gimple_assign_set_lhs (gimple gs, tree lhs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); gimple_set_op (gs, 0, lhs); if (lhs && TREE_CODE (lhs) == SSA_NAME) SSA_NAME_DEF_STMT (lhs) = gs; } /* Return the first operand on the RHS of assignment statement GS. */ static inline tree gimple_assign_rhs1 (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op (gs, 1); } /* Return a pointer to the first operand on the RHS of assignment statement GS. */ static inline tree * gimple_assign_rhs1_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op_ptr (gs, 1); } /* Set RHS to be the first operand on the RHS of assignment statement GS. */ static inline void gimple_assign_set_rhs1 (gimple gs, tree rhs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); gimple_set_op (gs, 1, rhs); } /* Return the second operand on the RHS of assignment statement GS. If GS does not have two operands, NULL is returned instead. */ static inline tree gimple_assign_rhs2 (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); if (gimple_num_ops (gs) >= 3) return gimple_op (gs, 2); else return NULL_TREE; } /* Return a pointer to the second operand on the RHS of assignment statement GS. 
 */

static inline tree *
gimple_assign_rhs2_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 2);
}


/* Set RHS to be the second operand on the RHS of assignment statement GS.  */

static inline void
gimple_assign_set_rhs2 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 2, rhs);
}

/* Return the third operand on the RHS of assignment statement GS.
   If GS does not have three operands, NULL is returned instead.  */

static inline tree
gimple_assign_rhs3 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);

  /* num_ops counts the LHS as well, so three RHS operands means >= 4.  */
  if (gimple_num_ops (gs) >= 4)
    return gimple_op (gs, 3);
  else
    return NULL_TREE;
}

/* Return a pointer to the third operand on the RHS of assignment
   statement GS.  */

static inline tree *
gimple_assign_rhs3_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 3);
}


/* Set RHS to be the third operand on the RHS of assignment statement GS.  */

static inline void
gimple_assign_set_rhs3 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 3, rhs);
}

/* A wrapper around gimple_assign_set_rhs_with_ops_1, for callers which expect
   to see only a maximum of two operands.  */

static inline void
gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code,
				tree op1, tree op2)
{
  gimple_assign_set_rhs_with_ops_1 (gsi, code, op1, op2, NULL);
}

/* A wrapper around extract_ops_from_tree_1, for callers which expect
   to see only a maximum of two operands.  */

static inline void
extract_ops_from_tree (tree expr, enum tree_code *code, tree *op0,
		       tree *op1)
{
  tree op2;
  extract_ops_from_tree_1 (expr, code, op0, op1, &op2);
  gcc_assert (op2 == NULL_TREE);
}

/* Returns true if GS is a nontemporal move.  */

static inline bool
gimple_assign_nontemporal_move_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gs->gsbase.nontemporal_move;
}

/* Sets nontemporal move flag of GS to NONTEMPORAL.
*/ static inline void gimple_assign_set_nontemporal_move (gimple gs, bool nontemporal) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); gs->gsbase.nontemporal_move = nontemporal; } /* Return the code of the expression computed on the rhs of assignment statement GS. In case that the RHS is a single object, returns the tree code of the object. */ static inline enum tree_code gimple_assign_rhs_code (const_gimple gs) { enum tree_code code; GIMPLE_CHECK (gs, GIMPLE_ASSIGN); code = (enum tree_code) gs->gsbase.subcode; /* While we initially set subcode to the TREE_CODE of the rhs for GIMPLE_SINGLE_RHS assigns we do not update that subcode to stay in sync when we rewrite stmts into SSA form or do SSA propagations. */ if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS) code = TREE_CODE (gimple_assign_rhs1 (gs)); return code; } /* Set CODE to be the code for the expression computed on the RHS of assignment S. */ static inline void gimple_assign_set_rhs_code (gimple s, enum tree_code code) { GIMPLE_CHECK (s, GIMPLE_ASSIGN); s->gsbase.subcode = code; } /* Return the gimple rhs class of the code of the expression computed on the rhs of assignment statement GS. This will never return GIMPLE_INVALID_RHS. */ static inline enum gimple_rhs_class gimple_assign_rhs_class (const_gimple gs) { return get_gimple_rhs_class (gimple_assign_rhs_code (gs)); } /* Return true if GS is an assignment with a singleton RHS, i.e., there is no operator associated with the assignment itself. Unlike gimple_assign_copy_p, this predicate returns true for any RHS operand, including those that perform an operation and do not have the semantics of a copy, such as COND_EXPR. */ static inline bool gimple_assign_single_p (gimple gs) { return (is_gimple_assign (gs) && gimple_assign_rhs_class (gs) == GIMPLE_SINGLE_RHS); } /* Return true if S is a type-cast assignment. 
*/ static inline bool gimple_assign_cast_p (gimple s) { if (is_gimple_assign (s)) { enum tree_code sc = gimple_assign_rhs_code (s); return CONVERT_EXPR_CODE_P (sc) || sc == VIEW_CONVERT_EXPR || sc == FIX_TRUNC_EXPR; } return false; } /* Return true if GS is a GIMPLE_CALL. */ static inline bool is_gimple_call (const_gimple gs) { return gimple_code (gs) == GIMPLE_CALL; } /* Return the LHS of call statement GS. */ static inline tree gimple_call_lhs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op (gs, 0); } /* Return a pointer to the LHS of call statement GS. */ static inline tree * gimple_call_lhs_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op_ptr (gs, 0); } /* Set LHS to be the LHS operand of call statement GS. */ static inline void gimple_call_set_lhs (gimple gs, tree lhs) { GIMPLE_CHECK (gs, GIMPLE_CALL); gimple_set_op (gs, 0, lhs); if (lhs && TREE_CODE (lhs) == SSA_NAME) SSA_NAME_DEF_STMT (lhs) = gs; } /* Return the tree node representing the function called by call statement GS. */ static inline tree gimple_call_fn (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op (gs, 1); } /* Return true if call GS calls an internal-only function, as enumerated by internal_fn. */ static inline bool gimple_call_internal_p (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return (gs->gsbase.subcode & GF_CALL_INTERNAL) != 0; } /* Return the target of internal call GS. */ static inline enum internal_fn gimple_call_internal_fn (const_gimple gs) { gcc_assert (gimple_call_internal_p (gs)); return (enum internal_fn) (gs->gsbase.subcode >> GF_CALL_INTERNAL_FN_SHIFT); } /* Return a pointer to the tree node representing the function called by call statement GS. */ static inline tree * gimple_call_fn_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op_ptr (gs, 1); } /* Set FN to be the function called by call statement GS. 
*/ static inline void gimple_call_set_fn (gimple gs, tree fn) { GIMPLE_CHECK (gs, GIMPLE_CALL); gcc_assert (!gimple_call_internal_p (gs)); gimple_set_op (gs, 1, fn); } /* Set FNDECL to be the function called by call statement GS. */ static inline void gimple_call_set_fndecl (gimple gs, tree decl) { GIMPLE_CHECK (gs, GIMPLE_CALL); gcc_assert (!gimple_call_internal_p (gs)); gimple_set_op (gs, 1, build_fold_addr_expr_loc (gimple_location (gs), decl)); } /* Set internal function FN to be the function called by call statement GS. */ static inline void gimple_call_set_internal_fn (gimple gs, enum internal_fn fn) { GIMPLE_CHECK (gs, GIMPLE_CALL); gcc_assert (gimple_call_internal_p (gs)); gs->gsbase.subcode &= ~GF_CALL_INTERNAL_FN; gs->gsbase.subcode |= (int) fn << GF_CALL_INTERNAL_FN_SHIFT; } /* If a given GIMPLE_CALL's callee is a FUNCTION_DECL, return it. Otherwise return NULL. This function is analogous to get_callee_fndecl in tree land. */ static inline tree gimple_call_fndecl (const_gimple gs) { tree addr = gimple_call_fn (gs); if (addr && TREE_CODE (addr) == ADDR_EXPR) { tree fndecl = TREE_OPERAND (addr, 0); if (TREE_CODE (fndecl) == MEM_REF) { if (TREE_CODE (TREE_OPERAND (fndecl, 0)) == ADDR_EXPR && integer_zerop (TREE_OPERAND (fndecl, 1))) return TREE_OPERAND (TREE_OPERAND (fndecl, 0), 0); else return NULL_TREE; } return TREE_OPERAND (addr, 0); } return NULL_TREE; } /* Return the type returned by call statement GS. */ static inline tree gimple_call_return_type (const_gimple gs) { tree fn, type; fn = gimple_call_fn (gs); if (fn == NULL_TREE) return TREE_TYPE (gimple_call_lhs (gs)); type = TREE_TYPE (fn); /* See through the pointer. */ type = TREE_TYPE (type); /* The type returned by a FUNCTION_DECL is the type of its function type. */ return TREE_TYPE (type); } /* Return the static chain for call statement GS. 
*/ static inline tree gimple_call_chain (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op (gs, 2); } /* Return a pointer to the static chain for call statement GS. */ static inline tree * gimple_call_chain_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op_ptr (gs, 2); } /* Set CHAIN to be the static chain for call statement GS. */ static inline void gimple_call_set_chain (gimple gs, tree chain) { GIMPLE_CHECK (gs, GIMPLE_CALL); gimple_set_op (gs, 2, chain); } /* Return the number of arguments used by call statement GS. */ static inline unsigned gimple_call_num_args (const_gimple gs) { unsigned num_ops; GIMPLE_CHECK (gs, GIMPLE_CALL); num_ops = gimple_num_ops (gs); return num_ops - 3; } /* Return the argument at position INDEX for call statement GS. */ static inline tree gimple_call_arg (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op (gs, index + 3); } /* Return a pointer to the argument at position INDEX for call statement GS. */ static inline tree * gimple_call_arg_ptr (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op_ptr (gs, index + 3); } /* Set ARG to be the argument at position INDEX for call statement GS. */ static inline void gimple_call_set_arg (gimple gs, unsigned index, tree arg) { GIMPLE_CHECK (gs, GIMPLE_CALL); gimple_set_op (gs, index + 3, arg); } /* If TAIL_P is true, mark call statement S as being a tail call (i.e., a call just before the exit of a function). These calls are candidate for tail call optimization. */ static inline void gimple_call_set_tail (gimple s, bool tail_p) { GIMPLE_CHECK (s, GIMPLE_CALL); if (tail_p) s->gsbase.subcode |= GF_CALL_TAILCALL; else s->gsbase.subcode &= ~GF_CALL_TAILCALL; } /* Return true if GIMPLE_CALL S is marked as a tail call. 
*/ static inline bool gimple_call_tail_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (s->gsbase.subcode & GF_CALL_TAILCALL) != 0; } /* Set the inlinable status of GIMPLE_CALL S to INLINABLE_P. */ static inline void gimple_call_set_cannot_inline (gimple s, bool inlinable_p) { GIMPLE_CHECK (s, GIMPLE_CALL); if (inlinable_p) s->gsbase.subcode |= GF_CALL_CANNOT_INLINE; else s->gsbase.subcode &= ~GF_CALL_CANNOT_INLINE; } /* Return true if GIMPLE_CALL S cannot be inlined. */ static inline bool gimple_call_cannot_inline_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (s->gsbase.subcode & GF_CALL_CANNOT_INLINE) != 0; } /* If RETURN_SLOT_OPT_P is true mark GIMPLE_CALL S as valid for return slot optimization. This transformation uses the target of the call expansion as the return slot for calls that return in memory. */ static inline void gimple_call_set_return_slot_opt (gimple s, bool return_slot_opt_p) { GIMPLE_CHECK (s, GIMPLE_CALL); if (return_slot_opt_p) s->gsbase.subcode |= GF_CALL_RETURN_SLOT_OPT; else s->gsbase.subcode &= ~GF_CALL_RETURN_SLOT_OPT; } /* Return true if S is marked for return slot optimization. */ static inline bool gimple_call_return_slot_opt_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (s->gsbase.subcode & GF_CALL_RETURN_SLOT_OPT) != 0; } /* If FROM_THUNK_P is true, mark GIMPLE_CALL S as being the jump from a thunk to the thunked-to function. */ static inline void gimple_call_set_from_thunk (gimple s, bool from_thunk_p) { GIMPLE_CHECK (s, GIMPLE_CALL); if (from_thunk_p) s->gsbase.subcode |= GF_CALL_FROM_THUNK; else s->gsbase.subcode &= ~GF_CALL_FROM_THUNK; } /* Return true if GIMPLE_CALL S is a jump from a thunk. */ static inline bool gimple_call_from_thunk_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (s->gsbase.subcode & GF_CALL_FROM_THUNK) != 0; } /* If PASS_ARG_PACK_P is true, GIMPLE_CALL S is a stdarg call that needs the argument pack in its argument list. 
*/ static inline void gimple_call_set_va_arg_pack (gimple s, bool pass_arg_pack_p) { GIMPLE_CHECK (s, GIMPLE_CALL); if (pass_arg_pack_p) s->gsbase.subcode |= GF_CALL_VA_ARG_PACK; else s->gsbase.subcode &= ~GF_CALL_VA_ARG_PACK; } /* Return true if GIMPLE_CALL S is a stdarg call that needs the argument pack in its argument list. */ static inline bool gimple_call_va_arg_pack_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (s->gsbase.subcode & GF_CALL_VA_ARG_PACK) != 0; } /* Return true if S is a noreturn call. */ static inline bool gimple_call_noreturn_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (gimple_call_flags (s) & ECF_NORETURN) != 0; } /* If NOTHROW_P is true, GIMPLE_CALL S is a call that is known to not throw even if the called function can throw in other cases. */ static inline void gimple_call_set_nothrow (gimple s, bool nothrow_p) { GIMPLE_CHECK (s, GIMPLE_CALL); if (nothrow_p) s->gsbase.subcode |= GF_CALL_NOTHROW; else s->gsbase.subcode &= ~GF_CALL_NOTHROW; } /* Return true if S is a nothrow call. */ static inline bool gimple_call_nothrow_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (gimple_call_flags (s) & ECF_NOTHROW) != 0; } /* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL. */ static inline void gimple_call_copy_flags (gimple dest_call, gimple orig_call) { GIMPLE_CHECK (dest_call, GIMPLE_CALL); GIMPLE_CHECK (orig_call, GIMPLE_CALL); dest_call->gsbase.subcode = orig_call->gsbase.subcode; } /* Return a pointer to the points-to solution for the set of call-used variables of the call CALL. */ static inline struct pt_solution * gimple_call_use_set (gimple call) { GIMPLE_CHECK (call, GIMPLE_CALL); return &call->gimple_call.call_used; } /* Return a pointer to the points-to solution for the set of call-clobbered variables of the call CALL. 
*/ static inline struct pt_solution * gimple_call_clobber_set (gimple call) { GIMPLE_CHECK (call, GIMPLE_CALL); return &call->gimple_call.call_clobbered; } /* Returns true if this is a GIMPLE_ASSIGN or a GIMPLE_CALL with a non-NULL lhs. */ static inline bool gimple_has_lhs (gimple stmt) { return (is_gimple_assign (stmt) || (is_gimple_call (stmt) && gimple_call_lhs (stmt) != NULL_TREE)); } /* Return the code of the predicate computed by conditional statement GS. */ static inline enum tree_code gimple_cond_code (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return (enum tree_code) gs->gsbase.subcode; } /* Set CODE to be the predicate code for the conditional statement GS. */ static inline void gimple_cond_set_code (gimple gs, enum tree_code code) { GIMPLE_CHECK (gs, GIMPLE_COND); gs->gsbase.subcode = code; } /* Return the LHS of the predicate computed by conditional statement GS. */ static inline tree gimple_cond_lhs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return gimple_op (gs, 0); } /* Return the pointer to the LHS of the predicate computed by conditional statement GS. */ static inline tree * gimple_cond_lhs_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return gimple_op_ptr (gs, 0); } /* Set LHS to be the LHS operand of the predicate computed by conditional statement GS. */ static inline void gimple_cond_set_lhs (gimple gs, tree lhs) { GIMPLE_CHECK (gs, GIMPLE_COND); gimple_set_op (gs, 0, lhs); } /* Return the RHS operand of the predicate computed by conditional GS. */ static inline tree gimple_cond_rhs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return gimple_op (gs, 1); } /* Return the pointer to the RHS operand of the predicate computed by conditional GS. */ static inline tree * gimple_cond_rhs_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return gimple_op_ptr (gs, 1); } /* Set RHS to be the RHS operand of the predicate computed by conditional statement GS. 
*/ static inline void gimple_cond_set_rhs (gimple gs, tree rhs) { GIMPLE_CHECK (gs, GIMPLE_COND); gimple_set_op (gs, 1, rhs); } /* Return the label used by conditional statement GS when its predicate evaluates to true. */ static inline tree gimple_cond_true_label (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return gimple_op (gs, 2); } /* Set LABEL to be the label used by conditional statement GS when its predicate evaluates to true. */ static inline void gimple_cond_set_true_label (gimple gs, tree label) { GIMPLE_CHECK (gs, GIMPLE_COND); gimple_set_op (gs, 2, label); } /* Set LABEL to be the label used by conditional statement GS when its predicate evaluates to false. */ static inline void gimple_cond_set_false_label (gimple gs, tree label) { GIMPLE_CHECK (gs, GIMPLE_COND); gimple_set_op (gs, 3, label); } /* Return the label used by conditional statement GS when its predicate evaluates to false. */ static inline tree gimple_cond_false_label (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return gimple_op (gs, 3); } /* Set the conditional COND_STMT to be of the form 'if (1 == 0)'. */ static inline void gimple_cond_make_false (gimple gs) { gimple_cond_set_lhs (gs, boolean_true_node); gimple_cond_set_rhs (gs, boolean_false_node); gs->gsbase.subcode = EQ_EXPR; } /* Set the conditional COND_STMT to be of the form 'if (1 == 1)'. 
*/ static inline void gimple_cond_make_true (gimple gs) { gimple_cond_set_lhs (gs, boolean_true_node); gimple_cond_set_rhs (gs, boolean_true_node); gs->gsbase.subcode = EQ_EXPR; } /* Check if conditional statement GS is of the form 'if (1 == 1)', 'if (0 == 0)', 'if (1 != 0)' or 'if (0 != 1)' */ static inline bool gimple_cond_true_p (const_gimple gs) { tree lhs = gimple_cond_lhs (gs); tree rhs = gimple_cond_rhs (gs); enum tree_code code = gimple_cond_code (gs); if (lhs != boolean_true_node && lhs != boolean_false_node) return false; if (rhs != boolean_true_node && rhs != boolean_false_node) return false; if (code == NE_EXPR && lhs != rhs) return true; if (code == EQ_EXPR && lhs == rhs) return true; return false; } /* Check if conditional statement GS is of the form 'if (1 != 1)', 'if (0 != 0)', 'if (1 == 0)' or 'if (0 == 1)' */ static inline bool gimple_cond_false_p (const_gimple gs) { tree lhs = gimple_cond_lhs (gs); tree rhs = gimple_cond_rhs (gs); enum tree_code code = gimple_cond_code (gs); if (lhs != boolean_true_node && lhs != boolean_false_node) return false; if (rhs != boolean_true_node && rhs != boolean_false_node) return false; if (code == NE_EXPR && lhs == rhs) return true; if (code == EQ_EXPR && lhs != rhs) return true; return false; } /* Check if conditional statement GS is of the form 'if (var != 0)' or 'if (var == 1)' */ static inline bool gimple_cond_single_var_p (gimple gs) { if (gimple_cond_code (gs) == NE_EXPR && gimple_cond_rhs (gs) == boolean_false_node) return true; if (gimple_cond_code (gs) == EQ_EXPR && gimple_cond_rhs (gs) == boolean_true_node) return true; return false; } /* Set the code, LHS and RHS of GIMPLE_COND STMT from CODE, LHS and RHS. */ static inline void gimple_cond_set_condition (gimple stmt, enum tree_code code, tree lhs, tree rhs) { gimple_cond_set_code (stmt, code); gimple_cond_set_lhs (stmt, lhs); gimple_cond_set_rhs (stmt, rhs); } /* Return the LABEL_DECL node used by GIMPLE_LABEL statement GS. 
*/ static inline tree gimple_label_label (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_LABEL); return gimple_op (gs, 0); } /* Set LABEL to be the LABEL_DECL node used by GIMPLE_LABEL statement GS. */ static inline void gimple_label_set_label (gimple gs, tree label) { GIMPLE_CHECK (gs, GIMPLE_LABEL); gimple_set_op (gs, 0, label); } /* Return the destination of the unconditional jump GS. */ static inline tree gimple_goto_dest (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_GOTO); return gimple_op (gs, 0); } /* Set DEST to be the destination of the unconditional jump GS. */ static inline void gimple_goto_set_dest (gimple gs, tree dest) { GIMPLE_CHECK (gs, GIMPLE_GOTO); gimple_set_op (gs, 0, dest); } /* Return the variables declared in the GIMPLE_BIND statement GS. */ static inline tree gimple_bind_vars (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_BIND); return gs->gimple_bind.vars; } /* Set VARS to be the set of variables declared in the GIMPLE_BIND statement GS. */ static inline void gimple_bind_set_vars (gimple gs, tree vars) { GIMPLE_CHECK (gs, GIMPLE_BIND); gs->gimple_bind.vars = vars; } /* Append VARS to the set of variables declared in the GIMPLE_BIND statement GS. */ static inline void gimple_bind_append_vars (gimple gs, tree vars) { GIMPLE_CHECK (gs, GIMPLE_BIND); gs->gimple_bind.vars = chainon (gs->gimple_bind.vars, vars); } /* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS. */ static inline gimple_seq gimple_bind_body (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_BIND); return gs->gimple_bind.body; } /* Set SEQ to be the GIMPLE sequence contained in the GIMPLE_BIND statement GS. */ static inline void gimple_bind_set_body (gimple gs, gimple_seq seq) { GIMPLE_CHECK (gs, GIMPLE_BIND); gs->gimple_bind.body = seq; } /* Append a statement to the end of a GIMPLE_BIND's body. 
*/ static inline void gimple_bind_add_stmt (gimple gs, gimple stmt) { GIMPLE_CHECK (gs, GIMPLE_BIND); gimple_seq_add_stmt (&gs->gimple_bind.body, stmt); } /* Append a sequence of statements to the end of a GIMPLE_BIND's body. */ static inline void gimple_bind_add_seq (gimple gs, gimple_seq seq) { GIMPLE_CHECK (gs, GIMPLE_BIND); gimple_seq_add_seq (&gs->gimple_bind.body, seq); } /* Return the TREE_BLOCK node associated with GIMPLE_BIND statement GS. This is analogous to the BIND_EXPR_BLOCK field in trees. */ static inline tree gimple_bind_block (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_BIND); return gs->gimple_bind.block; } /* Set BLOCK to be the TREE_BLOCK node associated with GIMPLE_BIND statement GS. */ static inline void gimple_bind_set_block (gimple gs, tree block) { GIMPLE_CHECK (gs, GIMPLE_BIND); gcc_gimple_checking_assert (block == NULL_TREE || TREE_CODE (block) == BLOCK); gs->gimple_bind.block = block; } /* Return the number of input operands for GIMPLE_ASM GS. */ static inline unsigned gimple_asm_ninputs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.ni; } /* Return the number of output operands for GIMPLE_ASM GS. */ static inline unsigned gimple_asm_noutputs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.no; } /* Return the number of clobber operands for GIMPLE_ASM GS. */ static inline unsigned gimple_asm_nclobbers (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.nc; } /* Return the number of label operands for GIMPLE_ASM GS. */ static inline unsigned gimple_asm_nlabels (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.nl; } /* Return input operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_input_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.ni); return gimple_op (gs, index); } /* Return a pointer to input operand INDEX of GIMPLE_ASM GS. 
*/ static inline tree * gimple_asm_input_op_ptr (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.ni); return gimple_op_ptr (gs, index); } /* Set IN_OP to be input operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_input_op (gimple gs, unsigned index, tree in_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.ni && TREE_CODE (in_op) == TREE_LIST); gimple_set_op (gs, index, in_op); } /* Return output operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_output_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.no); return gimple_op (gs, index + gs->gimple_asm.ni); } /* Return a pointer to output operand INDEX of GIMPLE_ASM GS. */ static inline tree * gimple_asm_output_op_ptr (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.no); return gimple_op_ptr (gs, index + gs->gimple_asm.ni); } /* Set OUT_OP to be output operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_output_op (gimple gs, unsigned index, tree out_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.no && TREE_CODE (out_op) == TREE_LIST); gimple_set_op (gs, index + gs->gimple_asm.ni, out_op); } /* Return clobber operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_clobber_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nc); return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no); } /* Set CLOBBER_OP to be clobber operand INDEX in GIMPLE_ASM GS. 
*/ static inline void gimple_asm_set_clobber_op (gimple gs, unsigned index, tree clobber_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nc && TREE_CODE (clobber_op) == TREE_LIST); gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no, clobber_op); } /* Return label operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_label_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nl); return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc); } /* Set LABEL_OP to be label operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_label_op (gimple gs, unsigned index, tree label_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nl && TREE_CODE (label_op) == TREE_LIST); gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc, label_op); } /* Return the string representing the assembly instruction in GIMPLE_ASM GS. */ static inline const char * gimple_asm_string (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.string; } /* Return true if GS is an asm statement marked volatile. */ static inline bool gimple_asm_volatile_p (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return (gs->gsbase.subcode & GF_ASM_VOLATILE) != 0; } /* If VOLATILE_P is true, mark asm statement GS as volatile. */ static inline void gimple_asm_set_volatile (gimple gs, bool volatile_p) { GIMPLE_CHECK (gs, GIMPLE_ASM); if (volatile_p) gs->gsbase.subcode |= GF_ASM_VOLATILE; else gs->gsbase.subcode &= ~GF_ASM_VOLATILE; } /* If INPUT_P is true, mark asm GS as an ASM_INPUT. */ static inline void gimple_asm_set_input (gimple gs, bool input_p) { GIMPLE_CHECK (gs, GIMPLE_ASM); if (input_p) gs->gsbase.subcode |= GF_ASM_INPUT; else gs->gsbase.subcode &= ~GF_ASM_INPUT; } /* Return true if asm GS is an ASM_INPUT. 
*/ static inline bool gimple_asm_input_p (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return (gs->gsbase.subcode & GF_ASM_INPUT) != 0; } /* Return the types handled by GIMPLE_CATCH statement GS. */ static inline tree gimple_catch_types (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CATCH); return gs->gimple_catch.types; } /* Return a pointer to the types handled by GIMPLE_CATCH statement GS. */ static inline tree * gimple_catch_types_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CATCH); return &gs->gimple_catch.types; } /* Return the GIMPLE sequence representing the body of the handler of GIMPLE_CATCH statement GS. */ static inline gimple_seq gimple_catch_handler (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CATCH); return gs->gimple_catch.handler; } /* Return a pointer to the GIMPLE sequence representing the body of the handler of GIMPLE_CATCH statement GS. */ static inline gimple_seq * gimple_catch_handler_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CATCH); return &gs->gimple_catch.handler; } /* Set T to be the set of types handled by GIMPLE_CATCH GS. */ static inline void gimple_catch_set_types (gimple gs, tree t) { GIMPLE_CHECK (gs, GIMPLE_CATCH); gs->gimple_catch.types = t; } /* Set HANDLER to be the body of GIMPLE_CATCH GS. */ static inline void gimple_catch_set_handler (gimple gs, gimple_seq handler) { GIMPLE_CHECK (gs, GIMPLE_CATCH); gs->gimple_catch.handler = handler; } /* Return the types handled by GIMPLE_EH_FILTER statement GS. */ static inline tree gimple_eh_filter_types (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_FILTER); return gs->gimple_eh_filter.types; } /* Return a pointer to the types handled by GIMPLE_EH_FILTER statement GS. */ static inline tree * gimple_eh_filter_types_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_FILTER); return &gs->gimple_eh_filter.types; } /* Return the sequence of statement to execute when GIMPLE_EH_FILTER statement fails. 
*/ static inline gimple_seq gimple_eh_filter_failure (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_FILTER); return gs->gimple_eh_filter.failure; } /* Set TYPES to be the set of types handled by GIMPLE_EH_FILTER GS. */ static inline void gimple_eh_filter_set_types (gimple gs, tree types) { GIMPLE_CHECK (gs, GIMPLE_EH_FILTER); gs->gimple_eh_filter.types = types; } /* Set FAILURE to be the sequence of statements to execute on failure for GIMPLE_EH_FILTER GS. */ static inline void gimple_eh_filter_set_failure (gimple gs, gimple_seq failure) { GIMPLE_CHECK (gs, GIMPLE_EH_FILTER); gs->gimple_eh_filter.failure = failure; } /* Get the function decl to be called by the MUST_NOT_THROW region. */ static inline tree gimple_eh_must_not_throw_fndecl (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW); return gs->gimple_eh_mnt.fndecl; } /* Set the function decl to be called by GS to DECL. */ static inline void gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl) { GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW); gs->gimple_eh_mnt.fndecl = decl; } /* GIMPLE_TRY accessors. */ /* Return the kind of try block represented by GIMPLE_TRY GS. This is either GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY. */ static inline enum gimple_try_flags gimple_try_kind (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_TRY); return (enum gimple_try_flags) (gs->gsbase.subcode & GIMPLE_TRY_KIND); } /* Set the kind of try block represented by GIMPLE_TRY GS. */ static inline void gimple_try_set_kind (gimple gs, enum gimple_try_flags kind) { GIMPLE_CHECK (gs, GIMPLE_TRY); gcc_gimple_checking_assert (kind == GIMPLE_TRY_CATCH || kind == GIMPLE_TRY_FINALLY); if (gimple_try_kind (gs) != kind) gs->gsbase.subcode = (unsigned int) kind; } /* Return the GIMPLE_TRY_CATCH_IS_CLEANUP flag. 
*/ static inline bool gimple_try_catch_is_cleanup (const_gimple gs) { gcc_gimple_checking_assert (gimple_try_kind (gs) == GIMPLE_TRY_CATCH); return (gs->gsbase.subcode & GIMPLE_TRY_CATCH_IS_CLEANUP) != 0; } /* Return the sequence of statements used as the body for GIMPLE_TRY GS. */ static inline gimple_seq gimple_try_eval (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_TRY); return gs->gimple_try.eval; } /* Return the sequence of statements used as the cleanup body for GIMPLE_TRY GS. */ static inline gimple_seq gimple_try_cleanup (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_TRY); return gs->gimple_try.cleanup; } /* Set the GIMPLE_TRY_CATCH_IS_CLEANUP flag. */ static inline void gimple_try_set_catch_is_cleanup (gimple g, bool catch_is_cleanup) { gcc_gimple_checking_assert (gimple_try_kind (g) == GIMPLE_TRY_CATCH); if (catch_is_cleanup) g->gsbase.subcode |= GIMPLE_TRY_CATCH_IS_CLEANUP; else g->gsbase.subcode &= ~GIMPLE_TRY_CATCH_IS_CLEANUP; } /* Set EVAL to be the sequence of statements to use as the body for GIMPLE_TRY GS. */ static inline void gimple_try_set_eval (gimple gs, gimple_seq eval) { GIMPLE_CHECK (gs, GIMPLE_TRY); gs->gimple_try.eval = eval; } /* Set CLEANUP to be the sequence of statements to use as the cleanup body for GIMPLE_TRY GS. */ static inline void gimple_try_set_cleanup (gimple gs, gimple_seq cleanup) { GIMPLE_CHECK (gs, GIMPLE_TRY); gs->gimple_try.cleanup = cleanup; } /* Return the cleanup sequence for cleanup statement GS. */ static inline gimple_seq gimple_wce_cleanup (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR); return gs->gimple_wce.cleanup; } /* Set CLEANUP to be the cleanup sequence for GS. */ static inline void gimple_wce_set_cleanup (gimple gs, gimple_seq cleanup) { GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR); gs->gimple_wce.cleanup = cleanup; } /* Return the CLEANUP_EH_ONLY flag for a WCE tuple. 
*/ static inline bool gimple_wce_cleanup_eh_only (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR); return gs->gsbase.subcode != 0; } /* Set the CLEANUP_EH_ONLY flag for a WCE tuple. */ static inline void gimple_wce_set_cleanup_eh_only (gimple gs, bool eh_only_p) { GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR); gs->gsbase.subcode = (unsigned int) eh_only_p; } /* Return the maximum number of arguments supported by GIMPLE_PHI GS. */ static inline unsigned gimple_phi_capacity (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PHI); return gs->gimple_phi.capacity; } /* Return the number of arguments in GIMPLE_PHI GS. This must always be exactly the number of incoming edges for the basic block holding GS. */ static inline unsigned gimple_phi_num_args (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PHI); return gs->gimple_phi.nargs; } /* Return the SSA name created by GIMPLE_PHI GS. */ static inline tree gimple_phi_result (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PHI); return gs->gimple_phi.result; } /* Return a pointer to the SSA name created by GIMPLE_PHI GS. */ static inline tree * gimple_phi_result_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PHI); return &gs->gimple_phi.result; } /* Set RESULT to be the SSA name created by GIMPLE_PHI GS. */ static inline void gimple_phi_set_result (gimple gs, tree result) { GIMPLE_CHECK (gs, GIMPLE_PHI); gs->gimple_phi.result = result; } /* Return the PHI argument corresponding to incoming edge INDEX for GIMPLE_PHI GS. */ static inline struct phi_arg_d * gimple_phi_arg (gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_PHI); gcc_gimple_checking_assert (index <= gs->gimple_phi.capacity); return &(gs->gimple_phi.args[index]); } /* Set PHIARG to be the argument corresponding to incoming edge INDEX for GIMPLE_PHI GS. 
*/ static inline void gimple_phi_set_arg (gimple gs, unsigned index, struct phi_arg_d * phiarg) { GIMPLE_CHECK (gs, GIMPLE_PHI); gcc_gimple_checking_assert (index <= gs->gimple_phi.nargs); gs->gimple_phi.args[index] = *phiarg; } /* Return the region number for GIMPLE_RESX GS. */ static inline int gimple_resx_region (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_RESX); return gs->gimple_eh_ctrl.region; } /* Set REGION to be the region number for GIMPLE_RESX GS. */ static inline void gimple_resx_set_region (gimple gs, int region) { GIMPLE_CHECK (gs, GIMPLE_RESX); gs->gimple_eh_ctrl.region = region; } /* Return the region number for GIMPLE_EH_DISPATCH GS. */ static inline int gimple_eh_dispatch_region (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH); return gs->gimple_eh_ctrl.region; } /* Set REGION to be the region number for GIMPLE_EH_DISPATCH GS. */ static inline void gimple_eh_dispatch_set_region (gimple gs, int region) { GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH); gs->gimple_eh_ctrl.region = region; } /* Return the number of labels associated with the switch statement GS. */ static inline unsigned gimple_switch_num_labels (const_gimple gs) { unsigned num_ops; GIMPLE_CHECK (gs, GIMPLE_SWITCH); num_ops = gimple_num_ops (gs); gcc_gimple_checking_assert (num_ops > 1); return num_ops - 1; } /* Set NLABELS to be the number of labels for the switch statement GS. */ static inline void gimple_switch_set_num_labels (gimple g, unsigned nlabels) { GIMPLE_CHECK (g, GIMPLE_SWITCH); gimple_set_num_ops (g, nlabels + 1); } /* Return the index variable used by the switch statement GS. */ static inline tree gimple_switch_index (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_SWITCH); return gimple_op (gs, 0); } /* Return a pointer to the index variable for the switch statement GS. */ static inline tree * gimple_switch_index_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_SWITCH); return gimple_op_ptr (gs, 0); } /* Set INDEX to be the index variable for switch statement GS. 
*/

static inline void
gimple_switch_set_index (gimple gs, tree index)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  gcc_gimple_checking_assert (SSA_VAR_P (index) || CONSTANT_CLASS_P (index));
  gimple_set_op (gs, 0, index);
}

/* Return the label numbered INDEX.  The default label is 0, followed
   by any labels in a switch statement.  */

static inline tree
gimple_switch_label (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  /* Labels are stored at operands 1..num_ops-1 (operand 0 is the
     index expression).  */
  gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1);
  return gimple_op (gs, index + 1);
}

/* Set the label number INDEX to LABEL.  0 is always the default label.  */

static inline void
gimple_switch_set_label (gimple gs, unsigned index, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1
			      && (label == NULL_TREE
			          || TREE_CODE (label) == CASE_LABEL_EXPR));
  gimple_set_op (gs, index + 1, label);
}

/* Return the default label for a switch statement.  */

static inline tree
gimple_switch_default_label (const_gimple gs)
{
  return gimple_switch_label (gs, 0);
}

/* Set the default label for a switch statement.  */

static inline void
gimple_switch_set_default_label (gimple gs, tree label)
{
  gimple_switch_set_label (gs, 0, label);
}

/* Return true if GS is a GIMPLE_DEBUG statement.  */

static inline bool
is_gimple_debug (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_DEBUG;
}

/* Return true if S is a GIMPLE_DEBUG BIND statement.  */

static inline bool
gimple_debug_bind_p (const_gimple s)
{
  if (is_gimple_debug (s))
    return s->gsbase.subcode == GIMPLE_DEBUG_BIND;
  return false;
}

/* Return the variable bound in a GIMPLE_DEBUG bind statement.  */

static inline tree
gimple_debug_bind_get_var (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op (dbg, 0);
}

/* Return the value bound to the variable in a GIMPLE_DEBUG bind
   statement.
*/

static inline tree
gimple_debug_bind_get_value (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op (dbg, 1);
}

/* Return a pointer to the value bound to the variable in a
   GIMPLE_DEBUG bind statement.  */

static inline tree *
gimple_debug_bind_get_value_ptr (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op_ptr (dbg, 1);
}

/* Set the variable bound in a GIMPLE_DEBUG bind statement.  */

static inline void
gimple_debug_bind_set_var (gimple dbg, tree var)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 0, var);
}

/* Set the value bound to the variable in a GIMPLE_DEBUG bind
   statement.  */

static inline void
gimple_debug_bind_set_value (gimple dbg, tree value)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 1, value);
}

/* The second operand of a GIMPLE_DEBUG_BIND, when the value was
   optimized away.  */
#define GIMPLE_DEBUG_BIND_NOVALUE NULL_TREE /* error_mark_node */

/* Remove the value bound to the variable in a GIMPLE_DEBUG bind
   statement.  */

static inline void
gimple_debug_bind_reset_value (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 1, GIMPLE_DEBUG_BIND_NOVALUE);
}

/* Return true if the GIMPLE_DEBUG bind statement is bound to a
   value.  */

static inline bool
gimple_debug_bind_has_value_p (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op (dbg, 1) != GIMPLE_DEBUG_BIND_NOVALUE;
}

#undef GIMPLE_DEBUG_BIND_NOVALUE

/* Return the body for the OMP statement GS.  */

static inline gimple_seq
gimple_omp_body (gimple gs)
{
  return gs->omp.body;
}

/* Set BODY to be the body for the OMP statement GS.
*/

static inline void
gimple_omp_set_body (gimple gs, gimple_seq body)
{
  gs->omp.body = body;
}

/* Return the name associated with OMP_CRITICAL statement GS.  */

static inline tree
gimple_omp_critical_name (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
  return gs->gimple_omp_critical.name;
}

/* Return a pointer to the name associated with OMP critical statement
   GS.  */

static inline tree *
gimple_omp_critical_name_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
  return &gs->gimple_omp_critical.name;
}

/* Set NAME to be the name associated with OMP critical statement GS.  */

static inline void
gimple_omp_critical_set_name (gimple gs, tree name)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
  gs->gimple_omp_critical.name = name;
}

/* Return the clauses associated with OMP_FOR GS.  */

static inline tree
gimple_omp_for_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  return gs->gimple_omp_for.clauses;
}

/* Return a pointer to the clauses associated with OMP_FOR GS.  */

static inline tree *
gimple_omp_for_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  return &gs->gimple_omp_for.clauses;
}

/* Set CLAUSES to be the list of clauses associated with OMP_FOR GS.  */

static inline void
gimple_omp_for_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gs->gimple_omp_for.clauses = clauses;
}

/* Get the collapse count of OMP_FOR GS.  */

static inline size_t
gimple_omp_for_collapse (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  return gs->gimple_omp_for.collapse;
}

/* Return the index variable for dimension I of OMP_FOR GS.  */

static inline tree
gimple_omp_for_index (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].index;
}

/* Return a pointer to the index variable for OMP_FOR GS.
*/

static inline tree *
gimple_omp_for_index_ptr (gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  /* I is the loop dimension (0 .. collapse-1) in a collapsed nest.  */
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return &gs->gimple_omp_for.iter[i].index;
}

/* Set INDEX to be the index variable for OMP_FOR GS.  */

static inline void
gimple_omp_for_set_index (gimple gs, size_t i, tree index)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  gs->gimple_omp_for.iter[i].index = index;
}

/* Return the initial value for OMP_FOR GS.  */

static inline tree
gimple_omp_for_initial (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].initial;
}

/* Return a pointer to the initial value for OMP_FOR GS.  */

static inline tree *
gimple_omp_for_initial_ptr (gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return &gs->gimple_omp_for.iter[i].initial;
}

/* Set INITIAL to be the initial value for OMP_FOR GS.  */

static inline void
gimple_omp_for_set_initial (gimple gs, size_t i, tree initial)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  gs->gimple_omp_for.iter[i].initial = initial;
}

/* Return the final value for OMP_FOR GS.  */

static inline tree
gimple_omp_for_final (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].final;
}

/* Return a pointer to the final value for OMP_FOR GS.  */

static inline tree *
gimple_omp_for_final_ptr (gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return &gs->gimple_omp_for.iter[i].final;
}

/* Set FINAL to be the final value for OMP_FOR GS.
*/

static inline void
gimple_omp_for_set_final (gimple gs, size_t i, tree final)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  gs->gimple_omp_for.iter[i].final = final;
}

/* Return the increment value for OMP_FOR GS.  */

static inline tree
gimple_omp_for_incr (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].incr;
}

/* Return a pointer to the increment value for OMP_FOR GS.  */

static inline tree *
gimple_omp_for_incr_ptr (gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return &gs->gimple_omp_for.iter[i].incr;
}

/* Set INCR to be the increment value for OMP_FOR GS.  */

static inline void
gimple_omp_for_set_incr (gimple gs, size_t i, tree incr)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  gs->gimple_omp_for.iter[i].incr = incr;
}

/* Return the sequence of statements to execute before the OMP_FOR
   statement GS starts.  */

static inline gimple_seq
gimple_omp_for_pre_body (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  return gs->gimple_omp_for.pre_body;
}

/* Set PRE_BODY to be the sequence of statements to execute before the
   OMP_FOR statement GS starts.  */

static inline void
gimple_omp_for_set_pre_body (gimple gs, gimple_seq pre_body)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gs->gimple_omp_for.pre_body = pre_body;
}

/* Return the clauses associated with OMP_PARALLEL GS.  */

static inline tree
gimple_omp_parallel_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  return gs->gimple_omp_parallel.clauses;
}

/* Return a pointer to the clauses associated with OMP_PARALLEL GS.
*/

static inline tree *
gimple_omp_parallel_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  return &gs->gimple_omp_parallel.clauses;
}

/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL
   GS.  */

static inline void
gimple_omp_parallel_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  gs->gimple_omp_parallel.clauses = clauses;
}

/* Return the child function used to hold the body of OMP_PARALLEL
   GS.  */

static inline tree
gimple_omp_parallel_child_fn (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  return gs->gimple_omp_parallel.child_fn;
}

/* Return a pointer to the child function used to hold the body of
   OMP_PARALLEL GS.  */

static inline tree *
gimple_omp_parallel_child_fn_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  return &gs->gimple_omp_parallel.child_fn;
}

/* Set CHILD_FN to be the child function for OMP_PARALLEL GS.  */

static inline void
gimple_omp_parallel_set_child_fn (gimple gs, tree child_fn)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  gs->gimple_omp_parallel.child_fn = child_fn;
}

/* Return the artificial argument used to send variables and values
   from the parent to the children threads in OMP_PARALLEL GS.  */

static inline tree
gimple_omp_parallel_data_arg (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  return gs->gimple_omp_parallel.data_arg;
}

/* Return a pointer to the data argument for OMP_PARALLEL GS.  */

static inline tree *
gimple_omp_parallel_data_arg_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  return &gs->gimple_omp_parallel.data_arg;
}

/* Set DATA_ARG to be the data argument for OMP_PARALLEL GS.  */

static inline void
gimple_omp_parallel_set_data_arg (gimple gs, tree data_arg)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  gs->gimple_omp_parallel.data_arg = data_arg;
}

/* Return the clauses associated with OMP_TASK GS.
*/

/* Note: the OMP_TASK accessors below read/write the
   gimple_omp_parallel union member; GIMPLE_OMP_TASK shares that
   layout for its common fields (clauses, child_fn, data_arg).  */

static inline tree
gimple_omp_task_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_parallel.clauses;
}

/* Return a pointer to the clauses associated with OMP_TASK GS.  */

static inline tree *
gimple_omp_task_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_parallel.clauses;
}

/* Set CLAUSES to be the list of clauses associated with OMP_TASK GS.  */

static inline void
gimple_omp_task_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_parallel.clauses = clauses;
}

/* Return the child function used to hold the body of OMP_TASK GS.  */

static inline tree
gimple_omp_task_child_fn (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_parallel.child_fn;
}

/* Return a pointer to the child function used to hold the body of
   OMP_TASK GS.  */

static inline tree *
gimple_omp_task_child_fn_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_parallel.child_fn;
}

/* Set CHILD_FN to be the child function for OMP_TASK GS.  */

static inline void
gimple_omp_task_set_child_fn (gimple gs, tree child_fn)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_parallel.child_fn = child_fn;
}

/* Return the artificial argument used to send variables and values
   from the parent to the children threads in OMP_TASK GS.  */

static inline tree
gimple_omp_task_data_arg (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_parallel.data_arg;
}

/* Return a pointer to the data argument for OMP_TASK GS.  */

static inline tree *
gimple_omp_task_data_arg_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_parallel.data_arg;
}

/* Set DATA_ARG to be the data argument for OMP_TASK GS.  */

static inline void
gimple_omp_task_set_data_arg (gimple gs, tree data_arg)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_parallel.data_arg = data_arg;
}

/* Return the clauses associated with OMP_PARALLEL or OMP_TASK GS.
*/

static inline tree
gimple_omp_taskreg_clauses (const_gimple gs)
{
  /* Accept either GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK.  */
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_parallel.clauses;
}

/* Return a pointer to the clauses associated with OMP_PARALLEL or
   OMP_TASK GS.  */

static inline tree *
gimple_omp_taskreg_clauses_ptr (gimple gs)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_parallel.clauses;
}

/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL
   or OMP_TASK GS.  */

static inline void
gimple_omp_taskreg_set_clauses (gimple gs, tree clauses)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_parallel.clauses = clauses;
}

/* Return the child function used to hold the body of OMP_PARALLEL or
   OMP_TASK GS.  */

static inline tree
gimple_omp_taskreg_child_fn (const_gimple gs)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_parallel.child_fn;
}

/* Return a pointer to the child function used to hold the body of
   OMP_PARALLEL or OMP_TASK GS.  */

static inline tree *
gimple_omp_taskreg_child_fn_ptr (gimple gs)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_parallel.child_fn;
}

/* Set CHILD_FN to be the child function for OMP_PARALLEL or OMP_TASK
   GS.  */

static inline void
gimple_omp_taskreg_set_child_fn (gimple gs, tree child_fn)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_parallel.child_fn = child_fn;
}

/* Return the artificial argument used to send variables and values
   from the parent to the children threads in OMP_PARALLEL or
   OMP_TASK GS.  */

static inline tree
gimple_omp_taskreg_data_arg (const_gimple gs)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_parallel.data_arg;
}

/* Return a pointer to the data argument for OMP_PARALLEL or OMP_TASK
   GS.
*/

static inline tree *
gimple_omp_taskreg_data_arg_ptr (gimple gs)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_parallel.data_arg;
}

/* Set DATA_ARG to be the data argument for OMP_PARALLEL or OMP_TASK
   GS.  */

static inline void
gimple_omp_taskreg_set_data_arg (gimple gs, tree data_arg)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_parallel.data_arg = data_arg;
}

/* Return the copy function used to hold the body of OMP_TASK GS.  */

static inline tree
gimple_omp_task_copy_fn (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_task.copy_fn;
}

/* Return a pointer to the copy function used to hold the body of
   OMP_TASK GS.  */

static inline tree *
gimple_omp_task_copy_fn_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_task.copy_fn;
}

/* Set COPY_FN to be the copy function for OMP_TASK GS.  */

static inline void
gimple_omp_task_set_copy_fn (gimple gs, tree copy_fn)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_task.copy_fn = copy_fn;
}

/* Return size of the data block in bytes in OMP_TASK GS.  */

static inline tree
gimple_omp_task_arg_size (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_task.arg_size;
}

/* Return a pointer to the data block size for OMP_TASK GS.  */

static inline tree *
gimple_omp_task_arg_size_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_task.arg_size;
}

/* Set ARG_SIZE to be the data block size for OMP_TASK GS.  */

static inline void
gimple_omp_task_set_arg_size (gimple gs, tree arg_size)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_task.arg_size = arg_size;
}

/* Return align of the data block in bytes in OMP_TASK GS.  */

static inline tree
gimple_omp_task_arg_align (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_task.arg_align;
}

/* Return a pointer to the data block align for OMP_TASK GS.
*/

static inline tree *
gimple_omp_task_arg_align_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_task.arg_align;
}

/* Set ARG_ALIGN to be the data block align for OMP_TASK GS.  */

static inline void
gimple_omp_task_set_arg_align (gimple gs, tree arg_align)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_task.arg_align = arg_align;
}

/* Return the clauses associated with OMP_SINGLE GS.  */

static inline tree
gimple_omp_single_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
  return gs->gimple_omp_single.clauses;
}

/* Return a pointer to the clauses associated with OMP_SINGLE GS.  */

static inline tree *
gimple_omp_single_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
  return &gs->gimple_omp_single.clauses;
}

/* Set CLAUSES to be the clauses associated with OMP_SINGLE GS.  */

static inline void
gimple_omp_single_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
  gs->gimple_omp_single.clauses = clauses;
}

/* Return the clauses associated with OMP_SECTIONS GS.  */

static inline tree
gimple_omp_sections_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  return gs->gimple_omp_sections.clauses;
}

/* Return a pointer to the clauses associated with OMP_SECTIONS GS.  */

static inline tree *
gimple_omp_sections_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  return &gs->gimple_omp_sections.clauses;
}

/* Set CLAUSES to be the set of clauses associated with OMP_SECTIONS
   GS.  */

static inline void
gimple_omp_sections_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  gs->gimple_omp_sections.clauses = clauses;
}

/* Return the control variable associated with the GIMPLE_OMP_SECTIONS
   in GS.  */

static inline tree
gimple_omp_sections_control (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  return gs->gimple_omp_sections.control;
}

/* Return a pointer to the control variable associated with the
   GIMPLE_OMP_SECTIONS in GS.
*/

static inline tree *
gimple_omp_sections_control_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  return &gs->gimple_omp_sections.control;
}

/* Set CONTROL to be the control variable associated with the
   GIMPLE_OMP_SECTIONS in GS.  */

static inline void
gimple_omp_sections_set_control (gimple gs, tree control)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  gs->gimple_omp_sections.control = control;
}

/* Set COND to be the condition code for dimension I of OMP_FOR GS.  */

static inline void
gimple_omp_for_set_cond (gimple gs, size_t i, enum tree_code cond)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  /* Only comparison codes are meaningful as loop conditions.  */
  gcc_gimple_checking_assert (TREE_CODE_CLASS (cond) == tcc_comparison
			      && i < gs->gimple_omp_for.collapse);
  gs->gimple_omp_for.iter[i].cond = cond;
}

/* Return the condition code associated with OMP_FOR GS.  */

static inline enum tree_code
gimple_omp_for_cond (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].cond;
}

/* Set the value being stored in an atomic store.  */

static inline void
gimple_omp_atomic_store_set_val (gimple g, tree val)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  g->gimple_omp_atomic_store.val = val;
}

/* Return the value being stored in an atomic store.  */

static inline tree
gimple_omp_atomic_store_val (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  return g->gimple_omp_atomic_store.val;
}

/* Return a pointer to the value being stored in an atomic store.  */

static inline tree *
gimple_omp_atomic_store_val_ptr (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  return &g->gimple_omp_atomic_store.val;
}

/* Set the LHS of an atomic load.  */

static inline void
gimple_omp_atomic_load_set_lhs (gimple g, tree lhs)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  g->gimple_omp_atomic_load.lhs = lhs;
}

/* Get the LHS of an atomic load.
*/

static inline tree
gimple_omp_atomic_load_lhs (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  return g->gimple_omp_atomic_load.lhs;
}

/* Return a pointer to the LHS of an atomic load.  */

static inline tree *
gimple_omp_atomic_load_lhs_ptr (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  return &g->gimple_omp_atomic_load.lhs;
}

/* Set the RHS of an atomic load.  */

static inline void
gimple_omp_atomic_load_set_rhs (gimple g, tree rhs)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  g->gimple_omp_atomic_load.rhs = rhs;
}

/* Get the RHS of an atomic load.  */

static inline tree
gimple_omp_atomic_load_rhs (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  return g->gimple_omp_atomic_load.rhs;
}

/* Return a pointer to the RHS of an atomic load.  */

static inline tree *
gimple_omp_atomic_load_rhs_ptr (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  return &g->gimple_omp_atomic_load.rhs;
}

/* Get the definition of the control variable in a
   GIMPLE_OMP_CONTINUE.  */

static inline tree
gimple_omp_continue_control_def (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  return g->gimple_omp_continue.control_def;
}

/* The same as above, but return the address.  */

static inline tree *
gimple_omp_continue_control_def_ptr (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  return &g->gimple_omp_continue.control_def;
}

/* Set the definition of the control variable in a
   GIMPLE_OMP_CONTINUE.  */

static inline void
gimple_omp_continue_set_control_def (gimple g, tree def)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  g->gimple_omp_continue.control_def = def;
}

/* Get the use of the control variable in a GIMPLE_OMP_CONTINUE.  */

static inline tree
gimple_omp_continue_control_use (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  return g->gimple_omp_continue.control_use;
}

/* The same as above, but return the address.
*/

static inline tree *
gimple_omp_continue_control_use_ptr (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  return &g->gimple_omp_continue.control_use;
}

/* Set the use of the control variable in a GIMPLE_OMP_CONTINUE.  */

static inline void
gimple_omp_continue_set_control_use (gimple g, tree use)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  g->gimple_omp_continue.control_use = use;
}

/* Return a pointer to the return value for GIMPLE_RETURN GS.  */

static inline tree *
gimple_return_retval_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_RETURN);
  return gimple_op_ptr (gs, 0);
}

/* Return the return value for GIMPLE_RETURN GS.  */

static inline tree
gimple_return_retval (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_RETURN);
  return gimple_op (gs, 0);
}

/* Set RETVAL to be the return value for GIMPLE_RETURN GS.  */

static inline void
gimple_return_set_retval (gimple gs, tree retval)
{
  GIMPLE_CHECK (gs, GIMPLE_RETURN);
  gimple_set_op (gs, 0, retval);
}

/* Returns true when the gimple statement STMT is any of the OpenMP
   types.  */

#define CASE_GIMPLE_OMP				\
    case GIMPLE_OMP_PARALLEL:			\
    case GIMPLE_OMP_TASK:			\
    case GIMPLE_OMP_FOR:			\
    case GIMPLE_OMP_SECTIONS:			\
    case GIMPLE_OMP_SECTIONS_SWITCH:		\
    case GIMPLE_OMP_SINGLE:			\
    case GIMPLE_OMP_SECTION:			\
    case GIMPLE_OMP_MASTER:			\
    case GIMPLE_OMP_ORDERED:			\
    case GIMPLE_OMP_CRITICAL:			\
    case GIMPLE_OMP_RETURN:			\
    case GIMPLE_OMP_ATOMIC_LOAD:		\
    case GIMPLE_OMP_ATOMIC_STORE:		\
    case GIMPLE_OMP_CONTINUE

static inline bool
is_gimple_omp (const_gimple stmt)
{
  switch (gimple_code (stmt))
    {
    CASE_GIMPLE_OMP:
      return true;
    default:
      return false;
    }
}

/* Returns TRUE if statement G is a GIMPLE_NOP.  */

static inline bool
gimple_nop_p (const_gimple g)
{
  return gimple_code (g) == GIMPLE_NOP;
}

/* Return true if GS is a GIMPLE_RESX.  */

static inline bool
is_gimple_resx (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_RESX;
}

/* Return the predictor of GIMPLE_PREDICT statement GS.
*/

static inline enum br_predictor
gimple_predict_predictor (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  /* The predictor shares the subcode with the GF_PREDICT_TAKEN bit;
     mask the outcome bit off to recover the predictor value.  */
  return (enum br_predictor) (gs->gsbase.subcode & ~GF_PREDICT_TAKEN);
}

/* Set the predictor of GIMPLE_PREDICT statement GS to PREDICT.  */

static inline void
gimple_predict_set_predictor (gimple gs, enum br_predictor predictor)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  /* Preserve the outcome bit while replacing the predictor bits.  */
  gs->gsbase.subcode = (gs->gsbase.subcode & GF_PREDICT_TAKEN)
		       | (unsigned) predictor;
}

/* Return the outcome of GIMPLE_PREDICT statement GS.  */

static inline enum prediction
gimple_predict_outcome (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  return (gs->gsbase.subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN;
}

/* Set the outcome of GIMPLE_PREDICT statement GS to OUTCOME.  */

static inline void
gimple_predict_set_outcome (gimple gs, enum prediction outcome)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  if (outcome == TAKEN)
    gs->gsbase.subcode |= GF_PREDICT_TAKEN;
  else
    gs->gsbase.subcode &= ~GF_PREDICT_TAKEN;
}

/* Return the type of the main expression computed by STMT.  Return
   void_type_node if the statement computes nothing.  */

static inline tree
gimple_expr_type (const_gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);

  if (code == GIMPLE_ASSIGN || code == GIMPLE_CALL)
    {
      tree type;
      /* In general we want to pass out a type that can be substituted
         for both the RHS and the LHS types if there is a possibly
         useless conversion involved.  That means returning the
         original RHS type as far as we can reconstruct it.  */
      if (code == GIMPLE_CALL)
	type = gimple_call_return_type (stmt);
      else
	switch (gimple_assign_rhs_code (stmt))
	  {
	  case POINTER_PLUS_EXPR:
	    /* For pointer arithmetic the RHS1 pointer type, not the
	       LHS type, is the original RHS type.  */
	    type = TREE_TYPE (gimple_assign_rhs1 (stmt));
	    break;

	  default:
	    /* As fallback use the type of the LHS.  */
	    type = TREE_TYPE (gimple_get_lhs (stmt));
	    break;
	  }
      return type;
    }
  else if (code == GIMPLE_COND)
    return boolean_type_node;
  else
    return void_type_node;
}

/* Return a new iterator pointing to GIMPLE_SEQ's first statement.
*/

static inline gimple_stmt_iterator
gsi_start (gimple_seq seq)
{
  gimple_stmt_iterator i;

  i.ptr = gimple_seq_first (seq);
  i.seq = seq;
  /* Derive the basic block from the first statement, if any.  */
  i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL;

  return i;
}

/* Return a new iterator pointing to the first statement in basic
   block BB.  */

static inline gimple_stmt_iterator
gsi_start_bb (basic_block bb)
{
  gimple_stmt_iterator i;
  gimple_seq seq;

  seq = bb_seq (bb);
  i.ptr = gimple_seq_first (seq);
  i.seq = seq;
  i.bb = bb;

  return i;
}

/* Return a new iterator initially pointing to GIMPLE_SEQ's last
   statement.  */

static inline gimple_stmt_iterator
gsi_last (gimple_seq seq)
{
  gimple_stmt_iterator i;

  i.ptr = gimple_seq_last (seq);
  i.seq = seq;
  i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL;

  return i;
}

/* Return a new iterator pointing to the last statement in basic
   block BB.  */

static inline gimple_stmt_iterator
gsi_last_bb (basic_block bb)
{
  gimple_stmt_iterator i;
  gimple_seq seq;

  seq = bb_seq (bb);
  i.ptr = gimple_seq_last (seq);
  i.seq = seq;
  i.bb = bb;

  return i;
}

/* Return true if I is at the end of its sequence.  */

static inline bool
gsi_end_p (gimple_stmt_iterator i)
{
  return i.ptr == NULL;
}

/* Return true if I is one statement before the end of its sequence.  */

static inline bool
gsi_one_before_end_p (gimple_stmt_iterator i)
{
  return i.ptr != NULL && i.ptr->next == NULL;
}

/* Advance the iterator to the next gimple statement.  */

static inline void
gsi_next (gimple_stmt_iterator *i)
{
  i->ptr = i->ptr->next;
}

/* Advance the iterator to the previous gimple statement.  */

static inline void
gsi_prev (gimple_stmt_iterator *i)
{
  i->ptr = i->ptr->prev;
}

/* Return the current stmt.  */

static inline gimple
gsi_stmt (gimple_stmt_iterator i)
{
  return i.ptr->stmt;
}

/* Return a block statement iterator that points to the first
   non-label statement in block BB.
*/

static inline gimple_stmt_iterator
gsi_after_labels (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_start_bb (bb);

  while (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL)
    gsi_next (&gsi);

  return gsi;
}

/* Advance the iterator to the next non-debug gimple statement.  */

static inline void
gsi_next_nondebug (gimple_stmt_iterator *i)
{
  do
    {
      gsi_next (i);
    }
  while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)));
}

/* Advance the iterator to the previous non-debug gimple statement.  */

static inline void
gsi_prev_nondebug (gimple_stmt_iterator *i)
{
  do
    {
      gsi_prev (i);
    }
  while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)));
}

/* Return a new iterator pointing to the first non-debug statement in
   basic block BB.  */

static inline gimple_stmt_iterator
gsi_start_nondebug_bb (basic_block bb)
{
  gimple_stmt_iterator i = gsi_start_bb (bb);

  if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i)))
    gsi_next_nondebug (&i);

  return i;
}

/* Return a new iterator pointing to the last non-debug statement in
   basic block BB.  */

static inline gimple_stmt_iterator
gsi_last_nondebug_bb (basic_block bb)
{
  gimple_stmt_iterator i = gsi_last_bb (bb);

  if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i)))
    gsi_prev_nondebug (&i);

  return i;
}

/* Return a pointer to the current stmt.

   NOTE: You may want to use gsi_replace on the iterator itself,
   as this performs additional bookkeeping that will not be done
   if you simply assign through a pointer returned by
   gsi_stmt_ptr.  */

static inline gimple *
gsi_stmt_ptr (gimple_stmt_iterator *i)
{
  return &i->ptr->stmt;
}

/* Return the basic block associated with this iterator.  */

static inline basic_block
gsi_bb (gimple_stmt_iterator i)
{
  return i.bb;
}

/* Return the sequence associated with this iterator.  */

static inline gimple_seq
gsi_seq (gimple_stmt_iterator i)
{
  return i.seq;
}

enum gsi_iterator_update
{
  GSI_NEW_STMT,		/* Only valid when single statement is added, move
			   iterator to it.  */
  GSI_SAME_STMT,	/* Leave the iterator at the same statement.  */
  GSI_CONTINUE_LINKING	/* Move iterator to whatever position is suitable
			   for linking other statements in the same
			   direction.  */
};

/* In gimple-iterator.c  */
gimple_stmt_iterator gsi_start_phis (basic_block);
gimple_seq gsi_split_seq_after (gimple_stmt_iterator);
gimple_seq gsi_split_seq_before (gimple_stmt_iterator *);
void gsi_replace (gimple_stmt_iterator *, gimple, bool);
void gsi_insert_before (gimple_stmt_iterator *, gimple,
			enum gsi_iterator_update);
void gsi_insert_before_without_update (gimple_stmt_iterator *, gimple,
				       enum gsi_iterator_update);
void gsi_insert_seq_before (gimple_stmt_iterator *, gimple_seq,
			    enum gsi_iterator_update);
void gsi_insert_seq_before_without_update (gimple_stmt_iterator *, gimple_seq,
					   enum gsi_iterator_update);
void gsi_insert_after (gimple_stmt_iterator *, gimple,
		       enum gsi_iterator_update);
void gsi_insert_after_without_update (gimple_stmt_iterator *, gimple,
				      enum gsi_iterator_update);
void gsi_insert_seq_after (gimple_stmt_iterator *, gimple_seq,
			   enum gsi_iterator_update);
void gsi_insert_seq_after_without_update (gimple_stmt_iterator *, gimple_seq,
					  enum gsi_iterator_update);
void gsi_remove (gimple_stmt_iterator *, bool);
gimple_stmt_iterator gsi_for_stmt (gimple);
void gsi_move_after (gimple_stmt_iterator *, gimple_stmt_iterator *);
void gsi_move_before (gimple_stmt_iterator *, gimple_stmt_iterator *);
void gsi_move_to_bb_end (gimple_stmt_iterator *, struct basic_block_def *);
void gsi_insert_on_edge (edge, gimple);
void gsi_insert_seq_on_edge (edge, gimple_seq);
basic_block gsi_insert_on_edge_immediate (edge, gimple);
basic_block gsi_insert_seq_on_edge_immediate (edge, gimple_seq);
void gsi_commit_one_edge_insert (edge, basic_block *);
void gsi_commit_edge_inserts (void);
gimple gimple_call_copy_skip_args (gimple, bitmap);


/* Convenience routines to walk all statements of a gimple function.
   Note that this is useful exclusively before the code is converted into
   SSA form.  Once the program is in SSA form, the standard operand
   interface should be used to analyze/modify statements.  */

/* State carried through walk_gimple_stmt/walk_gimple_seq and shared with
   the user-supplied callbacks.  */
struct walk_stmt_info
{
  /* Points to the current statement being walked.  */
  gimple_stmt_iterator gsi;

  /* Additional data that the callback functions may want to carry
     through the recursion.  */
  void *info;

  /* Pointer map used to mark visited tree nodes when calling
     walk_tree on each operand.  If set to NULL, duplicate tree nodes
     will be visited more than once.  */
  struct pointer_set_t *pset;

  /* Indicates whether the operand being examined may be replaced
     with something that matches is_gimple_val (if true) or something
     slightly more complicated (if false).  "Something" technically
     means the common subset of is_gimple_lvalue and is_gimple_rhs,
     but we never try to form anything more complicated than that, so
     we don't bother checking.

     Also note that CALLBACK should update this flag while walking the
     sub-expressions of a statement.  For instance, when walking the
     statement 'foo (&var)', the flag VAL_ONLY will initially be set
     to true, however, when walking &var, the operand of that
     ADDR_EXPR does not need to be a GIMPLE value.  */
  bool val_only;

  /* True if we are currently walking the LHS of an assignment.  */
  bool is_lhs;

  /* Optional.  Set to true by the callback functions if they made any
     changes.  */
  bool changed;

  /* True if we're interested in location information.  */
  bool want_locations;

  /* Operand returned by the callbacks.  This is set when calling
     walk_gimple_seq.  If the walk_stmt_fn or walk_tree_fn callback
     returns non-NULL, this field will contain the tree returned by
     the last callback.  */
  tree callback_result;
};

/* Callback for walk_gimple_stmt.  Called for every statement found
   during traversal.  The first argument points to the statement to
   walk.  The second argument is a flag that the callback sets to
   'true' if the callback handled all the operands and
   sub-statements of the statement (the default value of this flag is
   'false').  The third argument is an anonymous pointer to data
   to be used by the callback.  */
typedef tree (*walk_stmt_fn) (gimple_stmt_iterator *, bool *,
			      struct walk_stmt_info *);

gimple walk_gimple_seq (gimple_seq, walk_stmt_fn, walk_tree_fn,
		        struct walk_stmt_info *);
tree walk_gimple_stmt (gimple_stmt_iterator *, walk_stmt_fn, walk_tree_fn,
		       struct walk_stmt_info *);
tree walk_gimple_op (gimple, walk_tree_fn, struct walk_stmt_info *);

#ifdef GATHER_STATISTICS
/* Enum and arrays used for allocation stats.  Keep in sync with
   gimple.c:gimple_alloc_kind_names.  */
enum gimple_alloc_kind
{
  gimple_alloc_kind_assign,	/* Assignments.  */
  gimple_alloc_kind_phi,	/* PHI nodes.  */
  gimple_alloc_kind_cond,	/* Conditionals.  */
  gimple_alloc_kind_seq,	/* Sequences.  */
  gimple_alloc_kind_rest,	/* Everything else.  */
  gimple_alloc_kind_all
};

extern int gimple_alloc_counts[];
extern int gimple_alloc_sizes[];

/* Return the allocation kind for a given stmt CODE.  */
static inline enum gimple_alloc_kind
gimple_alloc_kind (enum gimple_code code)
{
  switch (code)
    {
      case GIMPLE_ASSIGN:
        return gimple_alloc_kind_assign;
      case GIMPLE_PHI:
        return gimple_alloc_kind_phi;
      case GIMPLE_COND:
        return gimple_alloc_kind_cond;
      default:
        return gimple_alloc_kind_rest;
    }
}
#endif /* GATHER_STATISTICS */

extern void dump_gimple_statistics (void);

/* In gimple-fold.c.
 */
/* Declarations for the statement-folding entry points implemented in
   gimple-fold.c.  */
void gimplify_and_update_call_from_tree (gimple_stmt_iterator *, tree);
tree gimple_fold_builtin (gimple);
bool fold_stmt (gimple_stmt_iterator *);
bool fold_stmt_inplace (gimple);
tree maybe_fold_offset_to_address (location_t, tree, tree, tree);
tree maybe_fold_offset_to_reference (location_t, tree, tree, tree);
tree maybe_fold_stmt_addition (location_t, tree, tree, tree);
tree get_symbol_constant_value (tree);
tree canonicalize_constructor_val (tree);
bool may_propagate_address_into_dereference (tree, tree);
extern tree maybe_fold_and_comparisons (enum tree_code, tree, tree,
					enum tree_code, tree, tree);
extern tree maybe_fold_or_comparisons (enum tree_code, tree, tree,
				       enum tree_code, tree, tree);

#endif /* GCC_GIMPLE_H */
sum.h
// // Created by demeuren on 26/01/18. // #ifndef COMPENSATIONS_SUM_H #define COMPENSATIONS_SUM_H #include <algorithm> #include <functional> #include <random> #include "../shaman/Shaman.h" #include "various.h" /* * computing time for 50000000 elements on nestor's laptop using clang++5.0 * S : 10s * double : 6.5s * => 1.5 slower * * on TERATEC's desktop + inline * S : 2.4s * double : 1.7s * => 1.4x slower * S + numerical debugging : 29s * => 17x slower */ void Compensatedsum(int size) { // data generation std::random_device rnd_device; std::mt19937 mersenne_engine(rnd_device()); // Specify the engine and distribution. std::uniform_real_distribution<double> dist(1.0e10-15, 2.0e-15); auto gen = std::bind(dist, mersenne_engine); std::vector<double> data(size); generate(begin(data), end(data), gen); LOCAL_BLOCK("loop"); // usual sum Sdouble sum = 0.0; // kahan sum Sdouble kahanSum = 0.0; Sdouble compensation = 0.0; // improved kahan sum Sdouble improvedSum = 0.0; Sdouble compensation2 = 0.0; for(Sdouble x : data) { // usual sum sum += x; // kahan sum Sdouble y = x - compensation; Sdouble t = kahanSum + y; compensation = (t - kahanSum) - y; kahanSum = t; // improved kahan Sdouble t2 = improvedSum + x; if(Sstd::abs(improvedSum) >= Sstd::abs(x)) { compensation2 += (improvedSum - t2) + x; } else { compensation2 += (x - t2) + improvedSum; } improvedSum = t2; } LOCAL_BLOCK("final_compensation"); improvedSum += compensation2; std::cout << "sum: " << sum << std::endl; std::cout << "improved_sum: " << improvedSum << std::endl; std::cout << "kahan_sum: " << kahanSum << std::endl; } void parralelSum(int size) { // data generation std::random_device rnd_device; std::mt19937 mersenne_engine(rnd_device()); // Specify the engine and distribution. 
std::uniform_real_distribution<double> dist(1.0e10-15, 2.0e-15); auto gen = std::bind(dist, mersenne_engine); std::vector<double> data(size); generate(begin(data), end(data), gen); // sequential sum Sdouble seqSum = 0; for(Sdouble x : data) { seqSum += x; } std::cout << "sequential sum\t:\t" << seqSum << std::endl; #ifdef _OPENMP // parrallel sum with reduce /* number parSumR = 0; #pragma omp parallel for reduction(+:parSumR) for(unsigned int i = 0; i < data.size(); i++) { parSumR += data[i]; } std::cout << "parallel sum\t:\t" << parSumR << " (with reduce)" << std::endl; */ // parrallel sum without reduce Sdouble parSum = 0; #pragma omp parallel { Sdouble localParSum = 0; #pragma omp for for(unsigned int i = 0; i < data.size(); i++) { localParSum += data[i]; } #pragma omp critical parSum += localParSum; }; std::cout << "parallel sum\t:\t" << parSum << " (without reduce)" << std::endl; #endif //_OPENMP } #endif //COMPENSATIONS_SUM_H
convolution_3x3_packn_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Transform 3x3 fp32 kernel weights into the 8x8 Winograd F(6x6,3x3) domain
// (G * g * G^T, applied row-wise then column-wise via ktm) and interleave
// them into packn-wide fp16 blocks for the RVV compute kernel.
// packn is the number of fp16 lanes in one vector register (vlenb / 2).
static void conv3x3s1_winograd64_transform_kernel_packn_fp16sa_rvv(const Mat& kernel, Mat& kernel_tm_packn, int inch, int outch, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;

    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // G matrix for F(6x6,3x3): maps a 3-tap kernel row/column to 8 points
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : apply ktm to each of the three kernel rows -> 8x3 intermediate
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : apply ktm along the other axis -> full 8x8 transformed tile
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = pb-pa-inch/pa-64-outch/pb
    // NOTE(review): the loops below advance in full packn steps, so any
    // inch/outch remainder modulo packn is silently dropped — presumably the
    // caller guarantees packn-aligned channel counts; confirm at call sites.
    kernel_tm_packn.create(inch / packn, 64, outch / packn, (size_t)2u * packn * packn, packn * packn);

    for (int q = 0; q + (packn - 1) < outch; q += packn)
    {
        Mat g0 = kernel_tm_packn.channel(q / packn);

        for (int k = 0; k < 64; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int p = 0; p + (packn - 1) < inch; p += packn)
            {
                for (int i = 0; i < packn; i++)
                {
                    for (int j = 0; j < packn; j++)
                    {
                        // fp32 -> fp16 narrowing happens here, at pack time
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);
                        g00[0] = (__fp16)k00[k];
                        g00++;
                    }
                }
            }
        }
    }
}

// Winograd F(6x6,3x3) forward 3x3 stride-1 convolution, packn-lane fp16 RVV
// path.  Stages: pad the input to a multiple of 6 plus a 2-pixel border,
// transform 8x8 input tiles, multiply with the transformed kernel, inverse-
// transform 6x6 output tiles, then crop back to the requested output size.
static void conv3x3s1_winograd64_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2 : each 8x8 input tile yields a 6x6 output tile, with a
    // 2-pixel overlap between neighbouring tiles
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    const __fp16* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        const int tiles = w_tm / 8 * h_tm / 8;

        // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator);

        // const float itm[8][8] = {
        //     {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
        //
        //     {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
        //     {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
        //
        //     {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
        //     {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
        //
        //     {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
        //     {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
        //
        //     {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
        // };

        // factored form of the matrix rows above, sharing sub-expressions:
        // 0 = r00 - r06 + (r04 - r02) * 5.25
        // 7 = r07 - r01 + (r03 - r05) * 5.25
        // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
        // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
        // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        // reuse r04 * 1.25
        // reuse r03 * 2.5
        // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
        // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            // NOTE c99 variable length array
            __fp16 tmp[8][8][packn];

            // tile
            for (int i = 0; i < h_tm / 8; i++)
            {
                for (int j = 0; j < w_tm / 8; j++)
                {
                    const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * packn;

                    // first pass: transform the 8 rows of the tile
                    for (int m = 0; m < 8; m++)
                    {
                        vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl);
                        vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl);
                        vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl);
                        vfloat16m1_t _r03 = vle16_v_f16m1(r0 + packn * 3, vl);
                        vfloat16m1_t _r04 = vle16_v_f16m1(r0 + packn * 4, vl);
                        vfloat16m1_t _r05 = vle16_v_f16m1(r0 + packn * 5, vl);
                        vfloat16m1_t _r06 = vle16_v_f16m1(r0 + packn * 6, vl);
                        vfloat16m1_t _r07 = vle16_v_f16m1(r0 + packn * 7, vl);

                        vfloat16m1_t _tmp0m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r00, _r06, vl), 5.25f, vfsub_vv_f16m1(_r04, _r02, vl), vl);
                        vfloat16m1_t _tmp7m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r07, _r01, vl), 5.25f, vfsub_vv_f16m1(_r03, _r05, vl), vl);
                        vse16_v_f16m1(tmp[0][m], _tmp0m, vl);
                        vse16_v_f16m1(tmp[7][m], _tmp7m, vl);

                        vfloat16m1_t _tmp12a = vfmacc_vf_f16m1(vfadd_vv_f16m1(_r02, _r06, vl), -4.25f, _r04, vl);
                        vfloat16m1_t _tmp12b =
vfmacc_vf_f16m1(vfadd_vv_f16m1(_r01, _r05, vl), -4.25f, _r03, vl); vfloat16m1_t _tmp1m = vfadd_vv_f16m1(_tmp12a, _tmp12b, vl); vfloat16m1_t _tmp2m = vfsub_vv_f16m1(_tmp12a, _tmp12b, vl); vse16_v_f16m1(tmp[1][m], _tmp1m, vl); vse16_v_f16m1(tmp[2][m], _tmp2m, vl); vfloat16m1_t _tmp34a = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_r06, 0.25f, _r02, vl), -1.25f, _r04, vl); vfloat16m1_t _tmp34b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_r01, 0.5f, vl), -2.5f, _r03, vl), 2.f, _r05, vl); vfloat16m1_t _tmp3m = vfadd_vv_f16m1(_tmp34a, _tmp34b, vl); vfloat16m1_t _tmp4m = vfsub_vv_f16m1(_tmp34a, _tmp34b, vl); vse16_v_f16m1(tmp[3][m], _tmp3m, vl); vse16_v_f16m1(tmp[4][m], _tmp4m, vl); vfloat16m1_t _tmp56a = vfmacc_vf_f16m1(_r06, 4.f, vfmacc_vf_f16m1(_r02, -1.25f, _r04, vl), vl); vfloat16m1_t _tmp56b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_r01, 2.f, vl), -2.5f, _r03, vl), 0.5f, _r05, vl); vfloat16m1_t _tmp5m = vfadd_vv_f16m1(_tmp56a, _tmp56b, vl); vfloat16m1_t _tmp6m = vfsub_vv_f16m1(_tmp56a, _tmp56b, vl); vse16_v_f16m1(tmp[5][m], _tmp5m, vl); vse16_v_f16m1(tmp[6][m], _tmp6m, vl); r0 += w * packn; } __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * packn; __fp16* r0_tm_1 = r0_tm_0 + tiles * packn; __fp16* r0_tm_2 = r0_tm_0 + tiles * packn * 2; __fp16* r0_tm_3 = r0_tm_0 + tiles * packn * 3; __fp16* r0_tm_4 = r0_tm_0 + tiles * packn * 4; __fp16* r0_tm_5 = r0_tm_0 + tiles * packn * 5; __fp16* r0_tm_6 = r0_tm_0 + tiles * packn * 6; __fp16* r0_tm_7 = r0_tm_0 + tiles * packn * 7; for (int m = 0; m < 8; m++) { vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl); vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl); vfloat16m1_t _tmp02 = vle16_v_f16m1(tmp[m][2], vl); vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl); vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl); vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl); vfloat16m1_t _tmp06 = vle16_v_f16m1(tmp[m][6], vl); vfloat16m1_t _tmp07 = vle16_v_f16m1(tmp[m][7], vl); vfloat16m1_t _r0tm0 = 
vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp00, _tmp06, vl), 5.25f, vfsub_vv_f16m1(_tmp04, _tmp02, vl), vl); vfloat16m1_t _r0tm7 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp07, _tmp01, vl), 5.25f, vfsub_vv_f16m1(_tmp03, _tmp05, vl), vl); vfloat16m1_t _tmp12a = vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp02, _tmp06, vl), -4.25f, _tmp04, vl); vfloat16m1_t _tmp12b = vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp01, _tmp05, vl), -4.25f, _tmp03, vl); vfloat16m1_t _r0tm1 = vfadd_vv_f16m1(_tmp12a, _tmp12b, vl); vfloat16m1_t _r0tm2 = vfsub_vv_f16m1(_tmp12a, _tmp12b, vl); vfloat16m1_t _tmp34a = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp06, 0.25f, _tmp02, vl), -1.25f, _tmp04, vl); vfloat16m1_t _tmp34b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_tmp01, 0.5f, vl), -2.5f, _tmp03, vl), 2.f, _tmp05, vl); vfloat16m1_t _r0tm3 = vfadd_vv_f16m1(_tmp34a, _tmp34b, vl); vfloat16m1_t _r0tm4 = vfsub_vv_f16m1(_tmp34a, _tmp34b, vl); vfloat16m1_t _tmp56a = vfmacc_vf_f16m1(_tmp06, 4.f, vfmacc_vf_f16m1(_tmp02, -1.25f, _tmp04, vl), vl); vfloat16m1_t _tmp56b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_tmp01, 2.f, vl), -2.5f, _tmp03, vl), 0.5f, _tmp05, vl); vfloat16m1_t _r0tm5 = vfadd_vv_f16m1(_tmp56a, _tmp56b, vl); vfloat16m1_t _r0tm6 = vfsub_vv_f16m1(_tmp56a, _tmp56b, vl); vse16_v_f16m1(r0_tm_0, _r0tm0, vl); vse16_v_f16m1(r0_tm_1, _r0tm1, vl); vse16_v_f16m1(r0_tm_2, _r0tm2, vl); vse16_v_f16m1(r0_tm_3, _r0tm3, vl); vse16_v_f16m1(r0_tm_4, _r0tm4, vl); vse16_v_f16m1(r0_tm_5, _r0tm5, vl); vse16_v_f16m1(r0_tm_6, _r0tm6, vl); vse16_v_f16m1(r0_tm_7, _r0tm7, vl); r0_tm_0 += tiles * packn * 8; r0_tm_1 += tiles * packn * 8; r0_tm_2 += tiles * packn * 8; r0_tm_3 += tiles * packn * 8; r0_tm_4 += tiles * packn * 8; r0_tm_5 += tiles * packn * 8; r0_tm_6 += tiles * packn * 8; r0_tm_7 += tiles * packn * 8; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, 
elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { __fp16* tmpptr = tm2.row<__fp16>(i / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { #if RVV_SPEC_0_7 for (int l = 0; l < packn; l++) { tmpptr[0] = r0[l]; tmpptr[1] = r0[l + packn]; tmpptr[2] = r0[l + packn * 2]; tmpptr[3] = r0[l + packn * 3]; tmpptr[4] = r0[l + packn * 4]; tmpptr[5] = r0[l + packn * 5]; tmpptr[6] = r0[l + packn * 6]; tmpptr[7] = r0[l + packn * 7]; tmpptr += 8; } r0 += bottom_blob_tm.cstep * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl); vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl); vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl); vfloat16m1_t _val4 = vle16_v_f16m1(r0 + packn * 4, vl); vfloat16m1_t _val5 = vle16_v_f16m1(r0 + packn * 5, vl); vfloat16m1_t _val6 = vle16_v_f16m1(r0 + packn * 6, vl); vfloat16m1_t _val7 = vle16_v_f16m1(r0 + packn * 7, vl); vsseg8e16_v_f16m1x8(tmpptr, vcreate_f16m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn * 8; #endif } } for (; i + 3 < tiles; i += 4) { __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4); const __fp16* r0 = 
bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { #if RVV_SPEC_0_7 for (int l = 0; l < packn; l++) { tmpptr[0] = r0[l]; tmpptr[1] = r0[l + packn]; tmpptr[2] = r0[l + packn * 2]; tmpptr[3] = r0[l + packn * 3]; tmpptr += 4; } r0 += bottom_blob_tm.cstep * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl); vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl); vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl); vsseg4e16_v_f16m1x4(tmpptr, vcreate_f16m1x4(_val0, _val1, _val2, _val3), vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn * 4; #endif } } for (; i + 1 < tiles; i += 2) { __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { #if RVV_SPEC_0_7 for (int l = 0; l < packn; l++) { tmpptr[0] = r0[l]; tmpptr[1] = r0[l + packn]; tmpptr += 2; } r0 += bottom_blob_tm.cstep * packn; #else vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl); vsseg2e16_v_f16m1x2(tmpptr, vcreate_f16m1x2(_val0, _val1), vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn * 2; #endif } } for (; i < tiles; i++) { __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * packn; for (int q = 0; q < inch; q++) { vfloat16m1_t _val = vle16_v_f16m1(r0, vl); vse16_v_f16m1(tmpptr, _val, vl); r0 += bottom_blob_tm.cstep * packn; tmpptr += packn; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const 
__fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum4 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum5 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum6 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum7 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *r0++; __fp16 val1 = *r0++; __fp16 val2 = *r0++; __fp16 val3 = *r0++; __fp16 val4 = *r0++; __fp16 val5 = *r0++; __fp16 val6 = *r0++; __fp16 val7 = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl); _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl); _sum4 = vfmacc_vf_f16m1(_sum4, val4, _w0, vl); _sum5 = vfmacc_vf_f16m1(_sum5, val5, _w0, vl); _sum6 = vfmacc_vf_f16m1(_sum6, val6, _w0, vl); _sum7 = vfmacc_vf_f16m1(_sum7, val7, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum0, vl); vse16_v_f16m1(output0_tm + packn, _sum1, vl); vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl); vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl); vse16_v_f16m1(output0_tm + packn * 4, _sum4, vl); vse16_v_f16m1(output0_tm + packn * 5, _sum5, vl); vse16_v_f16m1(output0_tm + packn * 6, _sum6, vl); vse16_v_f16m1(output0_tm + packn * 7, _sum7, vl); output0_tm += packn * 8; } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *r0++; __fp16 val1 = *r0++; 
__fp16 val2 = *r0++; __fp16 val3 = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl); _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum0, vl); vse16_v_f16m1(output0_tm + packn, _sum1, vl); vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl); vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl); output0_tm += packn * 4; } for (; i + 1 < tiles; i += 2) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val0 = *r0++; __fp16 val1 = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum0, vl); vse16_v_f16m1(output0_tm + packn, _sum1, vl); output0_tm += packn * 2; } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch * packn; // inch always > 0 vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl); for (int j = 0; j < nn; j++) { __fp16 val = *r0++; vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl); _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl); k0 += packn; } vse16_v_f16m1(output0_tm, _sum, vl); output0_tm += packn; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 
0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; vfloat16m1_t _bias0 = bias ? vle16_v_f16m1((const __fp16*)bias + p * packn, vl) : vfmv_v_f_f16m1(0.f, vl); // NOTE c99 variable length array __fp16 tmp[6][8][packn]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * packn; const __fp16* output0_tm_1 = output0_tm_0 + tiles * packn; const __fp16* output0_tm_2 = output0_tm_0 + tiles * packn * 2; const __fp16* output0_tm_3 = output0_tm_0 + tiles * packn * 3; const __fp16* output0_tm_4 = output0_tm_0 + tiles * packn * 4; const __fp16* output0_tm_5 = output0_tm_0 + tiles * packn * 5; const __fp16* output0_tm_6 = output0_tm_0 + tiles * packn * 6; const __fp16* output0_tm_7 = output0_tm_0 + tiles * packn * 7; __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * packn; // TODO rvv optimize for (int m = 0; m < 8; m++) { vfloat16m1_t _out0tm0 = vle16_v_f16m1(output0_tm_0, vl); vfloat16m1_t _out0tm1 = vle16_v_f16m1(output0_tm_1, vl); vfloat16m1_t _out0tm2 = vle16_v_f16m1(output0_tm_2, vl); vfloat16m1_t _out0tm3 = 
vle16_v_f16m1(output0_tm_3, vl); vfloat16m1_t _out0tm4 = vle16_v_f16m1(output0_tm_4, vl); vfloat16m1_t _out0tm5 = vle16_v_f16m1(output0_tm_5, vl); vfloat16m1_t _out0tm6 = vle16_v_f16m1(output0_tm_6, vl); vfloat16m1_t _out0tm7 = vle16_v_f16m1(output0_tm_7, vl); vfloat16m1_t _tmp024a = vfadd_vv_f16m1(_out0tm1, _out0tm2, vl); vfloat16m1_t _tmp135a = vfsub_vv_f16m1(_out0tm1, _out0tm2, vl); vfloat16m1_t _tmp024b = vfadd_vv_f16m1(_out0tm3, _out0tm4, vl); vfloat16m1_t _tmp135b = vfsub_vv_f16m1(_out0tm3, _out0tm4, vl); vfloat16m1_t _tmp024c = vfadd_vv_f16m1(_out0tm5, _out0tm6, vl); vfloat16m1_t _tmp135c = vfsub_vv_f16m1(_out0tm5, _out0tm6, vl); vfloat16m1_t _tmp0m = vfadd_vv_f16m1(vfadd_vv_f16m1(_out0tm0, _tmp024a, vl), vfmacc_vf_f16m1(_tmp024b, 32.f, _tmp024c, vl), vl); vfloat16m1_t _tmp2m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl); vfloat16m1_t _tmp4m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl); vse16_v_f16m1(tmp[0][m], _tmp0m, vl); vse16_v_f16m1(tmp[2][m], _tmp2m, vl); vse16_v_f16m1(tmp[4][m], _tmp4m, vl); vfloat16m1_t _tmp1m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl); vfloat16m1_t _tmp3m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl); vfloat16m1_t _tmp5m = vfadd_vv_f16m1(vfadd_vv_f16m1(_out0tm7, _tmp135a, vl), vfmacc_vf_f16m1(_tmp135c, 32.f, _tmp135b, vl), vl); vse16_v_f16m1(tmp[1][m], _tmp1m, vl); vse16_v_f16m1(tmp[3][m], _tmp3m, vl); vse16_v_f16m1(tmp[5][m], _tmp5m, vl); output0_tm_0 += tiles * packn * 8; output0_tm_1 += tiles * packn * 8; output0_tm_2 += tiles * packn * 8; output0_tm_3 += tiles * packn * 8; output0_tm_4 += tiles * packn * 8; output0_tm_5 += tiles * packn * 8; output0_tm_6 += tiles * packn * 8; output0_tm_7 += tiles * packn * 8; } for (int m = 0; m < 6; m++) { vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl); vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl); vfloat16m1_t _tmp02 = 
vle16_v_f16m1(tmp[m][2], vl); vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl); vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl); vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl); vfloat16m1_t _tmp06 = vle16_v_f16m1(tmp[m][6], vl); vfloat16m1_t _tmp07 = vle16_v_f16m1(tmp[m][7], vl); vfloat16m1_t _tmp024a = vfadd_vv_f16m1(_tmp01, _tmp02, vl); vfloat16m1_t _tmp135a = vfsub_vv_f16m1(_tmp01, _tmp02, vl); vfloat16m1_t _tmp024b = vfadd_vv_f16m1(_tmp03, _tmp04, vl); vfloat16m1_t _tmp135b = vfsub_vv_f16m1(_tmp03, _tmp04, vl); vfloat16m1_t _tmp024c = vfadd_vv_f16m1(_tmp05, _tmp06, vl); vfloat16m1_t _tmp135c = vfsub_vv_f16m1(_tmp05, _tmp06, vl); vfloat16m1_t _out00 = vfadd_vv_f16m1(_bias0, vfadd_vv_f16m1(vfadd_vv_f16m1(_tmp00, _tmp024a, vl), vfmacc_vf_f16m1(_tmp024b, 32.f, _tmp024c, vl), vl), vl); vfloat16m1_t _out02 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl), vl); vfloat16m1_t _out04 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl), vl); vse16_v_f16m1(output0, _out00, vl); vse16_v_f16m1(output0 + packn * 2, _out02, vl); vse16_v_f16m1(output0 + packn * 4, _out04, vl); vfloat16m1_t _out01 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl), vl); vfloat16m1_t _out03 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl), vl); vfloat16m1_t _out05 = vfadd_vv_f16m1(_bias0, vfadd_vv_f16m1(vfadd_vv_f16m1(_tmp07, _tmp135a, vl), vfmacc_vf_f16m1(_tmp135c, 32.f, _tmp135b, vl), vl), vl); vse16_v_f16m1(output0 + packn, _out01, vl); vse16_v_f16m1(output0 + packn * 3, _out03, vl); vse16_v_f16m1(output0 + packn * 5, _out05, vl); output0 += outw * packn; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void 
// Transform a 3x3 fp32 kernel into the winograd F(4x4,3x3) domain (6x6 taps
// per filter) and interleave it for packn-lane fp16 consumption.
// `kernel`: [outch][inch][3x3] fp32; `kernel_tm_packn`: output, layout
// pb-pa-inch/pa-36-outch/pb in fp16.  Interface continues the `static void`
// opened on the previous line.
conv3x3s1_winograd42_transform_kernel_packn_fp16sa_rvv(const Mat& kernel, Mat& kernel_tm_packn, int inch, int outch, const Option& opt)
{
    // packn = number of fp16 lanes in one vector register (VLEN/16)
    const int packn = csrr_vlenb() / 2;

    // winograd42 transform kernel
    Mat kernel_tm(6 * 6, inch, outch);

    // G matrix of F(4,3): U = G g G^T
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h = G g  (apply G to each 3-element kernel column)
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h G^T
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];
                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = pb-pa-inch/pa-36-outch/pb
    // NOTE(review): loops below require inch and outch to be multiples of
    // packn — presumably guaranteed by the caller; confirm upstream.
    kernel_tm_packn.create(inch / packn, 36, outch / packn, (size_t)2u * packn * packn, packn * packn);

    for (int q = 0; q + (packn - 1) < outch; q += packn)
    {
        Mat g0 = kernel_tm_packn.channel(q / packn);

        for (int k = 0; k < 36; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int p = 0; p + (packn - 1) < inch; p += packn)
            {
                for (int i = 0; i < packn; i++)
                {
                    for (int j = 0; j < packn; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);
                        // narrow fp32 -> fp16 while interleaving
                        g00[0] = (__fp16)k00[k];
                        g00++;
                    }
                }
            }
        }
    }
}

// Winograd F(4x4,3x3) fp16 conv3x3 stride1 for packn-element packed blobs:
// input transform (B^T d B), per-tile GEMM in the transform domain, output
// transform (A^T m A) plus bias, then crop the 4-aligned padding away.
static void conv3x3s1_winograd42_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    const __fp16* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = w_tm / 6 * h_tm / 6;

        bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator);

        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 = 4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r04 + r03
        // 2 = 4 * (r01 - r02) + r04 - r03
        // 3 = -2 * (r01 - r03) + r04 - r02
        // 4 = 2 * (r01 - r03) + r04 - r02
        // 5 = 4 * r01 - 5 * r03 + r05

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            // NOTE c99 variable length array
            __fp16 tmp[6][6][packn];

            // tile
            for (int i = 0; i < h_tm / 6; i++)
            {
                for (int j = 0; j < w_tm / 6; j++)
                {
                    // 6x6 input patch for this tile; tiles overlap by 2 pixels
                    const __fp16* r0 = img0.row<const __fp16>(i * 4) + (j * 4) * packn;

                    // vertical pass: tmp = B^T d
                    for (int m = 0; m < 6; m++)
                    {
                        vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl);
                        vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl);
                        vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl);
                        vfloat16m1_t _r03 = vle16_v_f16m1(r0 + packn * 3, vl);
                        vfloat16m1_t _r04 = vle16_v_f16m1(r0 + packn * 4, vl);
                        vfloat16m1_t _r05 = vle16_v_f16m1(r0 + packn * 5, vl);

                        vfloat16m1_t _tmp0m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_r04, 4.f, _r00, vl), -5.f, _r02, vl);
                        vfloat16m1_t _tmp1m = vfmacc_vf_f16m1(vfadd_vv_f16m1(_r04, _r03, vl), -4.f, vfadd_vv_f16m1(_r01, _r02, vl), vl);
                        vfloat16m1_t _tmp2m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r04, _r03, vl), 4.f, vfsub_vv_f16m1(_r01, _r02, vl), vl);
                        vfloat16m1_t _tmp3m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r04, _r02, vl), -2.f, vfsub_vv_f16m1(_r01, _r03, vl), vl);
                        vfloat16m1_t _tmp4m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r04, _r02, vl), 2.f, vfsub_vv_f16m1(_r01, _r03, vl), vl);
                        vfloat16m1_t _tmp5m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_r05, 4.f, _r01, vl), -5.f, _r03, vl);

                        vse16_v_f16m1(tmp[0][m], _tmp0m, vl);
                        vse16_v_f16m1(tmp[1][m], _tmp1m, vl);
                        vse16_v_f16m1(tmp[2][m], _tmp2m, vl);
                        vse16_v_f16m1(tmp[3][m], _tmp3m, vl);
                        vse16_v_f16m1(tmp[4][m], _tmp4m, vl);
                        vse16_v_f16m1(tmp[5][m], _tmp5m, vl);

                        r0 += w * packn;
                    }

                    __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 6 + j) * packn;
                    __fp16* r0_tm_1 = r0_tm_0 + tiles * packn;
                    __fp16* r0_tm_2 = r0_tm_0 + tiles * packn * 2;
                    __fp16* r0_tm_3 = r0_tm_0 + tiles * packn * 3;
                    __fp16* r0_tm_4 = r0_tm_0 + tiles * packn * 4;
                    __fp16* r0_tm_5 = r0_tm_0 + tiles * packn * 5;

                    // horizontal pass: (B^T d) B, scattered to the 36 planes
                    for (int m = 0; m < 6; m++)
                    {
                        vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl);
                        vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl);
                        vfloat16m1_t _tmp02 = vle16_v_f16m1(tmp[m][2], vl);
                        vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl);
                        vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl);
                        vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl);

                        vfloat16m1_t _r0tm0 = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp04, 4.f, _tmp00, vl), -5.f, _tmp02, vl);
                        vfloat16m1_t _r0tm1 = vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp04, _tmp03, vl), -4.f, vfadd_vv_f16m1(_tmp01, _tmp02, vl), vl);
                        vfloat16m1_t _r0tm2 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp04, _tmp03, vl), 4.f, vfsub_vv_f16m1(_tmp01, _tmp02, vl), vl);
                        vfloat16m1_t _r0tm3 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp04, _tmp02, vl), -2.f, vfsub_vv_f16m1(_tmp01, _tmp03, vl), vl);
                        vfloat16m1_t _r0tm4 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp04, _tmp02, vl), 2.f, vfsub_vv_f16m1(_tmp01, _tmp03, vl), vl);
                        vfloat16m1_t _r0tm5 = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp05, 4.f, _tmp01, vl), -5.f, _tmp03, vl);

                        vse16_v_f16m1(r0_tm_0, _r0tm0, vl);
                        vse16_v_f16m1(r0_tm_1, _r0tm1, vl);
                        vse16_v_f16m1(r0_tm_2, _r0tm2, vl);
                        vse16_v_f16m1(r0_tm_3, _r0tm3, vl);
                        vse16_v_f16m1(r0_tm_4, _r0tm4, vl);
                        vse16_v_f16m1(r0_tm_5, _r0tm5, vl);

                        r0_tm_0 += tiles * packn * 6;
                        r0_tm_1 += tiles * packn * 6;
                        r0_tm_2 += tiles * packn * 6;
                        r0_tm_3 += tiles * packn * 6;
                        r0_tm_4 += tiles * packn * 6;
                        r0_tm_5 += tiles * packn * 6;
                    }
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = h_tm / 6 * w_tm / 6;

        // permute
        // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        // repack tiles in groups of 8/4/2/1 for the GEMM inner loop
        Mat bottom_blob_tm2;
        if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 36; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
            for (; i + 7 < tiles; i += 8)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if RVV_SPEC_0_7
                    // scalar transpose fallback for the 0.7 vector spec
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr[2] = r0[l + packn * 2];
                        tmpptr[3] = r0[l + packn * 3];
                        tmpptr[4] = r0[l + packn * 4];
                        tmpptr[5] = r0[l + packn * 5];
                        tmpptr[6] = r0[l + packn * 6];
                        tmpptr[7] = r0[l + packn * 7];
                        tmpptr += 8;
                    }

                    r0 += bottom_blob_tm.cstep * packn;
#else
                    // segmented store performs the 8-tile transpose in one shot
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl);
                    vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl);
                    vfloat16m1_t _val4 = vle16_v_f16m1(r0 + packn * 4, vl);
                    vfloat16m1_t _val5 = vle16_v_f16m1(r0 + packn * 5, vl);
                    vfloat16m1_t _val6 = vle16_v_f16m1(r0 + packn * 6, vl);
                    vfloat16m1_t _val7 = vle16_v_f16m1(r0 + packn * 7, vl);
                    vsseg8e16_v_f16m1x8(tmpptr, vcreate_f16m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 8;
#endif
                }
            }
            for (; i + 3 < tiles; i += 4)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if RVV_SPEC_0_7
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr[2] = r0[l + packn * 2];
                        tmpptr[3] = r0[l + packn * 3];
                        tmpptr += 4;
                    }

                    r0 += bottom_blob_tm.cstep * packn;
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl);
                    vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl);
                    vsseg4e16_v_f16m1x4(tmpptr, vcreate_f16m1x4(_val0, _val1, _val2, _val3), vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 4;
#endif
                }
            }
            for (; i + 1 < tiles; i += 2)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if RVV_SPEC_0_7
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr += 2;
                    }

                    r0 += bottom_blob_tm.cstep * packn;
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vsseg2e16_v_f16m1x2(tmpptr, vcreate_f16m1x2(_val0, _val1), vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 2;
#endif
                }
            }
            for (; i < tiles; i++)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
                    vfloat16m1_t _val = vle16_v_f16m1(r0, vl);
                    vse16_v_f16m1(tmpptr, _val, vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn;
                }
            }
        }

        bottom_blob_tm = Mat();
        // permute end

        top_blob_tm.create(tiles, 36, outch, 2u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            __fp16* output0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int r = 0; r < 36; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                // 8-tile micro-kernel: 8 scalar broadcasts against one weight vector
                for (; i + 7 < tiles; i += 8)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum4 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum5 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum6 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum7 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        __fp16 val2 = *r0++;
                        __fp16 val3 = *r0++;
                        __fp16 val4 = *r0++;
                        __fp16 val5 = *r0++;
                        __fp16 val6 = *r0++;
                        __fp16 val7 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
                        _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
                        _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);
                        _sum4 = vfmacc_vf_f16m1(_sum4, val4, _w0, vl);
                        _sum5 = vfmacc_vf_f16m1(_sum5, val5, _w0, vl);
                        _sum6 = vfmacc_vf_f16m1(_sum6, val6, _w0, vl);
                        _sum7 = vfmacc_vf_f16m1(_sum7, val7, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);
                    vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl);
                    vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl);
                    vse16_v_f16m1(output0_tm + packn * 4, _sum4, vl);
                    vse16_v_f16m1(output0_tm + packn * 5, _sum5, vl);
                    vse16_v_f16m1(output0_tm + packn * 6, _sum6, vl);
                    vse16_v_f16m1(output0_tm + packn * 7, _sum7, vl);

                    output0_tm += packn * 8;
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        __fp16 val2 = *r0++;
                        __fp16 val3 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
                        _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
                        _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);
                    vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl);
                    vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl);

                    output0_tm += packn * 4;
                }
                for (; i + 1 < tiles; i += 2)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);

                    output0_tm += packn * 2;
                }
                for (; i < tiles; i++)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum, vl);

                    output0_tm += packn;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        // const float otm[4][6] = {
        //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };

        // 0 = r00 + (r01 + r02) + (r03 + r04)
        // 1 = (r01 - r02) + (r03 - r04) * 2
        // 2 = (r01 + r02) + (r03 + r04) * 4
        // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = w_tm / 6 * h_tm / 6;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            const Mat out0_tm = top_blob_tm.channel(p);
            Mat out0 = top_blob_bordered.channel(p);

            // const float bias0 = bias ? bias[p] : 0.f;
            vfloat16m1_t _bias0 = bias ? vle16_v_f16m1((const __fp16*)bias + p * packn, vl) : vfmv_v_f_f16m1(0.f, vl);

            // NOTE variable length array
            __fp16 tmp[4][6][packn];

            // tile
            for (int i = 0; i < outh / 4; i++)
            {
                for (int j = 0; j < outw / 4; j++)
                {
                    // top_blob_tm.create(tiles, 36, outch, elemsize, elempack);

                    const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 6 + j) * packn;
                    const __fp16* output0_tm_1 = output0_tm_0 + tiles * packn;
                    const __fp16* output0_tm_2 = output0_tm_0 + tiles * packn * 2;
                    const __fp16* output0_tm_3 = output0_tm_0 + tiles * packn * 3;
                    const __fp16* output0_tm_4 = output0_tm_0 + tiles * packn * 4;
                    const __fp16* output0_tm_5 = output0_tm_0 + tiles * packn * 5;

                    __fp16* output0 = out0.row<__fp16>(i * 4) + (j * 4) * packn;

                    // TODO rvv optimize
                    // vertical pass: tmp = A^T m
                    for (int m = 0; m < 6; m++)
                    {
                        vfloat16m1_t _out0tm0 = vle16_v_f16m1(output0_tm_0, vl);
                        vfloat16m1_t _out0tm1 = vle16_v_f16m1(output0_tm_1, vl);
                        vfloat16m1_t _out0tm2 = vle16_v_f16m1(output0_tm_2, vl);
                        vfloat16m1_t _out0tm3 = vle16_v_f16m1(output0_tm_3, vl);
                        vfloat16m1_t _out0tm4 = vle16_v_f16m1(output0_tm_4, vl);
                        vfloat16m1_t _out0tm5 = vle16_v_f16m1(output0_tm_5, vl);

                        vfloat16m1_t _tmp02a = vfadd_vv_f16m1(_out0tm1, _out0tm2, vl);
                        vfloat16m1_t _tmp13a = vfsub_vv_f16m1(_out0tm1, _out0tm2, vl);

                        vfloat16m1_t _tmp02b = vfadd_vv_f16m1(_out0tm3, _out0tm4, vl);
                        vfloat16m1_t _tmp13b = vfsub_vv_f16m1(_out0tm3, _out0tm4, vl);

                        vfloat16m1_t _tmp0m = vfadd_vv_f16m1(vfadd_vv_f16m1(_out0tm0, _tmp02a, vl), _tmp02b, vl);
                        vfloat16m1_t _tmp1m = vfmacc_vf_f16m1(_tmp13a, 2.f, _tmp13b, vl);
                        vfloat16m1_t _tmp2m = vfmacc_vf_f16m1(_tmp02a, 4.f, _tmp02b, vl);
                        vfloat16m1_t _tmp3m = vfmacc_vf_f16m1(vfadd_vv_f16m1(_out0tm5, _tmp13a, vl), 8.f, _tmp13b, vl);

                        vse16_v_f16m1(tmp[0][m], _tmp0m, vl);
                        vse16_v_f16m1(tmp[1][m], _tmp1m, vl);
                        vse16_v_f16m1(tmp[2][m], _tmp2m, vl);
                        vse16_v_f16m1(tmp[3][m], _tmp3m, vl);

                        output0_tm_0 += tiles * packn * 6;
                        output0_tm_1 += tiles * packn * 6;
                        output0_tm_2 += tiles * packn * 6;
                        output0_tm_3 += tiles * packn * 6;
                        output0_tm_4 += tiles * packn * 6;
                        output0_tm_5 += tiles * packn * 6;
                    }

                    // horizontal pass: (A^T m) A + bias -> 4x4 output pixels
                    for (int m = 0; m < 4; m++)
                    {
                        vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl);
                        vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl);
                        vfloat16m1_t _tmp02 = vle16_v_f16m1(tmp[m][2], vl);
                        vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl);
                        vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl);
                        vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl);

                        vfloat16m1_t _tmp02a = vfadd_vv_f16m1(_tmp01, _tmp02, vl);
                        vfloat16m1_t _tmp13a = vfsub_vv_f16m1(_tmp01, _tmp02, vl);

                        vfloat16m1_t _tmp02b = vfadd_vv_f16m1(_tmp03, _tmp04, vl);
                        vfloat16m1_t _tmp13b = vfsub_vv_f16m1(_tmp03, _tmp04, vl);

                        vfloat16m1_t _out00 = vfadd_vv_f16m1(_bias0, vfadd_vv_f16m1(vfadd_vv_f16m1(_tmp00, _tmp02a, vl), _tmp02b, vl), vl);
                        vfloat16m1_t _out01 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(_tmp13a, 2.f, _tmp13b, vl), vl);
                        vfloat16m1_t _out02 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(_tmp02a, 4.f, _tmp02b, vl), vl);
                        vfloat16m1_t _out03 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp05, _tmp13a, vl), 8.f, _tmp13b, vl), vl);

                        vse16_v_f16m1(output0, _out00, vl);
                        vse16_v_f16m1(output0 + packn, _out01, vl);
                        vse16_v_f16m1(output0 + packn * 2, _out02, vl);
                        vse16_v_f16m1(output0 + packn * 3, _out03, vl);

                        output0 += outw * packn;
                    }
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
dropout-inl.h
/*!
 * Copyright (c) 2015 by Contributors
 * \file dropout-inl.h
 * \brief Dropout operator: randomly zeroes a fraction p of activations during
 *        training; identity at inference time.
 * \author Bing Xu
 */
#ifndef MXNET_OPERATOR_DROPOUT_INL_H_
#define MXNET_OPERATOR_DROPOUT_INL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <algorithm>
#include "./operator_common.h"
#include "./mshadow_op.h"

#if defined(USE_MKL) && defined(_OPENMP)
#include <omp.h>
#include <mkl_vml_functions.h>
#include <mkl_vsl.h>
#endif  // USE_MKL && _OPENMP

namespace dropout {
enum DropoutOpInputs {kData};
enum DropoutOpOutputs {kOut, kMask};          // kMask is only meaningful in training mode
enum DropoutOpForwardResource {kRandom};
}  // namespace dropout

namespace mxnet {
namespace op {

#if defined(USE_MKL) && defined(_OPENMP)
// Fill r[0..n) with Bernoulli(p) samples (1 with probability p) using MKL VSL,
// splitting the range evenly across OpenMP threads; each thread skips ahead in
// a shared MCG31 stream so the combined output matches a single serial stream.
// NOTE(review): the seed comes from rand(), not from MXNet's RNG resource, so
// results are not reproducible w.r.t. the framework seed — confirm intended.
static void bernoulli_generate(int n, double p, int* r) {
  int seed = 17 + rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
  int nthr = omp_get_max_threads();
# pragma omp parallel num_threads(nthr)
  {
    const int ithr = omp_get_thread_num();
    const int avg_amount = (n + nthr - 1) / nthr;
    const int my_offset = ithr * avg_amount;
    const int my_amount = std::min(my_offset + avg_amount, n) - my_offset;
    if (my_amount > 0) {
      VSLStreamStatePtr stream;
      vslNewStream(&stream, VSL_BRNG_MCG31, seed);
      vslSkipAheadStream(stream, my_offset);
      viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p);
      vslDeleteStream(&stream);
    }
  }
}
#endif  // USE_MKL && _OPENMP

struct DropoutParam : public dmlc::Parameter<DropoutParam> {
  // p: drop probability in [0, 1]; keep probability is 1 - p
  float p;
  DMLC_DECLARE_PARAMETER(DropoutParam) {
    DMLC_DECLARE_FIELD(p).set_default(0.5)
    .set_range(0, 1)
    .describe("Fraction of the input that gets dropped out during training time.");
  }
};  // struct DropoutParam

template<typename xpu, typename DType>
class DropoutOp : public Operator {
 public:
  explicit DropoutOp(DropoutParam param) {
    this->pkeep_ = 1.0f - param.p;
  }

  // Training: out = data * mask where mask is Bernoulli(pkeep_) (scaled by
  // 1/pkeep_ on the non-MKL path — inverted dropout).  Inference: out = data.
  virtual void Forward(const OpContext &ctx,
                       const std::vector<TBlob> &in_data,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &out_data,
                       const std::vector<TBlob> &aux_states) {
    using namespace mshadow;
    using namespace mshadow::expr;
    CHECK_EQ(in_data.size(), 1U);
    if (ctx.is_train) {
      CHECK_EQ(out_data.size(), 2U);
    }
    Stream<xpu> *s = ctx.get_stream<xpu>();
    Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s);
    if (ctx.is_train) {
      Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
#if defined(USE_MKL) && defined(_OPENMP)
      DType* outptr = out.dptr_;
      DType* dataptr = data.dptr_;
      // NOTE(review): the DType mask buffer is reused as int storage — this
      // assumes sizeof(DType) == sizeof(int) (e.g. float32); verify for other
      // dtypes.  Also note this path does NOT scale by 1/pkeep_, unlike the
      // non-MKL branch below — confirm whether that asymmetry is intended.
      int* maskptr = reinterpret_cast<int*>(mask.dptr_);
      int count = mask.shape_[0]*mask.shape_[1];
      bernoulli_generate(count, this->pkeep_, maskptr);
  #pragma omp parallel for
      for (int i = 0; i < count; ++i) {
        outptr[i] = dataptr[i] * maskptr[i];
      }
#else
      Random<xpu> *prnd = ctx.requested[dropout::kRandom].get_random<xpu, real_t>(s);
      // inverted dropout: keep with prob pkeep_ and rescale kept units
      mask = tcast<DType>(F<mshadow_op::threshold>(
             prnd->uniform(mask.shape_), pkeep_) * (1.0f / pkeep_));
      Assign(out, req[dropout::kOut], data * mask);
#endif  // USE_MKL && _OPENMP
    } else {
      Assign(out, req[dropout::kOut], F<mshadow_op::identity>(data));
    }
  }

  // Backward: gradient flows only through the kept units, gdata = grad * mask
  // (the mask saved by Forward).
  virtual void Backward(const OpContext &ctx,
                        const std::vector<TBlob> &out_grad,
                        const std::vector<TBlob> &in_data,
                        const std::vector<TBlob> &out_data,
                        const std::vector<OpReqType> &req,
                        const std::vector<TBlob> &in_grad,
                        const std::vector<TBlob> &aux_states) {
    using namespace mshadow;
    using namespace mshadow::expr;
    CHECK_EQ(out_grad.size(), 1U);
    CHECK_EQ(in_grad.size(), 1U);
    Stream<xpu> *s = ctx.get_stream<xpu>();
    Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s);
#if defined(USE_MKL) && defined(_OPENMP)
    DType* ingradptr = gdata.dptr_;
    DType* outgradptr = grad.dptr_;
    // NOTE(review): same sizeof(DType) == sizeof(int) assumption as Forward.
    int* maskptr = reinterpret_cast<int*>(mask.dptr_);
    int count = mask.shape_[0]*mask.shape_[1];
  #pragma omp parallel for
    for (int i = 0; i < count; ++i) {
      ingradptr[i] = outgradptr[i] * maskptr[i];
    }
#else  // USE_MKL && _OPENMP
    Assign(gdata, req[dropout::kData], grad * mask);
#endif  // USE_MKL && _OPENMP
  }

 private:
  // keep probability, 1 - param.p
  real_t pkeep_;
};  // class DropoutOp

// Factory, defined per-device in the .cc/.cu files.
template<typename xpu>
Operator *CreateOp(DropoutParam param, int dtype);

#if DMLC_USE_CXX11
class DropoutProp : public OperatorProperty {
 public:
  void Init(const std::vector<std::pair<std::string, std::string> >& kwargs) override {
    param_.Init(kwargs);
  }

  std::map<std::string, std::string> GetParams() const override {
    return param_.__DICT__();
  }

  // Both outputs (output, mask) share the input's shape.
  bool InferShape(std::vector<TShape> *in_shape,
                  std::vector<TShape> *out_shape,
                  std::vector<TShape> *aux_shape) const override {
    using namespace mshadow;
    CHECK_EQ(in_shape->size(), 1U);
    const TShape &dshape = in_shape->at(0);
    if (dshape.ndim() == 0) return false;
    out_shape->clear();
    out_shape->push_back(dshape);
    out_shape->push_back(dshape);
    return true;
  }

  // Both outputs take the input's dtype; fails if the input dtype is unknown.
  bool InferType(std::vector<int> *in_type,
                 std::vector<int> *out_type,
                 std::vector<int> *aux_type) const override {
    CHECK_EQ(in_type->size(), 1U);
    int dtype = in_type->at(0);

    if (dtype == -1) {
      LOG(FATAL) << "input type to dropout is not specified.";
      return false;
    }

    size_t nout = this->ListOutputs().size();
    out_type->clear();
    for (size_t i = 0; i < nout; ++i) out_type->push_back(dtype);
    return true;
  }

  OperatorProperty* Copy() const override {
    auto ptr = new DropoutProp();
    ptr->param_ = param_;
    return ptr;
  }

  std::string TypeString() const override {
    return "Dropout";
  }

  // Backward needs the output gradient and the saved mask only.
  std::vector<int> DeclareBackwardDependency(
    const std::vector<int> &out_grad,
    const std::vector<int> &in_data,
    const std::vector<int> &out_data) const override {
    return {out_grad[dropout::kOut], out_data[dropout::kMask]};
  }

  std::vector<std::pair<int, void*> > BackwardInplaceOption(
    const std::vector<int> &out_grad,
    const std::vector<int> &in_data,
    const std::vector<int> &out_data,
    const std::vector<void*> &in_grad) const override {
    return {{out_grad[dropout::kOut], in_grad[dropout::kData]}};
  }

  std::vector<std::pair<int, void*> > ForwardInplaceOption(
    const std::vector<int> &in_data,
    const std::vector<void*> &out_data) const override {
    return {{in_data[dropout::kData], out_data[dropout::kOut]}};
  }

  std::vector<ResourceRequest> ForwardResource(
      const std::vector<TShape> &in_shape) const override {
    return {ResourceRequest::kRandom};
  }

  // The mask output is hidden from users (visible outputs = 1 of 2).
  int NumVisibleOutputs() const override {
    return 1;
  }

  int NumOutputs() const override {
    return 2;
  }

  std::vector<std::string> ListOutputs() const override {
    return {"output", "mask"};
  }

  Operator* CreateOperator(Context ctx) const override {
    LOG(FATAL) << "Not Implemented";
    return NULL;
  }

  Operator* CreateOperatorEx(Context ctx, std::vector<TShape> *in_shape,
                             std::vector<int> *in_type) const override;

 private:
  DropoutParam param_;
};  // class DropoutProp
#endif  // DMLC_USE_CXX11
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_DROPOUT_INL_H_
ompfor4.c
/*
 * Dynamic schedule demo: an OpenMP work-sharing loop with
 * schedule(dynamic), counting DOWN through the index range.
 */
#include <stdio.h>

#ifdef _OPENMP
#include <omp.h>
#else
/* Serial-build fallbacks: without these, compiling without -fopenmp
 * left omp_get_thread_num()/omp_get_num_threads() undeclared even
 * though the pragmas themselves are silently ignored. */
static int omp_get_thread_num(void)  { return 0; }
static int omp_get_num_threads(void) { return 1; }
#endif

int a[20];

/*
 * Fill a[i] = 2*i for i = lower, lower-stride, lower-2*stride, ...
 * while i > upper, sharing the iterations dynamically among the
 * threads of the enclosing parallel region.
 *
 * Caller must ensure 0 <= i < 20 for every visited index.
 */
void foo(int lower, int upper, int stride)
{
    int i;

    /* Orphaned work-sharing directive: binds to the parallel region
     * active at the call site; chunks are handed out on demand. */
#pragma omp for schedule(dynamic)
    for (i = lower; i > upper; i -= stride) {
        a[i] = i * 2;
        printf("Iteration %2d is carried out by thread %2d\n",
               i, omp_get_thread_num());
    }
}

int main(void)
{
#pragma omp parallel
    {
#pragma omp single
        printf("Using %d threads.\n", omp_get_num_threads());

        /* BUG FIX: was foo(0, 20, 3).  With the downward-counting loop
         * (condition i > upper, step i -= stride) that call's condition
         * 0 > 20 is false immediately, so the demo never executed a
         * single iteration.  Count down from 19 instead: visits
         * 19, 16, 13, 10, 7, 4, 1 — all within a[0..19]. */
        foo(19, -1, 3);
    }
    return 0;
}
dpado.202001232110.limit_distances.h
//
// Created by Zhen Peng on 1/6/20.
//

#ifndef PADO_DPADO_H
#define PADO_DPADO_H

#include <vector>
#include <map>
#include <algorithm>
#include <iostream>
#include <limits.h>
#include <immintrin.h>
#include <bitset>
#include <math.h>
#include <fstream>
#include <omp.h>
#include "globals.h"
#include "dglobals.h"
#include "dgraph.h"

namespace PADO {

// Distributed Bit-Vertex-Centric Pruned Landmark Labeling (2-hop label
// construction) over a DistGraph partitioned across MPI hosts.
// Vertices are processed in batches of BATCH_SIZE roots; each batch builds
// distance labels via pruned BFS, augmented with BITPARALLEL_SIZE
// bit-parallel (64-root) shortest-path trees used for fast pruning checks.
template <VertexID BATCH_SIZE = 1024>
class DistBVCPLL {
private:
    // Number of bit-parallel roots (each stores distances plus two 64-bit
    // neighbor sets per vertex).
    static const VertexID BITPARALLEL_SIZE = 50;
    // Work-size threshold above which the parallel (OpenMP) code paths are
    // preferred over the sequential ones.
    const inti THRESHOLD_PARALLEL = 80;

    // Per-vertex label index: bit-parallel part plus the normal 2-hop labels
    // grouped by distance.
    struct IndexType {
        // One run of labels that all share the same distance value.
        struct DistanceIndexType {
            VertexID start_index;   // Index into `vertices` where this same-distance run starts
            VertexID size;          // Number of vertices in the run
            UnweightedDist dist;    // The shared distance value

            DistanceIndexType() = default;
            DistanceIndexType(VertexID start_index_, VertexID size_, UnweightedDist dist_):
                start_index(start_index_), size(size_), dist(dist_)
            { }
        };

        // Bit-parallel labels: distance to each bit-parallel root, and the
        // two bitsets per root ([0]: S^{-1}, [1]: S^{0}).
        UnweightedDist bp_dist[BITPARALLEL_SIZE];
        uint64_t bp_sets[BITPARALLEL_SIZE][2];

        std::vector<DistanceIndexType> distances;   // Distance-run directory over `vertices`
        std::vector<VertexID> vertices;             // Label vertices (stored as batch-local/temporary IDs)

        // Approximate memory footprint of this label entry, in bytes.
        size_t get_size_in_bytes() const
        {
            return sizeof(bp_dist) + sizeof(bp_sets)
                + distances.size() * sizeof(DistanceIndexType)
                + vertices.size() * sizeof(VertexID);
        }

        // Release the normal-label storage (shrink-to-zero via swap idiom).
        void clean_all_indices()
        {
            std::vector<DistanceIndexType>().swap(distances);
            std::vector<VertexID>().swap(vertices);
        }
    };

    // Per-vertex scratch state used while processing one batch of roots.
    struct ShortIndex {
        // indicator[r] != 0 means root r was already selected as a candidate
        // for this vertex at some point during the current batch.
        std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE, 0);
        // Queue of currently pending candidate roots, plus a membership flag
        // array so duplicates are not enqueued.
        std::vector<VertexID> candidates_que = std::vector<VertexID>(BATCH_SIZE);
        VertexID end_candidates_que = 0;
        std::vector<uint8_t> is_candidate = std::vector<uint8_t>(BATCH_SIZE, 0);

        void indicator_reset()
        {
            std::fill(indicator.begin(), indicator.end(), 0);
        }
    };

    // Bit-parallel label record for one root (value-initialized to zero).
    struct BPLabelType {
        UnweightedDist bp_dist[BITPARALLEL_SIZE] = { 0 };
        uint64_t bp_sets[BITPARALLEL_SIZE][2] = { {0} }; // [0]: S^{-1}, [1]: S^{0}
    };

    // Message unit for exchanging normal labels when initializing the
    // per-batch distance table.
    struct LabelTableUnit {
        VertexID root_id;
        VertexID label_global_id;
        UnweightedDist dist;

        LabelTableUnit() = default;
        LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
            root_id(r), label_global_id(l), dist(d) {}
    };

    // Message unit for exchanging a root's bit-parallel labels between hosts.
    struct MsgBPLabel {
        VertexID r_root_id;
        UnweightedDist bp_dist[BITPARALLEL_SIZE];
        uint64_t bp_sets[BITPARALLEL_SIZE][2];

        MsgBPLabel() = default;
        MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2])
            : r_root_id(r)
        {
            memcpy(bp_dist, dist, sizeof(bp_dist));
            memcpy(bp_sets, sets, sizeof(bp_sets));
        }
    };

    VertexID num_v = 0;         // Total number of vertices in the whole graph
    VertexID num_masters = 0;   // Number of vertices mastered (owned) by this host
    int host_id = 0;            // This host's MPI rank
    int num_hosts = 0;          // Total number of MPI hosts
    MPI_Datatype V_ID_Type;     // MPI datatype matching VertexID
    std::vector<IndexType> L;   // Labels for this host's master vertices (indexed by local ID)

    // One BFS expansion step of a bit-parallel root: scan v_global's local
    // out-edges, record sibling edges (equal distance) and child edges
    // (distance + 1), and claim newly reached vertices into tmp_q.
    inline void bit_parallel_push_labels(
            const DistGraph &G,
            VertexID v_global,
            std::vector<VertexID> &tmp_q,
            VertexID &size_tmp_q,
            std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es,
            VertexID &size_tmp_sibling_es,
            std::vector< std::pair<VertexID, VertexID> > &tmp_child_es,
            VertexID &size_tmp_child_es,
            const VertexID &offset_tmp_q,
            std::vector<UnweightedDist> &dists,
            UnweightedDist iter);

    // Build the BITPARALLEL_SIZE bit-parallel shortest-path trees and mark
    // the chosen roots in used_bp_roots.
    inline void bit_parallel_labeling(
            const DistGraph &G,
            std::vector<uint8_t> &used_bp_roots);

    // Process one batch of roots [roots_start, roots_start + roots_size):
    // pruned BFS rounds that push labels, check candidates, and insert new
    // labels, coordinating across hosts.
    inline void batch_process(
            const DistGraph &G,
            const VertexID roots_start,
            const VertexID roots_size,
            const std::vector<uint8_t> &used_bp_roots,
            std::vector<VertexID> &active_queue,
            VertexID &end_active_queue,
            std::vector<VertexID> &got_candidates_queue,
            VertexID &end_got_candidates_queue,
            std::vector<ShortIndex> &short_index,
            std::vector< std::vector<UnweightedDist> > &dist_table,
            std::vector< std::vector<VertexID> > &recved_dist_table,
            std::vector<BPLabelType> &bp_labels_table,
            std::vector<uint8_t> &got_candidates,
            std::vector<uint8_t> &is_active,
            std::vector<VertexID> &once_candidated_queue,
            VertexID &end_once_candidated_queue,
            std::vector<uint8_t> &once_candidated);

    // Batch setup: initialize the roots' own labels, the distance table, and
    // the active queue; returns a count (presumably the number of local
    // active roots — TODO confirm against the definition).
    inline VertexID initialization(
            const DistGraph &G,
            std::vector<ShortIndex> &short_index,
            std::vector< std::vector<UnweightedDist> > &dist_table,
            std::vector< std::vector<VertexID> > &recved_dist_table,
            std::vector<BPLabelType> &bp_labels_table,
            std::vector<VertexID> &active_queue,
            VertexID &end_active_queue,
            std::vector<VertexID> &once_candidated_queue,
            VertexID &end_once_candidated_queue,
            std::vector<uint8_t> &once_candidated,
            VertexID roots_start,
            VertexID roots_size,
            const std::vector<uint8_t> &used_bp_roots);

    // Parallel (OpenMP) scheduling of label pushing for a slice of the
    // active queue.
    inline void schedule_label_pushing_para(
            const DistGraph &G,
            const VertexID roots_start,
            const std::vector<uint8_t> &used_bp_roots,
            const std::vector<VertexID> &active_queue,
            const VertexID global_start,
            const VertexID global_size,
            const VertexID local_size,
            std::vector<VertexID> &got_candidates_queue,
            VertexID &end_got_candidates_queue,
            std::vector<ShortIndex> &short_index,
            const std::vector<BPLabelType> &bp_labels_table,
            std::vector<uint8_t> &got_candidates,
            std::vector<uint8_t> &is_active,
            std::vector<VertexID> &once_candidated_queue,
            VertexID &end_once_candidated_queue,
            std::vector<uint8_t> &once_candidated,
            const UnweightedDist iter);

    // Sequential push of v_head_global's labels in labels_buffer
    // [start_index, bound_index) to its local neighbors.
    inline void local_push_labels_seq(
            VertexID v_head_global,
            EdgeID start_index,
            EdgeID bound_index,
            VertexID roots_start,
            const std::vector<VertexID> &labels_buffer,
            const DistGraph &G,
            std::vector<ShortIndex> &short_index,
            std::vector<VertexID> &got_candidates_queue,
            VertexID &end_got_candidates_queue,
            std::vector<uint8_t> &got_candidates,
            std::vector<VertexID> &once_candidated_queue,
            VertexID &end_once_candidated_queue,
            std::vector<uint8_t> &once_candidated,
            const std::vector<BPLabelType> &bp_labels_table,
            const std::vector<uint8_t> &used_bp_roots,
            const UnweightedDist iter);

    // Parallel variant of local_push_labels_seq: results go into per-thread
    // temporary queues at offset_tmp_queue instead of the shared queues.
    inline void local_push_labels_para(
            const VertexID v_head_global,
            const EdgeID start_index,
            const EdgeID bound_index,
            const VertexID roots_start,
            const std::vector<VertexID> &labels_buffer,
            const DistGraph &G,
            std::vector<ShortIndex> &short_index,
            std::vector<VertexID> &tmp_got_candidates_queue,
            VertexID &size_tmp_got_candidates_queue,
            const VertexID offset_tmp_queue,
            std::vector<uint8_t> &got_candidates,
            std::vector<VertexID> &tmp_once_candidated_queue,
            VertexID &size_tmp_once_candidated_queue,
            std::vector<uint8_t> &once_candidated,
            const std::vector<BPLabelType> &bp_labels_table,
            const std::vector<uint8_t> &used_bp_roots,
            const UnweightedDist iter);

    // Parallel scheduling of candidate checking and label insertion for a
    // slice of got_candidates_queue; newly confirmed labels are collected
    // into buffer_send for exchange with other hosts.
    inline void schedule_label_inserting_para(
            const DistGraph &G,
            const VertexID roots_start,
            const VertexID roots_size,
            std::vector<ShortIndex> &short_index,
            const std::vector< std::vector<UnweightedDist> > &dist_table,
            const std::vector<VertexID> &got_candidates_queue,
            const VertexID start_got_candidates_queue,
            const VertexID size_got_candidates_queue,
            std::vector<uint8_t> &got_candidates,
            std::vector<VertexID> &active_queue,
            VertexID &end_active_queue,
            std::vector<uint8_t> &is_active,
            std::vector< std::pair<VertexID, VertexID> > &buffer_send,
            const VertexID iter);

    // Pruning check: returns whether the candidate label (cand_root_id) for
    // v_id is already covered by an existing shorter-or-equal 2-hop path.
    inline bool distance_query(
            VertexID cand_root_id,
            VertexID v_id,
            VertexID roots_start,
            const std::vector< std::vector<UnweightedDist> > &dist_table,
            UnweightedDist iter);

    // Sequential insertion of a confirmed label; the (root, vertex) pair is
    // appended to buffer_send for the later host exchange.
    inline void insert_label_only_seq(
            VertexID cand_root_id,
            VertexID v_id_local,
            VertexID roots_start,
            VertexID roots_size,
            const DistGraph &G,
            std::vector< std::pair<VertexID, VertexID> > &buffer_send);

    // Parallel variant of insert_label_only_seq writing into a per-thread
    // region of tmp_buffer_send at offset_tmp_buffer_send.
    inline void insert_label_only_para(
            VertexID cand_root_id,
            VertexID v_id_local,
            VertexID roots_start,
            VertexID roots_size,
            const DistGraph &G,
            std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send,
            EdgeID &size_tmp_buffer_send,
            const EdgeID offset_tmp_buffer_send);

    // Record the inserted_count labels added to v_id at distance `iter` in
    // L[v_id]'s distance-run directory.
    inline void update_label_indices(
            const VertexID v_id,
            const VertexID inserted_count,
            const UnweightedDist iter);

    // End-of-batch cleanup: reset dist_table entries (including those
    // received from other hosts via recved_dist_table) and bp_labels_table.
    inline void reset_at_end(
            const DistGraph &G,
            std::vector< std::vector<UnweightedDist> > &dist_table,
            std::vector< std::vector<VertexID> > &recved_dist_table,
            std::vector<BPLabelType> &bp_labels_table,
            const std::vector<VertexID> &once_candidated_queue,
            const VertexID end_once_candidated_queue);

    // Broadcast `root`'s buffer_send into every host's buffer_recv.
    template <typename E_T>
    inline void one_host_bcasts_buffer_to_buffer(
            int root,
            std::vector<E_T> &buffer_send,
            std::vector<E_T> &buffer_recv);

    // Total size in bytes of this host's label index.
    size_t get_index_size()
    {
        size_t bytes = 0;
        for (VertexID v_i = 0; v_i < num_masters; ++v_i) {
            bytes += L[v_i].get_size_in_bytes();
        }
        return bytes;
    }

public:
    DistBVCPLL() = default;
    // Builds the full labeling for G (collective: must be called on all hosts).
    explicit DistBVCPLL(
            const DistGraph &G);
}; // class DistBVCPLL

// Constructor: run bit-parallel labeling, then process the vertices in
// batches of BATCH_SIZE roots, and finally report label statistics and the
// global (max-over-hosts) labeling time.
// Collective operation: every MPI host must enter it together.
template <VertexID BATCH_SIZE>
DistBVCPLL<BATCH_SIZE>::DistBVCPLL(
        const DistGraph &G)
{
    num_v = G.num_v;
    assert(num_v >= BATCH_SIZE); // assumes <cassert> is available via the project headers — TODO confirm
    num_masters = G.num_masters;
    host_id = G.host_id;
    num_hosts = G.num_hosts;
    V_ID_Type = G.V_ID_Type;
    L.resize(num_masters); // labels only for vertices mastered by this host

    // NOTE(review): "remainer" (sic) is the leftover batch size; identifier
    // kept as-is to preserve the original tokens.
    VertexID remainer = num_v % BATCH_SIZE;
    VertexID b_i_bound = num_v / BATCH_SIZE;
    std::vector<uint8_t> used_bp_roots(num_v, 0);

    double time_labeling = -WallTimer::get_time_mark();

    // Phase 1: bit-parallel labels (also marks the chosen roots and their
    // selected neighbors in used_bp_roots).
    bit_parallel_labeling(G, used_bp_roots);
    {//test
        if (0 == host_id) {
            printf("host_id: %u bp_labeling_finished.\n", host_id);
        }
    }

    // Shared per-batch working state (allocated once, reused every batch).
    std::vector<VertexID> active_queue(num_masters); // vertices that are active this round
    VertexID end_active_queue = 0;
    std::vector<uint8_t> is_active(num_masters, false); // is_active[v]: v is in active_queue
    std::vector<VertexID> got_candidates_queue(num_masters); // vertices that received candidates
    VertexID end_got_candidates_queue = 0;
    std::vector<uint8_t> got_candidates(num_masters, false); // got_candidates[v]: v is in got_candidates_queue
    std::vector<ShortIndex> short_index(num_masters);
    // dist_table[r][v]: current distance from batch-root r to vertex v.
    std::vector< std::vector<UnweightedDist> > dist_table(BATCH_SIZE, std::vector<UnweightedDist>(num_v, MAX_UNWEIGHTED_DIST));
    std::vector<VertexID> once_candidated_queue(num_masters); // vertices ever candidated this batch (for resets)
    VertexID end_once_candidated_queue = 0;
    std::vector<uint8_t> once_candidated(num_masters, false);
    // Distances received from other hosts, recorded so dist_table can be reset.
    std::vector< std::vector<VertexID> > recved_dist_table(BATCH_SIZE);
    std::vector<BPLabelType> bp_labels_table(BATCH_SIZE); // all batch roots' bit-parallel labels

    // Phase 2: full batches, then the leftover partial batch (if any).
    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
        batch_process(
                G,
                b_i * BATCH_SIZE,
                BATCH_SIZE,
                used_bp_roots,
                active_queue,
                end_active_queue,
                got_candidates_queue,
                end_got_candidates_queue,
                short_index,
                dist_table,
                recved_dist_table,
                bp_labels_table,
                got_candidates,
                is_active,
                once_candidated_queue,
                end_once_candidated_queue,
                once_candidated);
    }
    if (remainer != 0) {
        batch_process(
                G,
                b_i_bound * BATCH_SIZE,
                remainer,
                used_bp_roots,
                active_queue,
                end_active_queue,
                got_candidates_queue,
                end_got_candidates_queue,
                short_index,
                dist_table,
                recved_dist_table,
                bp_labels_table,
                got_candidates,
                is_active,
                once_candidated_queue,
                end_once_candidated_queue,
                once_candidated);
    }
    time_labeling += WallTimer::get_time_mark();

    // Test / statistics reporting.
    setlocale(LC_NUMERIC, "");
    if (0 == host_id) {
        printf("BATCH_SIZE: %u ", BATCH_SIZE);
        printf("BP_Size: %u\n", BITPARALLEL_SIZE);
    }
    {// Total Number of Labels
        EdgeID local_num_labels = 0;
        for (VertexID v_global = 0; v_global < num_v; ++v_global) {
            if (G.get_master_host_id(v_global) != host_id) {
                continue;
            }
            local_num_labels += L[G.get_local_vertex_id(v_global)].vertices.size();
        }
        EdgeID global_num_labels;
        MPI_Allreduce(&local_num_labels,
                &global_num_labels,
                1,
                MPI_Instance::get_mpi_datatype<EdgeID>(),
                MPI_SUM,
                MPI_COMM_WORLD);
        MPI_Barrier(MPI_COMM_WORLD);
        if (0 == host_id) {
            printf("Global_num_labels: %lu average: %f\n", global_num_labels, 1.0 * global_num_labels / num_v);
        }
    }
    // Report the slowest host's labeling time as the global time.
    double global_time_labeling;
    MPI_Allreduce(&time_labeling, &global_time_labeling, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);
    if (0 == host_id) {
        printf("num_hosts: %d "
               "num_threads: %d "
               "Global_labeling_time: %.2f seconds\n",
               num_hosts,
               NUM_THREADS,
               global_time_labeling);
    }
    // End test
}

// NOTE(review): a large block of commented-out legacy code (an earlier,
// single-host draft of bit_parallel_labeling) was removed here; more of that
// dead commented-out text may still follow below.
//// VertexID i_bound = G.vertices[r] - 1; //// VertexID i_start = i_bound + G.out_degrees[r]; //// for (VertexID i = i_start; i > i_bound; --i) { // //int i_bound = G.vertices[r]; // //int i_start = i_bound + G.out_degrees[r] - 1; // //for (int i = i_start; i >= i_bound; --i) { // VertexID d_i_bound = G.local_out_degrees[r]; // EdgeID i_start = G.vertices_idx[r] + d_i_bound - 1; // for (VertexID d_i = 0; d_i < d_i_bound; ++d_i) { // EdgeID i = i_start - d_i; // VertexID v = G.out_edges[i]; // if (!used_bp_roots[v]) { // used_bp_roots[v] = true; // // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set) // que[que_h++] = v; // tmp_d[v] = 1; // tmp_s[v].first = 1ULL << ns; // if (++ns == 64) break; // } // } // //} //// } // // for (UnweightedDist d = 0; que_t0 < que_h; ++d) { // VertexID num_sibling_es = 0, num_child_es = 0; // // for (VertexID que_i = que_t0; que_i < que_t1; ++que_i) { // VertexID v = que[que_i]; //// bit_parallel_push_labels(G, //// v, //// que, //// que_h, //// sibling_es, //// num_sibling_es, //// child_es, //// num_child_es, //// tmp_d, //// d); // EdgeID i_start = G.vertices_idx[v]; // EdgeID i_bound = i_start + G.local_out_degrees[v]; // for (EdgeID i = i_start; i < i_bound; ++i) { // VertexID tv = G.out_edges[i]; // UnweightedDist td = d + 1; // // if (d > tmp_d[tv]) { // ; // } // else if (d == tmp_d[tv]) { // if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph. 
// NOTE(review): the remainder of a commented-out legacy draft of
// bit_parallel_labeling (dead code) was removed here.

// One BFS expansion step for the bit-parallel labeling, from vertex
// v_global at distance `iter` from the current bit-parallel root.
// Scans v_global's locally stored out-edges and, for each neighbor tv:
//   - iter >  dists[tv]: nothing to do (tv is strictly closer to the root);
//   - iter == dists[tv]: records a sibling edge (equal-distance pair) —
//     only once per undirected edge, via the v_global < tv_global tie-break;
//   - iter <  dists[tv]: records a child edge, and if tv is still unreached
//     (MAX_UNWEIGHTED_DIST) atomically claims it via CAS and appends it to
//     the next-frontier queue tmp_q.
// The tmp_* buffers are per-thread regions starting at offset_tmp_q; the
// size_tmp_* counters are relative to that offset, so concurrent callers
// must use disjoint offsets. `CAS` is a project-provided compare-and-swap
// macro/function (from globals.h — presumably; TODO confirm).
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::bit_parallel_push_labels(
        const DistGraph &G,
        const VertexID v_global,
        std::vector<VertexID> &tmp_q,
        VertexID &size_tmp_q,
        std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es,
        VertexID &size_tmp_sibling_es,
        std::vector< std::pair<VertexID, VertexID> > &tmp_child_es,
        VertexID &size_tmp_child_es,
        const VertexID &offset_tmp_q,
        std::vector<UnweightedDist> &dists,
        const UnweightedDist iter)
{
    // Range of v_global's out-edges stored on this host.
    EdgeID i_start = G.vertices_idx[v_global];
    EdgeID i_bound = i_start + G.local_out_degrees[v_global];
    for (EdgeID i = i_start; i < i_bound; ++i) {
        VertexID tv_global = G.out_edges[i];
        VertexID tv_local = G.get_local_vertex_id(tv_global);
        UnweightedDist td = iter + 1;
        if (iter > dists[tv_local]) {
            ; // neighbor is closer to the root; nothing to record
        }
        else if (iter == dists[tv_local]) {
            // Same distance: sibling edge. The v < tv check records each
            // undirected edge only once.
            if (v_global < tv_global) {
                tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].first = v_global;
                tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].second = tv_global;
                ++size_tmp_sibling_es;
            }
        } else { // iter < dists[tv]: child edge
            if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
                // Atomically claim the unreached vertex so only one thread
                // enqueues it into the next frontier.
                if (CAS(dists.data() + tv_local, MAX_UNWEIGHTED_DIST, td)) {
                    tmp_q[offset_tmp_q + size_tmp_q++] = tv_global;
                }
            }
            tmp_child_es[offset_tmp_q + size_tmp_child_es].first = v_global;
            tmp_child_es[offset_tmp_q + size_tmp_child_es].second = tv_global;
            ++size_tmp_child_es;
        }
    }
}

// Build BITPARALLEL_SIZE bit-parallel shortest-path trees across all hosts,
// marking the chosen roots (and their selected neighbors) in used_bp_roots.
// (Definition continues below.)
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::bit_parallel_labeling(
        const DistGraph &G,
        std::vector<uint8_t> &used_bp_roots)
{
    // Class type of Bit-Parallel label message unit.
struct MsgUnitBP { VertexID v_global; uint64_t S_n1; uint64_t S_0; MsgUnitBP() = default; // MsgUnitBP(MsgUnitBP&& other) = default; // MsgUnitBP(MsgUnitBP& other) = default; // MsgUnitBP& operator=(const MsgUnitBP& other) = default; // MsgUnitBP& operator=(MsgUnitBP&& other) = default; MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0) : v_global(v), S_n1(sn1), S_0(s0) { } }; // VertexID num_v = G.num_v; // EdgeID num_e = G.num_e; EdgeID local_num_edges = G.num_edges_local; std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} std::vector<VertexID> que(num_masters); // active queue VertexID end_que = 0; std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que VertexID end_tmp_que = 0; std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0) std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1. VertexID r_global = 0; // root r for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { // {// test // if (0 == host_id) { // printf("i_bpsp: %u\n", i_bpspt); // } // } // Select the root r_global if (0 == host_id) { while (r_global < num_v && used_bp_roots[r_global]) { ++r_global; } if (r_global == num_v) { for (VertexID v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST; } continue; } } // Broadcast the r here. 
// message_time -= WallTimer::get_time_mark(); MPI_Bcast(&r_global, 1, V_ID_Type, 0, MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); used_bp_roots[r_global] = 1; //#ifdef DEBUG_MESSAGES_ON // {//test // if (0 == host_id) { // printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt); // } // } //#endif // VertexID que_t0 = 0, que_t1 = 0, que_h = 0; fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST); fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); // Mark the r_global if (G.get_master_host_id(r_global) == host_id) { tmp_d[G.get_local_vertex_id(r_global)] = 0; que[end_que++] = r_global; } // Select the r_global's 64 neighbors { // Get r_global's neighbors into buffer_send, rank from high to low. VertexID local_degree = G.local_out_degrees[r_global]; std::vector<VertexID> buffer_send(local_degree); if (local_degree) { EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1; for (VertexID d_i = 0; d_i < local_degree; ++d_i) { EdgeID e_i = e_i_start - d_i; buffer_send[d_i] = G.out_edges[e_i]; } } // Get selected neighbors (up to 64) std::vector<VertexID> selected_nbrs; if (0 != host_id) { // Every host other than 0 sends neighbors to host 0 // message_time -= WallTimer::get_time_mark(); MPI_Instance::send_buffer_2_dst(buffer_send, 0, SENDING_ROOT_NEIGHBORS, SENDING_SIZE_ROOT_NEIGHBORS); // Receive selected neighbors from host 0 MPI_Instance::recv_buffer_from_src(selected_nbrs, 0, SENDING_SELECTED_NEIGHBORS, SENDING_SIZE_SELETED_NEIGHBORS); // message_time += WallTimer::get_time_mark(); } else { // Host 0 // Host 0 receives neighbors from others std::vector<VertexID> all_nbrs(buffer_send); std::vector<VertexID > buffer_recv; for (int loc = 0; loc < num_hosts - 1; ++loc) { // message_time -= WallTimer::get_time_mark(); MPI_Instance::recv_buffer_from_any(buffer_recv, SENDING_ROOT_NEIGHBORS, SENDING_SIZE_ROOT_NEIGHBORS); // message_time += WallTimer::get_time_mark(); if (buffer_recv.empty()) { continue; } buffer_send.resize(buffer_send.size() 
+ buffer_recv.size()); std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin()); all_nbrs.resize(buffer_send.size()); all_nbrs.assign(buffer_send.begin(), buffer_send.end()); } assert(all_nbrs.size() == G.get_global_out_degree(r_global)); // Select 64 (or less) neighbors VertexID ns = 0; // number of selected neighbor, default 64 for (VertexID v_global : all_nbrs) { if (used_bp_roots[v_global]) { continue; } used_bp_roots[v_global] = 1; selected_nbrs.push_back(v_global); if (++ns == 64) { break; } } // Send selected neighbors to other hosts // message_time -= WallTimer::get_time_mark(); for (int dest = 1; dest < num_hosts; ++dest) { MPI_Instance::send_buffer_2_dst(selected_nbrs, dest, SENDING_SELECTED_NEIGHBORS, SENDING_SIZE_SELETED_NEIGHBORS); } // message_time += WallTimer::get_time_mark(); } // {//test // printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size()); // } // Synchronize the used_bp_roots. for (VertexID v_global : selected_nbrs) { used_bp_roots[v_global] = 1; } // Mark selected neighbors for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) { VertexID v_global = selected_nbrs[v_i]; if (host_id != G.get_master_host_id(v_global)) { continue; } tmp_que[end_tmp_que++] = v_global; tmp_d[G.get_local_vertex_id(v_global)] = 1; tmp_s[v_global].first = 1ULL << v_i; } } // Reduce the global number of active vertices VertexID global_num_actives = 1; UnweightedDist d = 0; while (global_num_actives) { {// Limit the distance if (d > 7) { break; } } //#ifdef DEBUG_MESSAGES_ON // {//test // if (0 == host_id) { // printf("d: %u que_size: %u\n", d, global_num_actives); // } // } //#endif // for (UnweightedDist d = 0; que_t0 < que_h; ++d) { VertexID num_sibling_es = 0, num_child_es = 0; // Send active masters to mirrors { std::vector<MsgUnitBP> buffer_send(end_que); for (VertexID que_i = 0; que_i < end_que; ++que_i) { VertexID v_global = que[que_i]; buffer_send[que_i] = MsgUnitBP(v_global, 
tmp_s[v_global].first, tmp_s[v_global].second); } // {// test // printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size()); // } for (int root = 0; root < num_hosts; ++root) { std::vector<MsgUnitBP> buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } // For parallel adding to queue VertexID size_buffer_recv = buffer_recv.size(); std::vector<VertexID> offsets_tmp_q(size_buffer_recv); #pragma omp parallel for for (VertexID i_q = 0; i_q < size_buffer_recv; ++i_q) { offsets_tmp_q[i_q] = G.local_out_degrees[buffer_recv[i_q].v_global]; } VertexID num_neighbors = PADO::prefix_sum_for_offsets(offsets_tmp_q); std::vector<VertexID> tmp_q(num_neighbors); std::vector<VertexID> sizes_tmp_q(size_buffer_recv, 0); // For parallel adding to sibling_es std::vector< std::pair<VertexID, VertexID> > tmp_sibling_es(num_neighbors); std::vector<VertexID> sizes_tmp_sibling_es(size_buffer_recv, 0); // For parallel adding to child_es std::vector< std::pair<VertexID, VertexID> > tmp_child_es(num_neighbors); std::vector<VertexID> sizes_tmp_child_es(size_buffer_recv, 0); #pragma omp parallel for // for (const MsgUnitBP &m : buffer_recv) { for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) { const MsgUnitBP &m = buffer_recv[i_m]; VertexID v_global = m.v_global; if (!G.local_out_degrees[v_global]) { continue; } tmp_s[v_global].first = m.S_n1; tmp_s[v_global].second = m.S_0; // Push labels bit_parallel_push_labels( G, v_global, tmp_q, sizes_tmp_q[i_m], tmp_sibling_es, sizes_tmp_sibling_es[i_m], tmp_child_es, sizes_tmp_child_es[i_m], offsets_tmp_q[i_m], // tmp_que, // end_tmp_que, // sibling_es, // num_sibling_es, // child_es, // num_child_es, tmp_d, d); } {// From tmp_sibling_es to sibling_es idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_sibling_es); PADO::collect_into_queue( tmp_sibling_es, offsets_tmp_q, sizes_tmp_sibling_es, total_size_tmp, sibling_es, num_sibling_es); } {// From tmp_child_es 
to child_es idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_child_es); PADO::collect_into_queue( tmp_child_es, offsets_tmp_q, sizes_tmp_child_es, total_size_tmp, child_es, num_child_es); } {// From tmp_q to tmp_que idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_q); PADO::collect_into_queue( tmp_q, offsets_tmp_q, sizes_tmp_q, total_size_tmp, tmp_que, end_tmp_que); } // {// test // printf("host_id: %u root: %u done push.\n", host_id, root); // } } } // Update the sets in tmp_s { #pragma omp parallel for for (VertexID i = 0; i < num_sibling_es; ++i) { VertexID v = sibling_es[i].first, w = sibling_es[i].second; __atomic_or_fetch(&tmp_s[v].second, tmp_s[w].first, __ATOMIC_SEQ_CST); __atomic_or_fetch(&tmp_s[w].second, tmp_s[v].first, __ATOMIC_SEQ_CST); // tmp_s[v].second |= tmp_s[w].first; // !!! Need to send back!!! // tmp_s[w].second |= tmp_s[v].first; } // Put into the buffer sending to others std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es); #pragma omp parallel for for (VertexID i = 0; i < num_sibling_es; ++i) { VertexID v = sibling_es[i].first; VertexID w = sibling_es[i].second; buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second); buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second); } // Send the messages for (int root = 0; root < num_hosts; ++root) { std::vector< std::pair<VertexID, uint64_t> > buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } size_t i_m_bound = buffer_recv.size(); #pragma omp parallel for for (size_t i_m = 0; i_m < i_m_bound; ++i_m) { const auto &m = buffer_recv[i_m]; __atomic_or_fetch(&tmp_s[m.first].second, m.second, __ATOMIC_SEQ_CST); } // for (const std::pair<VertexID, uint64_t> &m : buffer_recv) { // tmp_s[m.first].second |= m.second; // } } #pragma omp parallel for for (VertexID i = 0; i < num_child_es; ++i) { VertexID v = child_es[i].first, c = child_es[i].second; __atomic_or_fetch(&tmp_s[c].first, 
tmp_s[v].first, __ATOMIC_SEQ_CST); __atomic_or_fetch(&tmp_s[c].second, tmp_s[v].second, __ATOMIC_SEQ_CST); // tmp_s[c].first |= tmp_s[v].first; // tmp_s[c].second |= tmp_s[v].second; } } //#ifdef DEBUG_MESSAGES_ON // {// test // VertexID global_num_sibling_es; // VertexID global_num_child_es; // MPI_Allreduce(&num_sibling_es, // &global_num_sibling_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // MPI_Allreduce(&num_child_es, // &global_num_child_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // if (0 == host_id) { // printf("iter: %u num_sibling_es: %u num_child_es: %u\n", d, global_num_sibling_es, global_num_child_es); // } // //// printf("iter %u @%u host_id: %u num_sibling_es: %u num_child_es: %u\n", d, __LINE__, host_id, num_sibling_es, num_child_es); //// if (0 == d) { //// exit(EXIT_SUCCESS); //// } // } //#endif // Swap que and tmp_que tmp_que.swap(que); end_que = end_tmp_que; end_tmp_que = 0; MPI_Allreduce(&end_que, &global_num_actives, 1, V_ID_Type, MPI_MAX, MPI_COMM_WORLD); // } ++d; } #pragma omp parallel for for (VertexID v_local = 0; v_local < num_masters; ++v_local) { VertexID v_global = G.get_global_vertex_id(v_local); L[v_local].bp_dist[i_bpspt] = tmp_d[v_local]; L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1} L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1} } } } //template <VertexID BATCH_SIZE> //inline void DistBVCPLL<BATCH_SIZE>:: //bit_parallel_push_labels( // const DistGraph &G, // const VertexID v_global, // std::vector<VertexID> &tmp_que, // VertexID &end_tmp_que, // std::vector< std::pair<VertexID, VertexID> > &sibling_es, // VertexID &num_sibling_es, // std::vector< std::pair<VertexID, VertexID> > &child_es, // VertexID &num_child_es, // std::vector<UnweightedDist> &dists, // const UnweightedDist iter) //{ // EdgeID i_start = G.vertices_idx[v_global]; // EdgeID i_bound = i_start + 
G.local_out_degrees[v_global]; //// {//test //// printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]); //// } // for (EdgeID i = i_start; i < i_bound; ++i) { // VertexID tv_global = G.out_edges[i]; // VertexID tv_local = G.get_local_vertex_id(tv_global); // UnweightedDist td = iter + 1; // // if (iter > dists[tv_local]) { // ; // } else if (iter == dists[tv_local]) { // if (v_global < tv_global) { // ??? Why need v < tv !!! Because it's a undirected graph. // sibling_es[num_sibling_es].first = v_global; // sibling_es[num_sibling_es].second = tv_global; // ++num_sibling_es; // } // } else { // iter < dists[tv] // if (dists[tv_local] == MAX_UNWEIGHTED_DIST) { // tmp_que[end_tmp_que++] = tv_global; // dists[tv_local] = td; // } // child_es[num_child_es].first = v_global; // child_es[num_child_es].second = tv_global; // ++num_child_es; //// { //// printf("host_id: %u num_child_es: %u v_global: %u tv_global: %u\n", host_id, num_child_es, v_global, tv_global);//test //// } // } // } // //} // //template <VertexID BATCH_SIZE> //inline void DistBVCPLL<BATCH_SIZE>:: //bit_parallel_labeling( // const DistGraph &G, //// std::vector<IndexType> &L, // std::vector<uint8_t> &used_bp_roots) //{ // // Class type of Bit-Parallel label message unit. 
// struct MsgUnitBP { // VertexID v_global; // uint64_t S_n1; // uint64_t S_0; // // MsgUnitBP() = default; //// MsgUnitBP(MsgUnitBP&& other) = default; //// MsgUnitBP(MsgUnitBP& other) = default; //// MsgUnitBP& operator=(const MsgUnitBP& other) = default; //// MsgUnitBP& operator=(MsgUnitBP&& other) = default; // MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0) // : v_global(v), S_n1(sn1), S_0(s0) { } // }; //// VertexID num_v = G.num_v; //// EdgeID num_e = G.num_e; // EdgeID local_num_edges = G.num_edges_local; // // std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v // std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} // std::vector<VertexID> que(num_masters); // active queue // VertexID end_que = 0; // std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que // VertexID end_tmp_que = 0; // std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0) // std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1. // //// std::vector<UnweightedDist> tmp_d(num_v); // distances from the root to every v //// std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} //// std::vector<VertexID> que(num_v); // active queue //// std::vector<std::pair<VertexID, VertexID> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0) //// std::vector<std::pair<VertexID, VertexID> > child_es(num_e); // child and father, their distances to the root have difference of 1. 
// // VertexID r_global = 0; // root r // for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { // // Select the root r_global // if (0 == host_id) { // while (r_global < num_v && used_bp_roots[r_global]) { // ++r_global; // } // if (r_global == num_v) { // for (VertexID v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST; // } // continue; // } // } // // Broadcast the r here. // message_time -= WallTimer::get_time_mark(); // MPI_Bcast(&r_global, // 1, // V_ID_Type, // 0, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // used_bp_roots[r_global] = 1; //#ifdef DEBUG_MESSAGES_ON // {//test // if (0 == host_id) { // printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt); // } // } //#endif // //// VertexID que_t0 = 0, que_t1 = 0, que_h = 0; // fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST); // fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); // // // Mark the r_global // if (G.get_master_host_id(r_global) == host_id) { // tmp_d[G.get_local_vertex_id(r_global)] = 0; // que[end_que++] = r_global; // } // // Select the r_global's 64 neighbors // { // // Get r_global's neighbors into buffer_send, rank from low to high. 
// VertexID local_degree = G.local_out_degrees[r_global]; // std::vector<VertexID> buffer_send(local_degree); // if (local_degree) { // EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1; // for (VertexID d_i = 0; d_i < local_degree; ++d_i) { // EdgeID e_i = e_i_start - d_i; // buffer_send[d_i] = G.out_edges[e_i]; // } // } // // // Get selected neighbors (up to 64) // std::vector<VertexID> selected_nbrs; // if (0 != host_id) { // // Every host other than 0 sends neighbors to host 0 // message_time -= WallTimer::get_time_mark(); // MPI_Instance::send_buffer_2_dst(buffer_send, // 0, // SENDING_ROOT_NEIGHBORS, // SENDING_SIZE_ROOT_NEIGHBORS); // // Receive selected neighbors from host 0 // MPI_Instance::recv_buffer_from_src(selected_nbrs, // 0, // SENDING_SELECTED_NEIGHBORS, // SENDING_SIZE_SELETED_NEIGHBORS); // message_time += WallTimer::get_time_mark(); // } else { // // Host 0 // // Host 0 receives neighbors from others // std::vector<VertexID> all_nbrs(buffer_send); // std::vector<VertexID > buffer_recv; // for (int loc = 0; loc < num_hosts - 1; ++loc) { // message_time -= WallTimer::get_time_mark(); // MPI_Instance::recv_buffer_from_any(buffer_recv, // SENDING_ROOT_NEIGHBORS, // SENDING_SIZE_ROOT_NEIGHBORS); //// MPI_Instance::receive_dynamic_buffer_from_any(buffer_recv, //// num_hosts, //// SENDING_ROOT_NEIGHBORS); // message_time += WallTimer::get_time_mark(); // if (buffer_recv.empty()) { // continue; // } // // buffer_send.resize(buffer_send.size() + buffer_recv.size()); // std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin()); // all_nbrs.resize(buffer_send.size()); // all_nbrs.assign(buffer_send.begin(), buffer_send.end()); // } // assert(all_nbrs.size() == G.get_global_out_degree(r_global)); // // Select 64 (or less) neighbors // VertexID ns = 0; // number of selected neighbor, default 64 // for (VertexID v_global : all_nbrs) { // if (used_bp_roots[v_global]) { // continue; // } // 
used_bp_roots[v_global] = 1; // selected_nbrs.push_back(v_global); // if (++ns == 64) { // break; // } // } // // Send selected neighbors to other hosts // message_time -= WallTimer::get_time_mark(); // for (int dest = 1; dest < num_hosts; ++dest) { // MPI_Instance::send_buffer_2_dst(selected_nbrs, // dest, // SENDING_SELECTED_NEIGHBORS, // SENDING_SIZE_SELETED_NEIGHBORS); // } // message_time += WallTimer::get_time_mark(); // } //// {//test //// printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size()); //// } // // // Synchronize the used_bp_roots. // for (VertexID v_global : selected_nbrs) { // used_bp_roots[v_global] = 1; // } // // // Mark selected neighbors // for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) { // VertexID v_global = selected_nbrs[v_i]; // if (host_id != G.get_master_host_id(v_global)) { // continue; // } // tmp_que[end_tmp_que++] = v_global; // tmp_d[G.get_local_vertex_id(v_global)] = 1; // tmp_s[v_global].first = 1ULL << v_i; // } // } // // // Reduce the global number of active vertices // VertexID global_num_actives = 1; // UnweightedDist d = 0; // while (global_num_actives) { //// for (UnweightedDist d = 0; que_t0 < que_h; ++d) { // VertexID num_sibling_es = 0, num_child_es = 0; // // // // Send active masters to mirrors // { // std::vector<MsgUnitBP> buffer_send(end_que); // for (VertexID que_i = 0; que_i < end_que; ++que_i) { // VertexID v_global = que[que_i]; // buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second); // } //// {// test //// printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size()); //// } // // for (int root = 0; root < num_hosts; ++root) { // std::vector<MsgUnitBP> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const MsgUnitBP &m : buffer_recv) { // VertexID v_global = m.v_global; // if (!G.local_out_degrees[v_global]) { // continue; // } 
// tmp_s[v_global].first = m.S_n1; // tmp_s[v_global].second = m.S_0; // // Push labels // bit_parallel_push_labels(G, // v_global, // tmp_que, // end_tmp_que, // sibling_es, // num_sibling_es, // child_es, // num_child_es, // tmp_d, // d); // } //// {// test //// printf("host_id: %u root: %u done push.\n", host_id, root); //// } // } // } // // // Update the sets in tmp_s // { // // for (VertexID i = 0; i < num_sibling_es; ++i) { // VertexID v = sibling_es[i].first, w = sibling_es[i].second; // tmp_s[v].second |= tmp_s[w].first; // !!! Need to send back!!! // tmp_s[w].second |= tmp_s[v].first; // // } // // Put into the buffer sending to others // std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es); //// std::vector< std::vector<MPI_Request> > requests_list(num_hosts - 1); // for (VertexID i = 0; i < num_sibling_es; ++i) { // VertexID v = sibling_es[i].first; // VertexID w = sibling_es[i].second; //// buffer_send.emplace_back(v, tmp_s[v].second); //// buffer_send.emplace_back(w, tmp_s[w].second); // buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second); // buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second); // } // // Send the messages // for (int root = 0; root < num_hosts; ++root) { // std::vector< std::pair<VertexID, uint64_t> > buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const std::pair<VertexID, uint64_t> &m : buffer_recv) { // tmp_s[m.first].second |= m.second; // } // } // for (VertexID i = 0; i < num_child_es; ++i) { // VertexID v = child_es[i].first, c = child_es[i].second; // tmp_s[c].first |= tmp_s[v].first; // tmp_s[c].second |= tmp_s[v].second; // } // } ////#ifdef DEBUG_MESSAGES_ON // {// test // VertexID global_num_sibling_es; // VertexID global_num_child_es; // MPI_Allreduce(&num_sibling_es, // &global_num_sibling_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // MPI_Allreduce(&num_child_es, // 
&global_num_child_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // if (0 == host_id) { // printf("iter: %u num_sibling_es: %u num_child_es: %u\n", d, global_num_sibling_es, global_num_child_es); // } // } ////#endif // // // Swap que and tmp_que // tmp_que.swap(que); // end_que = end_tmp_que; // end_tmp_que = 0; // MPI_Allreduce(&end_que, // &global_num_actives, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // //// } // ++d; // } // // for (VertexID v_local = 0; v_local < num_masters; ++v_local) { // VertexID v_global = G.get_global_vertex_id(v_local); // L[v_local].bp_dist[i_bpspt] = tmp_d[v_local]; // L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1} // L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1} // } // } //} //// Function bit parallel checking: //// return false if shortest distance exits in bp labels, return true if bp labels cannot cover the distance //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline bool DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::bit_parallel_checking( // VertexID v_id, // VertexID w_id, // const std::vector<IndexType> &L, // UnweightedDist iter) //{ // // Bit Parallel Checking: if label_real_id to v_tail has shorter distance already // const IndexType &Lv = L[v_id]; // const IndexType &Lw = L[w_id]; // // _mm_prefetch(&Lv.bp_dist[0], _MM_HINT_T0); // _mm_prefetch(&Lv.bp_sets[0][0], _MM_HINT_T0); // _mm_prefetch(&Lw.bp_dist[0], _MM_HINT_T0); // _mm_prefetch(&Lw.bp_sets[0][0], _MM_HINT_T0); // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = Lv.bp_dist[i] + Lw.bp_dist[i]; // Use type VertexID in case of addition of two INF. // if (td - 2 <= iter) { // td += // (Lv.bp_sets[i][0] & Lw.bp_sets[i][0]) ? -2 : // ((Lv.bp_sets[i][0] & Lw.bp_sets[i][1]) | // (Lv.bp_sets[i][1] & Lw.bp_sets[i][0])) // ? 
-1 : 0; // if (td <= iter) { //// ++bp_hit_count; // return false; // } // } // } // return true; //} // Function for initializing at the begin of a batch // For a batch, initialize the temporary labels and real labels of roots; // traverse roots' labels to initialize distance buffer; // unset flag arrays is_active and got_labels template <VertexID BATCH_SIZE> inline VertexID DistBVCPLL<BATCH_SIZE>:: initialization( const DistGraph &G, std::vector<ShortIndex> &short_index, std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::vector<VertexID> > &recved_dist_table, std::vector<BPLabelType> &bp_labels_table, std::vector<VertexID> &active_queue, VertexID &end_active_queue, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated, // VertexID b_id, VertexID roots_start, VertexID roots_size, // std::vector<VertexID> &roots_master_local, const std::vector<uint8_t> &used_bp_roots) { // Get the roots_master_local, containing all local roots. 
std::vector<VertexID> roots_master_local; VertexID size_roots_master_local; VertexID roots_bound = roots_start + roots_size; try { for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) { roots_master_local.push_back(G.get_local_vertex_id(r_global)); // {//test // if (1024 == roots_start && 7 == host_id && 31600 == *roots_master_local.rbegin()) { // printf("S0.0 host_id: %d " // "31600 YES!\n", // host_id); // } // } } } size_roots_master_local = roots_master_local.size(); } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("initialization_roots_master_local: bad_alloc " "host_id: %d " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } // Short_index { if (end_once_candidated_queue >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) { VertexID v_local = once_candidated_queue[v_i]; short_index[v_local].indicator_reset(); once_candidated[v_local] = 0; } } else { for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) { VertexID v_local = once_candidated_queue[v_i]; short_index[v_local].indicator_reset(); once_candidated[v_local] = 0; } } end_once_candidated_queue = 0; if (size_roots_master_local >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) { VertexID r_local = roots_master_local[i_r]; short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself // short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels } } else { for (VertexID r_local : roots_master_local) { short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself // short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels } } } // 
// Real Index try { if (size_roots_master_local >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) { VertexID r_local = roots_master_local[i_r]; IndexType &Lr = L[r_local]; // Lr.batches.emplace_back( // b_id, // Batch ID // Lr.distances.size(), // start_index // 1); // size Lr.distances.emplace_back( Lr.vertices.size(), // start_index 1, // size 0); // dist Lr.vertices.push_back(G.get_global_vertex_id(r_local)); // Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start); } } else { for (VertexID r_local : roots_master_local) { IndexType &Lr = L[r_local]; // Lr.batches.emplace_back( // b_id, // Batch ID // Lr.distances.size(), // start_index // 1); // size Lr.distances.emplace_back( Lr.vertices.size(), // start_index 1, // size 0); // dist Lr.vertices.push_back(G.get_global_vertex_id(r_local)); // Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start); } } } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("initialization_real_index: bad_alloc " "host_id: %d " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } // Dist Table try { // struct LabelTableUnit { // VertexID root_id; // VertexID label_global_id; // UnweightedDist dist; // // LabelTableUnit() = default; // // LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) : // root_id(r), label_global_id(l), dist(d) {} // }; std::vector<LabelTableUnit> buffer_send; // buffer for sending // Dist_matrix { // Deprecated Old method: unpack the IndexType structure before sending. // Okay, it's back. 
if (size_roots_master_local >= THRESHOLD_PARALLEL) { // Offsets for adding labels to buffer_send in parallel std::vector<VertexID> offsets_beffer_send(size_roots_master_local); #pragma omp parallel for for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) { VertexID r_local = roots_master_local[i_r]; offsets_beffer_send[i_r] = L[r_local].vertices.size(); } EdgeID size_labels = PADO::prefix_sum_for_offsets(offsets_beffer_send); buffer_send.resize(size_labels); #pragma omp parallel for for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) { VertexID r_local = roots_master_local[i_r]; VertexID top_location = 0; IndexType &Lr = L[r_local]; VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start; // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // Traverse batches array // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse distances array // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { VertexID dist_bound_index = Lr.distances.size(); for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) { VertexID v_start_index = Lr.distances[dist_i].start_index; VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; UnweightedDist dist = Lr.distances[dist_i].dist; // Traverse vertices array for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // Write into the dist_table // buffer_send[offsets_beffer_send[i_r] + top_location++] = // LabelTableUnit(r_root_id, Lr.vertices[v_i] + id_offset, dist); buffer_send[offsets_beffer_send[i_r] + top_location++] = LabelTableUnit(r_root_id, Lr.vertices[v_i], dist); } } // } } } else { for (VertexID r_local : roots_master_local) { 
// The distance table. IndexType &Lr = L[r_local]; VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start; // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // Traverse batches array // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse distances array // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { VertexID dist_bound_index = Lr.distances.size(); for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) { VertexID v_start_index = Lr.distances[dist_i].start_index; VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; UnweightedDist dist = Lr.distances[dist_i].dist; // Traverse vertices array for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // Write into the dist_table buffer_send.emplace_back(r_root_id, Lr.vertices[v_i], dist); // buffer for sending // buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset, // dist); // buffer for sending } } // } } } } // Broadcast local roots labels for (int root = 0; root < num_hosts; ++root) { std::vector<LabelTableUnit> buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } EdgeID size_buffer_recv = buffer_recv.size(); if (size_buffer_recv >= THRESHOLD_PARALLEL) { std::vector<VertexID> sizes_recved_root_labels(roots_size, 0); #pragma omp parallel for for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) { const LabelTableUnit &l = buffer_recv[i_l]; VertexID root_id = l.root_id; VertexID label_global_id = l.label_global_id; UnweightedDist dist = l.dist; dist_table[root_id][label_global_id] = dist; // Record root_id's number of its received label, for 
later adding to recved_dist_table __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST); // recved_dist_table[root_id].push_back(label_global_id); } // Record the received label in recved_dist_table, for later reset #pragma omp parallel for for (VertexID root_id = 0; root_id < roots_size; ++root_id) { VertexID &size = sizes_recved_root_labels[root_id]; if (size) { recved_dist_table[root_id].resize(size); size = 0; } } #pragma omp parallel for for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) { const LabelTableUnit &l = buffer_recv[i_l]; VertexID root_id = l.root_id; VertexID label_global_id = l.label_global_id; PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], label_global_id); } } else { for (const LabelTableUnit &l : buffer_recv) { VertexID root_id = l.root_id; VertexID label_global_id = l.label_global_id; UnweightedDist dist = l.dist; dist_table[root_id][label_global_id] = dist; // Record the received label in recved_dist_table, for later reset recved_dist_table[root_id].push_back(label_global_id); } } } } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("initialization_dist_table: bad_alloc " "host_id: %d " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } // Build the Bit-Parallel Labels Table try { // struct MsgBPLabel { // VertexID r_root_id; // UnweightedDist bp_dist[BITPARALLEL_SIZE]; // uint64_t bp_sets[BITPARALLEL_SIZE][2]; // // MsgBPLabel() = default; // MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2]) // : r_root_id(r) // { // memcpy(bp_dist, dist, sizeof(bp_dist)); // memcpy(bp_sets, sets, sizeof(bp_sets)); // } // }; // std::vector<MPI_Request> requests_send(num_hosts - 1); std::vector<MsgBPLabel> buffer_send; std::vector<VertexID> roots_queue; for (VertexID r_global = roots_start; 
r_global < roots_bound; ++r_global) { if (G.get_master_host_id(r_global) != host_id) { continue; } roots_queue.push_back(r_global); } VertexID size_roots_queue = roots_queue.size(); if (size_roots_queue >= THRESHOLD_PARALLEL) { buffer_send.resize(size_roots_queue); #pragma omp parallel for for (VertexID i_r = 0; i_r < size_roots_queue; ++i_r) { VertexID r_global = roots_queue[i_r]; VertexID r_local = G.get_local_vertex_id(r_global); VertexID r_root = r_global - roots_start; // Prepare for sending // buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets); buffer_send[i_r] = MsgBPLabel(r_root, L[r_local].bp_dist, L[r_local].bp_sets); } } else { // for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { // if (G.get_master_host_id(r_global) != host_id) { // continue; // } for (VertexID r_global : roots_queue) { VertexID r_local = G.get_local_vertex_id(r_global); VertexID r_root = r_global - roots_start; // Local roots // memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); // memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); // Prepare for sending buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets); } } for (int root = 0; root < num_hosts; ++root) { std::vector<MsgBPLabel> buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } VertexID size_buffer_recv = buffer_recv.size(); if (size_buffer_recv >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) { const MsgBPLabel &m = buffer_recv[i_m]; VertexID r_root = m.r_root_id; memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); } } else { for (const MsgBPLabel &m : buffer_recv) { VertexID r_root = m.r_root_id; 
memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); } } } } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("initialization_bp_labels_table: bad_alloc " "host_id: %d " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } // Active_queue VertexID global_num_actives = 0; // global number of active vertices. { if (size_roots_master_local >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) { VertexID r_local = roots_master_local[i_r]; active_queue[i_r] = r_local; // {//test // if (1024 == roots_start && 7 == host_id && 31600 == r_local) { // printf("S0.0 host_id: %d " // "31600 YES!\n", // host_id); // } // if (1024 == roots_start && 7 == host_id && 0 == i_r) { // printf("S0.0 host_id: %d " // "active_queue[%u]: %u\n", // host_id, // i_r, // active_queue[i_r]); // } // } } end_active_queue = size_roots_master_local; } else { for (VertexID r_local : roots_master_local) { active_queue[end_active_queue++] = r_local; // {//test // if (1024 == roots_start && 7 == host_id && 31600 == r_local) { // printf("S0.0 host_id: %d " // "31600 YES!\n", // host_id); // } // } } { } } // {//test // if (1024 == roots_start && 7 == host_id) { // for (VertexID i_r = 0; i_r < end_active_queue; ++i_r) { // VertexID r_local = active_queue[i_r]; // if (r_local == 31600) { // printf("S0.0 host_id: %d " // "L:%u " // "i_r: %u " // "end_active_queue: %u " // "roots_master_local.size(): %lu " // "size_roots_master_local: %u " // "active_queue[%u]: %u " // "roots_master_local[%u]: %u " // "31600 YES!\n", // host_id, // __LINE__, // i_r, // end_active_queue, // roots_master_local.size(), // size_roots_master_local, // i_r, 
active_queue[i_r], // i_r, roots_master_local[i_r]); // } // } // printf("S0.0 host_id: %d " // "L[31600].distances.size(): %lu " // "L[21956].distances.size(): %lu " // "L[30711].distances.size(): %lu " // "L[31113].distances.size(): %lu\n", // host_id, // L[31600].distances.size(), // L[21956].distances.size(), // L[30711].distances.size(), // L[31113].distances.size()); // } // } // Get the global number of active vertices; // message_time -= WallTimer::get_time_mark(); MPI_Allreduce(&end_active_queue, &global_num_actives, 1, V_ID_Type, // MPI_SUM, MPI_MAX, MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); } return global_num_actives; } // Sequential Version //// Function for initializing at the begin of a batch //// For a batch, initialize the temporary labels and real labels of roots; //// traverse roots' labels to initialize distance buffer; //// unset flag arrays is_active and got_labels //template <VertexID BATCH_SIZE> //inline VertexID DistBVCPLL<BATCH_SIZE>:: //initialization( // const DistGraph &G, // std::vector<ShortIndex> &short_index, // std::vector< std::vector<UnweightedDist> > &dist_table, // std::vector< std::vector<VertexID> > &recved_dist_table, // std::vector<BPLabelType> &bp_labels_table, // std::vector<VertexID> &active_queue, // VertexID &end_active_queue, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<uint8_t> &once_candidated, // VertexID b_id, // VertexID roots_start, // VertexID roots_size, //// std::vector<VertexID> &roots_master_local, // const std::vector<uint8_t> &used_bp_roots) //{ // // Get the roots_master_local, containing all local roots. 
// std::vector<VertexID> roots_master_local; // VertexID roots_bound = roots_start + roots_size; // for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { // if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) { // roots_master_local.push_back(G.get_local_vertex_id(r_global)); // } // } // // Short_index // { // for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) { // VertexID v_local = once_candidated_queue[v_i]; // short_index[v_local].indicator_reset(); // once_candidated[v_local] = 0; // } // end_once_candidated_queue = 0; // for (VertexID r_local : roots_master_local) { // short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself // short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels //// short_index[r_local].indicator.set(G.get_global_vertex_id(r_local) - roots_start); // v itself //// short_index[r_local].indicator.set(BATCH_SIZE); // v got labels // } // } //// // // Real Index // { // for (VertexID r_local : roots_master_local) { // IndexType &Lr = L[r_local]; // Lr.batches.emplace_back( // b_id, // Batch ID // Lr.distances.size(), // start_index // 1); // size // Lr.distances.emplace_back( // Lr.vertices.size(), // start_index // 1, // size // 0); // dist // Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start); // } // } // // // Dist Table // { //// struct LabelTableUnit { //// VertexID root_id; //// VertexID label_global_id; //// UnweightedDist dist; //// //// LabelTableUnit() = default; //// //// LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) : //// root_id(r), label_global_id(l), dist(d) {} //// }; // std::vector<LabelTableUnit> buffer_send; // buffer for sending // // Dist_matrix // { // // Deprecated Old method: unpack the IndexType structure before sending. // for (VertexID r_local : roots_master_local) { // // The distance table. 
// IndexType &Lr = L[r_local]; // VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start; // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // // Traverse batches array // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse distances array // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID v_start_index = Lr.distances[dist_i].start_index; // VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; // UnweightedDist dist = Lr.distances[dist_i].dist; // // Traverse vertices array // for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // // Write into the dist_table //// dist_table[r_root_id][Lr.vertices[v_i] + id_offset] = dist; // distance table // buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset, // dist); // buffer for sending // } // } // } // } // } // // Broadcast local roots labels // for (int root = 0; root < num_hosts; ++root) { // std::vector<LabelTableUnit> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const LabelTableUnit &l : buffer_recv) { // VertexID root_id = l.root_id; // VertexID label_global_id = l.label_global_id; // UnweightedDist dist = l.dist; // dist_table[root_id][label_global_id] = dist; // // Record the received label in recved_dist_table, for later reset // recved_dist_table[root_id].push_back(label_global_id); // } // } // } // // // Build the Bit-Parallel Labels Table // { //// struct MsgBPLabel { //// VertexID r_root_id; //// UnweightedDist bp_dist[BITPARALLEL_SIZE]; //// uint64_t bp_sets[BITPARALLEL_SIZE][2]; 
//// //// MsgBPLabel() = default; //// MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2]) //// : r_root_id(r) //// { //// memcpy(bp_dist, dist, sizeof(bp_dist)); //// memcpy(bp_sets, sets, sizeof(bp_sets)); //// } //// }; //// std::vector<MPI_Request> requests_send(num_hosts - 1); // std::vector<MsgBPLabel> buffer_send; // for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { // if (G.get_master_host_id(r_global) != host_id) { // continue; // } // VertexID r_local = G.get_local_vertex_id(r_global); // VertexID r_root = r_global - roots_start; // // Local roots //// memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); //// memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); // // Prepare for sending // buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets); // } // // for (int root = 0; root < num_hosts; ++root) { // std::vector<MsgBPLabel> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const MsgBPLabel &m : buffer_recv) { // VertexID r_root = m.r_root_id; // memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); // memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); // } // } // } // // // TODO: parallel enqueue // // Active_queue // VertexID global_num_actives = 0; // global number of active vertices. 
// { // for (VertexID r_local : roots_master_local) { // active_queue[end_active_queue++] = r_local; // } // // Get the global number of active vertices; // message_time -= WallTimer::get_time_mark(); // MPI_Allreduce(&end_active_queue, // &global_num_actives, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // } // // return global_num_actives; //} //// Function: push v_head_global's newly added labels to its all neighbors. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //push_single_label( // VertexID v_head_global, // VertexID label_root_id, // VertexID roots_start, // const DistGraph &G, // std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<bool> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<bool> &once_candidated, // const std::vector<BPLabelType> &bp_labels_table, // const std::vector<uint8_t> &used_bp_roots, // UnweightedDist iter) //{ // const BPLabelType &L_label = bp_labels_table[label_root_id]; // VertexID label_global_id = label_root_id + roots_start; // EdgeID e_i_start = G.vertices_idx[v_head_global]; // EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global]; // for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) { // VertexID v_tail_global = G.out_edges[e_i]; // if (used_bp_roots[v_tail_global]) { // continue; // } // if (v_tail_global < roots_start) { // all remaining v_tail_global has higher rank than any roots, then no roots can push new labels to it. 
// return; // } // // VertexID v_tail_local = G.get_local_vertex_id(v_tail_global); // const IndexType &L_tail = L[v_tail_local]; // if (v_tail_global <= label_global_id) { // // remaining v_tail_global has higher rank than the label // return; // } // ShortIndex &SI_v_tail = short_index[v_tail_local]; // if (SI_v_tail.indicator[label_root_id]) { // // The label is already selected before // continue; // } // // Record label_root_id as once selected by v_tail_global // SI_v_tail.indicator.set(label_root_id); // // Add into once_candidated_queue // // if (!once_candidated[v_tail_local]) { // // If v_tail_global is not in the once_candidated_queue yet, add it in // once_candidated[v_tail_local] = true; // once_candidated_queue[end_once_candidated_queue++] = v_tail_local; // } // // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already // // ++total_check_count; //// const IndexType &L_label = L[label_global_id]; //// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); //// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); //// bp_checking_ins_count.measure_start(); // bool no_need_add = false; // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i]; // if (td - 2 <= iter) { // td += // (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : // ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | // (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) // ? 
-1 : 0;
//            if (td <= iter) {
//                no_need_add = true;
////                ++bp_hit_count;
//                break;
//            }
//        }
//    }
//    if (no_need_add) {
////        bp_checking_ins_count.measure_stop();
//        continue;
//    }
////    bp_checking_ins_count.measure_stop();
//    if (SI_v_tail.is_candidate[label_root_id]) {
//        continue;
//    }
//    SI_v_tail.is_candidate[label_root_id] = true;
//    SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
//
//    if (!got_candidates[v_tail_local]) {
//        // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
//        got_candidates[v_tail_local] = true;
//        got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
//    }
//    }
////    {// Just for the complain from the compiler
////        assert(iter >= iter);
////    }
//}

// Broadcast the newest labels of one slice of the active queue to all hosts
// and push them to local neighbors, in parallel.
//
// Phase 1 (pack): for every active master vertex in
// [global_start, global_start + min(global_size, local_size)), copy the
// vertex's most recently inserted label batch (the LAST element of
// L[v].distances) into buffer_send_labels, plus a per-vertex
// (global_id, label_count) record in buffer_send_indices.  Write offsets
// come from prefix_sum_for_offsets so the packing loop is race-free.
// Also clears is_active for every processed vertex.
//
// Phase 2 (exchange + push): each host in turn broadcasts its two buffers
// via one_host_bcasts_buffer_to_buffer; the received labels are pushed to
// local out-neighbors by local_push_labels_para(), which fills a disjoint
// segment of tmp_got_candidates_queue / tmp_once_candidated_queue per
// sender vertex; the segments are then compacted into
// got_candidates_queue / once_candidated_queue (and their end counters).
//
// NOTE(review): the packing loops assume every vertex in the active slice
// has a non-empty L[v].distances (they dereference distances.rbegin()) —
// this appears guaranteed by the batch initialization; confirm.
template<VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
schedule_label_pushing_para(
        const DistGraph &G,
        const VertexID roots_start,
        const std::vector<uint8_t> &used_bp_roots,
        const std::vector<VertexID> &active_queue,
        const VertexID global_start,   // start of this slice in active_queue
        const VertexID global_size,    // slice size agreed across hosts
        const VertexID local_size,     // number of locally active vertices
//        const VertexID start_active_queue,
//        const VertexID size_active_queue,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<ShortIndex> &short_index,
        const std::vector<BPLabelType> &bp_labels_table,
        std::vector<uint8_t> &got_candidates,
        std::vector<uint8_t> &is_active,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        const UnweightedDist iter)
{
    std::vector<std::pair<VertexID, VertexID> > buffer_send_indices;
        //.first: Vertex ID
        //.second: size of labels
    std::vector<VertexID> buffer_send_labels;
    if (local_size) {
        const VertexID start_active_queue = global_start;
        // Clamp the globally agreed slice size to what this host actually has.
        const VertexID size_active_queue = global_size <= local_size ?
                global_size : local_size;
        const VertexID bound_active_queue = start_active_queue + size_active_queue;
        buffer_send_indices.resize(size_active_queue);
        // Pass 1: per-vertex label counts, turned into write offsets below.
        std::vector<VertexID> offsets_buffer_locs(size_active_queue);
#pragma omp parallel for
        for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
            VertexID v_head_local = active_queue[i_q];
            is_active[v_head_local] = 0; // reset is_active
            const IndexType &Lv = L[v_head_local];
            // Size of the vertex's most recent label batch.
            offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size;
        }
        // In-place exclusive prefix sum; returns the total label count.
        EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
        try {
            buffer_send_labels.resize(size_buffer_send_labels);
        } catch (const std::bad_alloc &) {
            // Out of memory: report sizes for post-mortem, then abort this rank.
            double memtotal = 0;
            double memfree = 0;
            PADO::Utils::system_memory(memtotal, memfree);
            // NOTE(review): %lu assumes EdgeID is unsigned long — confirm on
            // platforms where EdgeID is 32-bit or unsigned long long.
            printf("schedule_label_pushing_para.buffer_send_labels: bad_alloc "
                           "host_id: %d "
                           "size_buffer_send_labels: %lu "
                           "L.size(): %.2fGB "
                           "memtotal: %.2fGB "
                           "memfree: %.2fGB\n",
                    host_id,
                    size_buffer_send_labels,
                    get_index_size() * 1.0 / (1 << 30),
                    memtotal / 1024,
                    memfree / 1024);
            exit(1);
        }
        // Pass 2: parallel packing; each iteration writes its own disjoint
        // [offsets_buffer_locs[i], …) segment, so no synchronization is needed.
        // Build buffer_send_labels by parallel inserting
#pragma omp parallel for
        for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
            VertexID v_head_local = active_queue[i_q];
            is_active[v_head_local] = 0; // reset is_active (idempotent redo of pass 1)
            VertexID v_head_global = G.get_global_vertex_id(v_head_local);
            const IndexType &Lv = L[v_head_local];
            // Prepare the buffer_send_indices
            VertexID tmp_i_q = i_q - start_active_queue;
            buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
            // These 2 index are used for traversing v_head's last inserted labels
            VertexID l_i_start = Lv.distances.rbegin()->start_index;
            VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
            VertexID top_labels = offsets_buffer_locs[tmp_i_q];
            for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
                // Labels are sent as batch-relative root IDs.
                VertexID label_root_id = Lv.vertices[l_i] - roots_start;
                buffer_send_labels[top_labels++] = label_root_id;
//                buffer_send_labels.push_back(label_root_id);
            }
        }
    }
    // (A commented-out earlier variant of the packing phase, plus debug-print
    // blocks, previously lived here; elided for clarity.)

    // Phase 2: every host takes a turn as broadcast root.
    for (int root = 0; root < num_hosts; ++root) {
        // Get the indices
        std::vector<std::pair<VertexID, VertexID> > indices_buffer;
        one_host_bcasts_buffer_to_buffer(root,
                buffer_send_indices,
                indices_buffer);
        // indices_buffer comes from the same root on every host, so this
        // early-continue is taken (or not) collectively — presumably keeping
        // the following broadcast call consistent across ranks; confirm
        // against one_host_bcasts_buffer_to_buffer's contract.
        if (indices_buffer.empty()) {
            continue;
        }
        // Get the labels
        std::vector<VertexID> labels_buffer;
        one_host_bcasts_buffer_to_buffer(root,
                buffer_send_labels,
                labels_buffer);
        VertexID size_indices_buffer = indices_buffer.size();
        // Per-sender start offsets into labels_buffer (exclusive prefix sum).
        // Prepare the offsets for reading indices_buffer
        std::vector<EdgeID> starts_locs_index(size_indices_buffer);
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
            starts_locs_index[i_i] = e.second;
        }
        EdgeID total_recved_labels = PADO::prefix_sum_for_offsets(starts_locs_index);
        // Per-sender segment offsets into the temporary candidate queues,
        // sized by each sender's local out-degree (upper bound on pushes).
        // Prepare the offsets for inserting v_tails into queue
        std::vector<VertexID> offsets_tmp_queue(size_indices_buffer);
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
            offsets_tmp_queue[i_i] = G.local_out_degrees[e.first];
        }
        EdgeID num_ngbrs = PADO::prefix_sum_for_offsets(offsets_tmp_queue);
        std::vector<VertexID> tmp_got_candidates_queue;
        std::vector<VertexID> sizes_tmp_got_candidates_queue;
        std::vector<VertexID> tmp_once_candidated_queue;
        std::vector<VertexID> sizes_tmp_once_candidated_queue;
        try {
            tmp_got_candidates_queue.resize(num_ngbrs);
            sizes_tmp_got_candidates_queue.resize(size_indices_buffer, 0);
            tmp_once_candidated_queue.resize(num_ngbrs);
            sizes_tmp_once_candidated_queue.resize(size_indices_buffer, 0);
        } catch (const std::bad_alloc &) {
            double memtotal = 0;
            double memfree = 0;
            PADO::Utils::system_memory(memtotal, memfree);
            // NOTE(review): %lu for num_ngbrs assumes EdgeID is unsigned long;
            // confirm the format matches EdgeID's actual definition.
            printf("schedule_label_pushing_para.tmp_queues: bad_alloc "
                           "host_id: %d "
                           "num_ngbrs: %lu "
                           "size_indices_buffer: %u "
                           "L.size(): %.2fGB "
                           "memtotal: %.2fGB "
                           "memfree: %.2fGB\n",
                    host_id,
                    num_ngbrs,
                    size_indices_buffer,
                    get_index_size() * 1.0 / (1 << 30),
                    memtotal / 1024,
                    memfree / 1024);
            exit(1);
        }
        // Push every sender's labels to this host's local neighbors.  Each
        // iteration writes only its own tmp-queue segment and its own size
        // counter; cross-vertex races inside short_index/got_candidates are
        // handled by CAS inside local_push_labels_para.
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            VertexID v_head_global = indices_buffer[i_i].first;
            EdgeID start_index = starts_locs_index[i_i];
            // starts_locs_index now holds exclusive-prefix starts, so the
            // bound is the next start (or the grand total for the last entry).
            EdgeID bound_index = i_i != size_indices_buffer - 1 ?
                    starts_locs_index[i_i + 1] :
                    total_recved_labels;
            if (G.local_out_degrees[v_head_global]) {
                local_push_labels_para(
                        v_head_global,
                        start_index,
                        bound_index,
                        roots_start,
                        labels_buffer,
                        G,
                        short_index,
//                        std::vector<VertexID> &got_candidates_queue,
//                        VertexID &end_got_candidates_queue,
                        tmp_got_candidates_queue,
                        sizes_tmp_got_candidates_queue[i_i],
                        offsets_tmp_queue[i_i],
                        got_candidates,
//                        std::vector<VertexID> &once_candidated_queue,
//                        VertexID &end_once_candidated_queue,
                        tmp_once_candidated_queue,
                        sizes_tmp_once_candidated_queue[i_i],
                        once_candidated,
                        bp_labels_table,
                        used_bp_roots,
                        iter);
            }
        }
        {// Collect elements from tmp_got_candidates_queue to got_candidates_queue
            VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_got_candidates_queue);
            PADO::collect_into_queue(
                    tmp_got_candidates_queue,
                    offsets_tmp_queue, // the locations for reading tmp_got_candidate_queue
                    sizes_tmp_got_candidates_queue, // the locations for writing got_candidate_queue
                    total_new,
                    got_candidates_queue,
                    end_got_candidates_queue);
        }
        {// Collect elements from tmp_once_candidated_queue to once_candidated_queue
            VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_once_candidated_queue);
            PADO::collect_into_queue(
                    tmp_once_candidated_queue,
                    offsets_tmp_queue, // the locations for reading tmp_once_candidats_queue
                    sizes_tmp_once_candidated_queue, // the locations for writing once_candidated_queue
                    total_new,
                    once_candidated_queue,
                    end_once_candidated_queue);
        }
    }
}

// Function: pushes v_head's labels to v_head's every (master) neighbor.
// Thread-safe variant: may run concurrently for many v_head's.  For each
// local out-neighbor of v_head_global, each received label (batch-relative
// root id in labels_buffer[start_index, bound_index)) is considered as a
// label candidate for the neighbor unless (a) the neighbor outranks the
// label, (b) the label was already seen (indicator), or (c) a bit-parallel
// shortest-path check proves an equal-or-shorter path already exists.
// New candidates and newly touched vertices go into this caller's private
// segment [offset_tmp_queue, …) of the two tmp queues; all shared flag
// arrays are claimed with CAS so only one thread enqueues each item.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
local_push_labels_para(
        const VertexID v_head_global,
        const EdgeID start_index,
        const EdgeID bound_index,
        const VertexID roots_start,
        const std::vector<VertexID> &labels_buffer,
        const DistGraph &G,
        std::vector<ShortIndex> &short_index,
//        std::vector<VertexID> &got_candidates_queue,
//        VertexID &end_got_candidates_queue,
        std::vector<VertexID> &tmp_got_candidates_queue,
        VertexID &size_tmp_got_candidates_queue,
        const VertexID offset_tmp_queue,
        std::vector<uint8_t> &got_candidates,
//        std::vector<VertexID> &once_candidated_queue,
//        VertexID &end_once_candidated_queue,
        std::vector<VertexID> &tmp_once_candidated_queue,
        VertexID &size_tmp_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        const std::vector<BPLabelType> &bp_labels_table,
        const std::vector<uint8_t> &used_bp_roots,
        const UnweightedDist iter)
{
    // Traverse v_head's every neighbor v_tail
    EdgeID e_i_start = G.vertices_idx[v_head_global];
    EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
    for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
        VertexID v_tail_global = G.out_edges[e_i];
        if (used_bp_roots[v_tail_global]) {
            continue;
        }
        if (v_tail_global < roots_start) { // v_tail_global has higher rank than any roots, then no roots can push new labels to it.
            // NOTE(review): this early return presumes out-neighbors are
            // ordered so that all later ones also rank above the roots —
            // confirm G.out_edges is sorted by global vertex ID.
            return;
        }
        VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
        const IndexType &L_tail = L[v_tail_local];
        ShortIndex &SI_v_tail = short_index[v_tail_local];
        // Traverse v_head's last inserted labels
        for (VertexID l_i = start_index; l_i < bound_index; ++l_i) {
            VertexID label_root_id = labels_buffer[l_i];
            VertexID label_global_id = label_root_id + roots_start;
            if (v_tail_global <= label_global_id) {
                // v_tail_global has higher rank than the label
                continue;
            }
//            if (SI_v_tail.indicator[label_root_id]) {
//                // The label is already selected before
//                continue;
//            }
//            // Record label_root_id as once selected by v_tail_global
//            SI_v_tail.indicator[label_root_id] = 1;
            {// Deal with race condition
                // Only the thread whose CAS flips 0 -> 1 proceeds; all
                // others see the label as already selected.
                if (!PADO::CAS(SI_v_tail.indicator.data() + label_root_id,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    // The label is already selected before
                    continue;
                }
            }
            // Add into once_candidated_queue
            if (!once_candidated[v_tail_local]) {
                // If v_tail_global is not in the once_candidated_queue yet, add it in
                // (CAS guarantees exactly one thread enqueues it).
                if (PADO::CAS(once_candidated.data() + v_tail_local,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    tmp_once_candidated_queue[offset_tmp_queue + size_tmp_once_candidated_queue++] = v_tail_local;
                }
//                once_candidated[v_tail_local] = 1;
//                once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
            }
            // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
//            const IndexType &L_label = L[label_global_id];
//            _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
//            _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
            const BPLabelType &L_label = bp_labels_table[label_root_id];
            bool no_need_add = false;
            for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
                // NOTE(review): if VertexID is unsigned and td < 2, `td - 2`
                // wraps; presumably bp_dist sentinels keep td large enough —
                // confirm.
                VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
                if (td - 2 <= iter) {
                    // Shared 1-hop set tightens by 2; one-sided by 1.
                    td +=
                            (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
                            ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
                             (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
                            ?
                            -1 : 0;
                    if (td <= iter) {
                        no_need_add = true;
                        break;
                    }
                }
            }
            if (no_need_add) {
                continue;
            }
//            if (SI_v_tail.is_candidate[label_root_id]) {
//                continue;
//            }
//            SI_v_tail.is_candidate[label_root_id] = 1;
//            SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
            if (!SI_v_tail.is_candidate[label_root_id]) {
                if (CAS(SI_v_tail.is_candidate.data() + label_root_id,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    // Thread-safe append into the per-vertex candidate queue.
                    PADO::TS_enqueue(SI_v_tail.candidates_que,
                            SI_v_tail.end_candidates_que,
                            label_root_id);
                }
            }
            // Add into got_candidates queue
//            if (!got_candidates[v_tail_local]) {
//                // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
//                got_candidates[v_tail_local] = 1;
//                got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
//            }
            if (!got_candidates[v_tail_local]) {
                if (CAS(got_candidates.data() + v_tail_local,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    tmp_got_candidates_queue[offset_tmp_queue + size_tmp_got_candidates_queue++] = v_tail_local;
                }
            }
        }
    }
//    {
//        assert(iter >= iter);
//    }
}

// Function: pushes v_head's labels to v_head's every (master) neighbor
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
local_push_labels_seq(
        VertexID v_head_global,
        EdgeID start_index,
        EdgeID bound_index,
        VertexID roots_start,
        const std::vector<VertexID> &labels_buffer,
        const DistGraph &G,
        std::vector<ShortIndex> &short_index,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<uint8_t> &got_candidates,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        const std::vector<BPLabelType> &bp_labels_table,
        const std::vector<uint8_t> &used_bp_roots,
        const UnweightedDist iter)
{
    // Traverse v_head's every neighbor v_tail
    EdgeID e_i_start = G.vertices_idx[v_head_global];
    EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
    for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
VertexID v_tail_global = G.out_edges[e_i]; if (used_bp_roots[v_tail_global]) { continue; } if (v_tail_global < roots_start) { // v_tail_global has higher rank than any roots, then no roots can push new labels to it. return; } // Traverse v_head's last inserted labels for (VertexID l_i = start_index; l_i < bound_index; ++l_i) { VertexID label_root_id = labels_buffer[l_i]; VertexID label_global_id = label_root_id + roots_start; if (v_tail_global <= label_global_id) { // v_tail_global has higher rank than the label continue; } VertexID v_tail_local = G.get_local_vertex_id(v_tail_global); const IndexType &L_tail = L[v_tail_local]; ShortIndex &SI_v_tail = short_index[v_tail_local]; if (SI_v_tail.indicator[label_root_id]) { // The label is already selected before continue; } // Record label_root_id as once selected by v_tail_global SI_v_tail.indicator[label_root_id] = 1; // SI_v_tail.indicator.set(label_root_id); // Add into once_candidated_queue if (!once_candidated[v_tail_local]) { // If v_tail_global is not in the once_candidated_queue yet, add it in once_candidated[v_tail_local] = 1; once_candidated_queue[end_once_candidated_queue++] = v_tail_local; } // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already // const IndexType &L_label = L[label_global_id]; // _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); // _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); const BPLabelType &L_label = bp_labels_table[label_root_id]; bool no_need_add = false; for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i]; if (td - 2 <= iter) { td += (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) ? 
-1 : 0; if (td <= iter) { no_need_add = true; break; } } } if (no_need_add) { continue; } if (SI_v_tail.is_candidate[label_root_id]) { continue; } SI_v_tail.is_candidate[label_root_id] = 1; SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; if (!got_candidates[v_tail_local]) { // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate) got_candidates[v_tail_local] = 1; got_candidates_queue[end_got_candidates_queue++] = v_tail_local; } } } // { // assert(iter >= iter); // } } //// Function: pushes v_head's labels to v_head's every (master) neighbor //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //local_push_labels( // VertexID v_head_local, // VertexID roots_start, // const DistGraph &G, // std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<bool> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<bool> &once_candidated, // const std::vector<BPLabelType> &bp_labels_table, // const std::vector<uint8_t> &used_bp_roots, // UnweightedDist iter) //{ // // The data structure of a message //// std::vector< LabelUnitType > buffer_recv; // const IndexType &Lv = L[v_head_local]; // // These 2 index are used for traversing v_head's last inserted labels // VertexID l_i_start = Lv.distances.rbegin() -> start_index; // VertexID l_i_bound = l_i_start + Lv.distances.rbegin() -> size; // // Traverse v_head's every neighbor v_tail // VertexID v_head_global = G.get_global_vertex_id(v_head_local); // EdgeID e_i_start = G.vertices_idx[v_head_global]; // EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global]; // for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) { // VertexID v_tail_global = G.out_edges[e_i]; // if (used_bp_roots[v_tail_global]) { // continue; // } // if (v_tail_global < roots_start) { // 
v_tail_global has higher rank than any roots, then no roots can push new labels to it. // return; // } // // // Traverse v_head's last inserted labels // for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) { // VertexID label_root_id = Lv.vertices[l_i]; // VertexID label_global_id = label_root_id + roots_start; // if (v_tail_global <= label_global_id) { // // v_tail_global has higher rank than the label // continue; // } // VertexID v_tail_local = G.get_local_vertex_id(v_tail_global); // const IndexType &L_tail = L[v_tail_local]; // ShortIndex &SI_v_tail = short_index[v_tail_local]; // if (SI_v_tail.indicator[label_root_id]) { // // The label is already selected before // continue; // } // // Record label_root_id as once selected by v_tail_global // SI_v_tail.indicator.set(label_root_id); // // Add into once_candidated_queue // // if (!once_candidated[v_tail_local]) { // // If v_tail_global is not in the once_candidated_queue yet, add it in // once_candidated[v_tail_local] = true; // once_candidated_queue[end_once_candidated_queue++] = v_tail_local; // } // // // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already // // ++total_check_count; //// const IndexType &L_label = L[label_global_id]; //// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); //// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); //// bp_checking_ins_count.measure_start(); // const BPLabelType &L_label = bp_labels_table[label_root_id]; // bool no_need_add = false; // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i]; // if (td - 2 <= iter) { // td += // (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : // ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | // (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) // ? 
-1 : 0; // if (td <= iter) { // no_need_add = true; //// ++bp_hit_count; // break; // } // } // } // if (no_need_add) { //// bp_checking_ins_count.measure_stop(); // continue; // } //// bp_checking_ins_count.measure_stop(); // if (SI_v_tail.is_candidate[label_root_id]) { // continue; // } // SI_v_tail.is_candidate[label_root_id] = true; // SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; // // if (!got_candidates[v_tail_local]) { // // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate) // got_candidates[v_tail_local] = true; // got_candidates_queue[end_got_candidates_queue++] = v_tail_local; // } // } // } // // { // assert(iter >= iter); // } //} //// DEPRECATED Function: in the scatter phase, synchronize local masters to mirrors on other hosts //// Has some mysterious problem: when I call this function, some hosts will receive wrong messages; when I copy all //// code of this function into the caller, all messages become right. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //sync_masters_2_mirrors( // const DistGraph &G, // const std::vector<VertexID> &active_queue, // VertexID end_active_queue, // std::vector< std::pair<VertexID, VertexID> > &buffer_send, // std::vector<MPI_Request> &requests_send //) //{ //// std::vector< std::pair<VertexID, VertexID> > buffer_send; // // pair.first: Owener vertex ID of the label // // pair.first: label vertex ID of the label // // Prepare masters' newly added labels for sending // for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) { // VertexID v_head_local = active_queue[i_q]; // VertexID v_head_global = G.get_global_vertex_id(v_head_local); // const IndexType &Lv = L[v_head_local]; // // These 2 index are used for traversing v_head's last inserted labels // VertexID l_i_start = Lv.distances.rbegin()->start_index; // VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size; // for (VertexID l_i = 
l_i_start; l_i < l_i_bound; ++l_i) { // VertexID label_root_id = Lv.vertices[l_i]; // buffer_send.emplace_back(v_head_global, label_root_id); //// {//test //// if (1 == host_id) { //// printf("@%u host_id: %u v_head_global: %u\n", __LINE__, host_id, v_head_global);// //// } //// } // } // } // { // if (!buffer_send.empty()) { // printf("@%u host_id: %u sync_masters_2_mirrors: buffer_send.size: %lu buffer_send[0]:(%u %u)\n", __LINE__, host_id, buffer_send.size(), buffer_send[0].first, buffer_send[0].second); // } // assert(!requests_send.empty()); // } // // // Send messages // for (int loc = 0; loc < num_hosts - 1; ++loc) { // int dest_host_id = G.buffer_send_list_loc_2_master_host_id(loc); // MPI_Isend(buffer_send.data(), // MPI_Instance::get_sending_size(buffer_send), // MPI_CHAR, // dest_host_id, // SENDING_MASTERS_TO_MIRRORS, // MPI_COMM_WORLD, // &requests_send[loc]); // { // if (!buffer_send.empty()) { // printf("@%u host_id: %u dest_host_id: %u buffer_send.size: %lu buffer_send[0]:(%u %u)\n", __LINE__, host_id, dest_host_id, buffer_send.size(), buffer_send[0].first, buffer_send[0].second); // } // } // } //} template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: schedule_label_inserting_para( const DistGraph &G, const VertexID roots_start, const VertexID roots_size, std::vector<ShortIndex> &short_index, const std::vector< std::vector<UnweightedDist> > &dist_table, const std::vector<VertexID> &got_candidates_queue, const VertexID start_got_candidates_queue, const VertexID size_got_candidates_queue, std::vector<uint8_t> &got_candidates, std::vector<VertexID> &active_queue, VertexID &end_active_queue, std::vector<uint8_t> &is_active, std::vector< std::pair<VertexID, VertexID> > &buffer_send, const VertexID iter) { const VertexID bound_got_candidates_queue = start_got_candidates_queue + size_got_candidates_queue; std::vector<VertexID> offsets_tmp_active_queue; std::vector<VertexID> tmp_active_queue; std::vector<VertexID> sizes_tmp_active_queue; 
    // Per-slot scratch for assembling buffer_send entries in parallel (slot capacity
    // = that vertex's candidate count; only root vertices get nonzero capacity).
    std::vector<EdgeID> offsets_tmp_buffer_send;
    std::vector< std::pair<VertexID, VertexID> > tmp_buffer_send;
    std::vector<EdgeID> sizes_tmp_buffer_send;
    EdgeID total_send_labels;
    try {
        offsets_tmp_active_queue.resize(size_got_candidates_queue);
#pragma omp parallel for
        for (VertexID i_q = 0; i_q < size_got_candidates_queue; ++i_q) {
            offsets_tmp_active_queue[i_q] = i_q;
        }
        tmp_active_queue.resize(size_got_candidates_queue);
        sizes_tmp_active_queue.resize(size_got_candidates_queue, 0); // Size will only be 0 or 1, but it will become offsets eventually.

        // Prepare for parallel buffer_send
//        std::vector<EdgeID> offsets_tmp_buffer_send(size_got_candidates_queue);
        offsets_tmp_buffer_send.resize(size_got_candidates_queue);
#pragma omp parallel for
        for (VertexID i_q = start_got_candidates_queue; i_q < bound_got_candidates_queue; ++i_q) {
            VertexID v_id_local = got_candidates_queue[i_q];
            VertexID v_global_id = G.get_global_vertex_id(v_id_local);
            VertexID tmp_i_q = i_q - start_got_candidates_queue; // chunk-relative slot index
            if (v_global_id >= roots_start && v_global_id < roots_start + roots_size) {
                // If v_global_id is root, its new labels should be put into buffer_send
                offsets_tmp_buffer_send[tmp_i_q] = short_index[v_id_local].end_candidates_que;
            } else {
                offsets_tmp_buffer_send[tmp_i_q] = 0;
            }
        }
        total_send_labels = PADO::prefix_sum_for_offsets(offsets_tmp_buffer_send);
        tmp_buffer_send.resize(total_send_labels);
        sizes_tmp_buffer_send.resize(size_got_candidates_queue, 0);
    }
    catch (const std::bad_alloc &) {
        // Out of memory while sizing the scratch buffers: report and abort; there is
        // no recovery path at this stage of the batch.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        // NOTE(review): %u assumes VertexID/EdgeID (and __LINE__) fit unsigned int --
        // confirm, or switch to the matching PRIu64/%zu specifiers.
        printf("L%u_tmp_buffer_send: bad_alloc "
               "host_id: %d "
               "iter: %u "
               "size_got_candidates_queue: %u "
               "total_send_labels: %u "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               __LINE__,
               host_id,
               iter,
               size_got_candidates_queue,
               total_send_labels,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }

    // Each iteration owns slot tmp_i_queue exclusively, so the writes into the
    // tmp_* arrays below are race-free without atomics.
#pragma omp parallel for
    for (VertexID i_queue = start_got_candidates_queue;
            i_queue < bound_got_candidates_queue; ++i_queue) {
        VertexID v_id_local = got_candidates_queue[i_queue];
        VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
        got_candidates[v_id_local] = 0; // reset got_candidates
        // Traverse v_id's all candidates
        VertexID tmp_i_queue = i_queue - start_got_candidates_queue;
        VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
        for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
            VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
            short_index[v_id_local].is_candidate[cand_root_id] = 0;
            // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
            if (distance_query(
                    cand_root_id,
                    v_id_local,
                    roots_start,
//                    L,
                    dist_table,
                    iter)) {
                if (!is_active[v_id_local]) {
                    is_active[v_id_local] = 1;
//                    active_queue[end_active_queue++] = v_id_local;
                    tmp_active_queue[tmp_i_queue + sizes_tmp_active_queue[tmp_i_queue]++] = v_id_local;
                }
                ++inserted_count;
                // The candidate cand_root_id needs to be added into v_id's label
                insert_label_only_para(
                    cand_root_id,
                    v_id_local,
                    roots_start,
                    roots_size,
                    G,
                    tmp_buffer_send,
                    sizes_tmp_buffer_send[tmp_i_queue],
                    offsets_tmp_buffer_send[tmp_i_queue]);
//                    buffer_send);
            }
        }
        short_index[v_id_local].end_candidates_que = 0;
        if (0 != inserted_count) {
            // Update other arrays in L[v_id] if new labels were inserted in this iteration
            update_label_indices(
                v_id_local,
                inserted_count,
//                L,
//                short_index,
//                b_id,
                iter);
        }
    }

    {// Collect elements from tmp_active_queue to active_queue
        VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_active_queue);
        PADO::collect_into_queue(
            tmp_active_queue,
            offsets_tmp_active_queue,
            sizes_tmp_active_queue,
            total_new,
            active_queue,
            end_active_queue);
    }
    {// Collect elements from tmp_buffer_send to buffer_send
        EdgeID old_size_buffer_send = buffer_send.size();
        EdgeID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_buffer_send);
        try {
            // Append after the entries already accumulated by earlier chunks.
            buffer_send.resize(total_new + old_size_buffer_send);
        }
        catch (const std::bad_alloc &) {
            double memtotal = 0;
            double memfree = 0;
            PADO::Utils::system_memory(memtotal, memfree);
            printf("L%u_buffer_send: bad_alloc "
                   "iter: %u "
                   "host_id: %d "
                   "L.size(): %.2fGB "
                   "memtotal: %.2fGB "
                   "memfree: %.2fGB\n",
                   __LINE__,
                   iter,
                   host_id,
                   get_index_size() * 1.0 / (1 << 30),
                   memtotal / 1024,
                   memfree / 1024);
            exit(1);
        }
//        EdgeID zero_size = 0;
        PADO::collect_into_queue(
            tmp_buffer_send,
            offsets_tmp_buffer_send,
            sizes_tmp_buffer_send,
            total_new,
            buffer_send,
            old_size_buffer_send);
//            zero_size);
    }
}

// Function for distance query;
// traverse vertex v_id's labels;
// return false if shorter distance exists already, return true if the cand_root_id can be added into v_id's label.
// For every existing label hub v of v_id: if d(v_id, v) + d(v, cand) <= iter, a path
// at least as short is already covered, so the candidate is pruned.
// dist_table[cand_root_id] holds the candidate root's distances for the current batch.
template <VertexID BATCH_SIZE>
inline bool DistBVCPLL<BATCH_SIZE>::
distance_query(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
//        const std::vector<IndexType> &L,
        const std::vector< std::vector<UnweightedDist> > &dist_table,
        UnweightedDist iter)
{
    VertexID cand_real_id = cand_root_id + roots_start;
    const IndexType &Lv = L[v_id_local];
    // Traverse v_id's all existing labels
//    VertexID b_i_bound = Lv.batches.size();
//    _mm_prefetch(&Lv.batches[0], _MM_HINT_T0);
    _mm_prefetch(&Lv.distances[0], _MM_HINT_T0);
    _mm_prefetch(&Lv.vertices[0], _MM_HINT_T0);
    //_mm_prefetch(&dist_table[cand_root_id][0], _MM_HINT_T0);
//    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
//        VertexID id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE;
//        VertexID dist_start_index = Lv.batches[b_i].start_index;
//        VertexID dist_bound_index = dist_start_index + Lv.batches[b_i].size;
//        // Traverse dist_table
//        for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
    VertexID dist_bound_index = Lv.distances.size();
    for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
        UnweightedDist dist = Lv.distances[dist_i].dist;
        // Cannot use this, because no batch_id any more, so distances are not all in order among batches.
//        if (dist >= iter) { // In a batch, the labels' distances are increasingly ordered.
//            // If the half path distance is already greater than their targeted distance, jump to next batch
//            break;
//        }
        VertexID v_start_index = Lv.distances[dist_i].start_index;
        VertexID v_bound_index = v_start_index + Lv.distances[dist_i].size;
//        _mm_prefetch(&dist_table[cand_root_id][0], _MM_HINT_T0);
        _mm_prefetch(reinterpret_cast<const char *>(dist_table[cand_root_id].data()), _MM_HINT_T0);
        for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
//            VertexID v = Lv.vertices[v_i] + id_offset; // v is a label hub of v_id
            VertexID v = Lv.vertices[v_i]; // v is a label hub of v_id
            if (v >= cand_real_id) {
                // Vertex cand_real_id cannot have labels whose ranks are lower than it,
                // in which case dist_table[cand_root_id][v] does not exist.
                continue;
            }
            // Candidate pruned: an existing hub already certifies a path of length <= iter.
            VertexID d_tmp = dist + dist_table[cand_root_id][v];
            if (d_tmp <= iter) {
                return false;
            }
        }
    }
//    }
    // No shorter (or equal) path found through existing hubs: accept the candidate.
    return true;
}

//// Sequential version
// Function inserts candidate cand_root_id into vertex v_id's labels;
// update the distance buffer dist_table;
// but it only update the v_id's labels' vertices array;
// (The matching distances element is appended later by update_label_indices().
//  If v_id is itself a root of the current batch, the new label is also recorded
//  in buffer_send so other hosts can update their dist_table copies.)
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
insert_label_only_seq(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
        VertexID roots_size,
        const DistGraph &G,
//        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::pair<VertexID, VertexID> > &buffer_send)
//        UnweightedDist iter)
{
    try {
        VertexID cand_real_id = cand_root_id + roots_start;
        L[v_id_local].vertices.push_back(cand_real_id);
//        L[v_id_local].vertices.push_back(cand_root_id);
        // Update the distance buffer if v_id is a root
        VertexID v_id_global = G.get_global_vertex_id(v_id_local);
        VertexID v_root_id = v_id_global - roots_start;
        if (v_id_global >= roots_start && v_root_id < roots_size) {
//            VertexID cand_real_id = cand_root_id + roots_start;
//            dist_table[v_root_id][cand_real_id] = iter;
            // Put the update into the buffer_send for later sending
            buffer_send.emplace_back(v_root_id, cand_real_id);
        }
    }
    catch (const std::bad_alloc &) {
        // push_back/emplace_back may throw on OOM: report and abort.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("insert_label_only_seq: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
}

//// Parallel Version
// Function inserts candidate cand_root_id into vertex v_id's labels;
// update the distance buffer dist_table;
// but it only update the v_id's labels' vertices array;
// (Instead of appending to the shared buffer_send, the root update is written into
//  the caller's pre-sized slot of tmp_buffer_send at offset_tmp_buffer_send,
//  advancing size_tmp_buffer_send -- race-free because each caller iteration owns
//  its slot exclusively.)
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
insert_label_only_para(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
        VertexID roots_size,
        const DistGraph &G,
//        std::vector< std::pair<VertexID, VertexID> > &buffer_send)
        std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send,
        EdgeID &size_tmp_buffer_send,
        const EdgeID offset_tmp_buffer_send)
{
    try {
        VertexID cand_real_id = cand_root_id + roots_start;
        L[v_id_local].vertices.push_back(cand_real_id);
//        L[v_id_local].vertices.push_back(cand_root_id);
        // Update the distance buffer if v_id is a root
        VertexID v_id_global = G.get_global_vertex_id(v_id_local);
        VertexID v_root_id = v_id_global - roots_start;
        if (v_id_global >= roots_start && v_root_id < roots_size) {
//            VertexID cand_real_id = cand_root_id + roots_start;
            // Put the update into the buffer_send for later sending
            tmp_buffer_send[offset_tmp_buffer_send + size_tmp_buffer_send++] =
                std::make_pair(v_root_id, cand_real_id);
        }
    }
    catch (const std::bad_alloc &) {
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("insert_label_only_para: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
}

// Function updates those index arrays in v_id's label only if v_id has been inserted new labels
// (Appends one distances element covering the inserted_count vertices most recently
//  pushed into Lv.vertices, all at distance `iter`.)
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
update_label_indices(
        const VertexID v_id_local,
        const VertexID inserted_count,
//        std::vector<IndexType> &L,
//        std::vector<ShortIndex> &short_index,
//        VertexID b_id,
        const UnweightedDist iter)
{
    try {
        IndexType &Lv = L[v_id_local];
//        // indicator[BATCH_SIZE + 1] is true, means v got some labels already in this batch
//        if (short_index[v_id_local].indicator[BATCH_SIZE]) {
//            // Increase the batches' last element's size because a new distance element need to be added
//            ++(Lv.batches.rbegin() -> size);
//        } else {
//            short_index[v_id_local].indicator[BATCH_SIZE] = 1;
////            short_index[v_id_local].indicator.set(BATCH_SIZE);
//            // Insert a new Batch with batch_id, start_index, and size because a new distance element need to be added
//            Lv.batches.emplace_back(
//                    b_id, // batch id
//                    Lv.distances.size(), // start index
//                    1); // size
//        }
        // Insert a new distance element with start_index, size, and dist
        Lv.distances.emplace_back(
            Lv.vertices.size() - inserted_count, // start index
            inserted_count, // size
            iter); // distance
    }
    catch (const std::bad_alloc &) {
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("update_label_indices: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
}

// Function to reset dist_table the distance buffer to INF
// Traverse every root's labels to reset its distance buffer elements to INF.
// In this way to reduce the cost of initialization of the next batch.
// End-of-batch cleanup: resets only the dist_table entries that this batch touched
// (recorded in recved_dist_table), zeroes the bit-parallel labels table, and drops
// the labels of local-minimum vertices discovered via once_candidated_queue.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
reset_at_end(
        const DistGraph &G,
//        VertexID roots_start,
//        const std::vector<VertexID> &roots_master_local,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        const std::vector<VertexID> &once_candidated_queue,
        const VertexID end_once_candidated_queue)
{
//    // (Commented-out legacy path removed during review: it reset dist_table by
//    // walking every local master root's own labels -- Lr.batches/Lr.distances/
//    // Lr.vertices -- setting each touched dist_table[r_root_id][...] entry back to
//    // MAX_UNWEIGHTED_DIST. Superseded by the recved_dist_table bookkeeping below.)
    // Reset dist_table according to received masters' labels from other hosts
    for (VertexID r_root_id = 0; r_root_id < BATCH_SIZE; ++r_root_id) {
        for (VertexID cand_real_id : recved_dist_table[r_root_id]) {
            dist_table[r_root_id][cand_real_id] = MAX_UNWEIGHTED_DIST;
        }
        recved_dist_table[r_root_id].clear();
    }
    // Reset bit-parallel labels table
    for (VertexID r_root_id = 0; r_root_id < BATCH_SIZE; ++r_root_id) {
        memset(bp_labels_table[r_root_id].bp_dist, 0, sizeof(bp_labels_table[r_root_id].bp_dist));
        memset(bp_labels_table[r_root_id].bp_sets, 0, sizeof(bp_labels_table[r_root_id].bp_sets));
    }
    // Remove labels of local minimum set
    // NOTE(review): clean_all_indices() presumably frees the per-vertex label arrays
    // for vertices G marks as local minima -- confirm this is intended to be
    // irreversible across batches.
    for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
        VertexID v_local_id = once_candidated_queue[v_i];
        if (!G.is_local_minimum[v_local_id]) {
            continue;
        }
        L[v_local_id].clean_all_indices();
    }
}

// Processes one batch of roots [roots_start, roots_start + roots_size): alternating
// scatter (label pushing) and gather (label inserting) rounds until no vertex is
// active on any host. (Body continues beyond this excerpt.)
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
batch_process(
        const DistGraph &G,
//        const VertexID b_id,
        const VertexID roots_start, // start id of roots
        const VertexID roots_size, // how many roots in the batch
        const std::vector<uint8_t> &used_bp_roots,
        std::vector<VertexID> &active_queue,
        VertexID &end_active_queue,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<ShortIndex> &short_index,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        std::vector<uint8_t> &got_candidates,
//        std::vector<bool> &got_candidates,
        std::vector<uint8_t> &is_active,
//        std::vector<bool> &is_active,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated)
//        std::vector<bool> &once_candidated)
{
    // At the beginning of a batch, initialize the labels L and distance buffer dist_table;
//    initializing_time -= WallTimer::get_time_mark();
    // The Maximum of active vertices among hosts.
VertexID global_num_actives = initialization(G, short_index, dist_table, recved_dist_table, bp_labels_table, active_queue, end_active_queue, once_candidated_queue, end_once_candidated_queue, once_candidated, // b_id, roots_start, roots_size, // roots_master_local, used_bp_roots); // initializing_time += WallTimer::get_time_mark(); UnweightedDist iter = 0; // The iterator, also the distance for current iteration // {//test // if (0 == host_id) { // printf("host_id: %u initialization finished.\n", host_id); // } // } while (global_num_actives) { ++iter; {// Limit the distance if (iter >7 ) { end_active_queue = 0; break; } } //#ifdef DEBUG_MESSAGES_ON // {//test //// if (0 == host_id) { // double memtotal = 0; // double memfree = 0; // PADO::Utils::system_memory(memtotal, memfree); // printf("iter: %u " // "host_id: %d " // "global_num_actives: %u " // "L.size(): %.2fGB " // "memtotal: %.2fGB " // "memfree: %.2fGB\n", // iter, // host_id, // global_num_actives, // get_index_size() * 1.0 / (1 << 30), // memtotal / 1024, // memfree / 1024); //// } // } //#endif // Traverse active vertices to push their labels as candidates // Send masters' newly added labels to other hosts try { // scatter_time -= WallTimer::get_time_mark(); // Divide the pushing into many-time runs. 
const VertexID chunk_size = 1 << 14; VertexID remainder = global_num_actives % chunk_size; VertexID bound_global_i = global_num_actives - remainder; // VertexID remainder = end_active_queue % chunk_size; // VertexID bound_active_queue = end_active_queue - remainder; VertexID local_size; for (VertexID global_i = 0; global_i < bound_global_i; global_i += chunk_size) { if (global_i < end_active_queue) { local_size = end_active_queue - global_i; } else { local_size = 0; } // {//test // if (1024 == roots_start && 7 == host_id) { // printf("S0 host_id: %d global_i: %u bound_global_i: %u local_size: %u\n", // host_id, global_i, bound_global_i, local_size); // } // } schedule_label_pushing_para( G, roots_start, used_bp_roots, active_queue, global_i, chunk_size, local_size, got_candidates_queue, end_got_candidates_queue, short_index, bp_labels_table, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated, iter); } if (remainder) { if (bound_global_i < end_active_queue) { local_size = end_active_queue - bound_global_i; } else { local_size = 0; } schedule_label_pushing_para( G, roots_start, used_bp_roots, active_queue, bound_global_i, remainder, local_size, got_candidates_queue, end_got_candidates_queue, short_index, bp_labels_table, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated, iter); } // // schedule_label_pushing_para( // G, // roots_start, // used_bp_roots, // active_queue, // 0, // end_active_queue, // got_candidates_queue, // end_got_candidates_queue, // short_index, // bp_labels_table, // got_candidates, // is_active, // once_candidated_queue, // end_once_candidated_queue, // once_candidated, // iter); end_active_queue = 0; // scatter_time += WallTimer::get_time_mark(); } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("pushing: bad_alloc " "iter: %u " "host_id: %d " "global_num_actives: %u " "L.size(): 
%.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", iter, host_id, global_num_actives, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } // {//test // if (0 == host_id) { // printf("host_id: %u pushing finished...\n", host_id); // } // } // Traverse vertices in the got_candidates_queue to insert labels { // gather_time -= WallTimer::get_time_mark(); std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table // pair.first: root id // pair.second: label (global) id of the root if (end_got_candidates_queue >= THRESHOLD_PARALLEL) { const VertexID chunk_size = 1 << 4; VertexID remainder = end_got_candidates_queue % chunk_size; VertexID bound_i_q = end_got_candidates_queue - remainder; for (VertexID i_q = 0; i_q < bound_i_q; i_q += chunk_size) { schedule_label_inserting_para( G, roots_start, roots_size, short_index, dist_table, got_candidates_queue, i_q, chunk_size, got_candidates, active_queue, end_active_queue, is_active, buffer_send, iter); } if (remainder) { schedule_label_inserting_para( G, roots_start, roots_size, short_index, dist_table, got_candidates_queue, bound_i_q, remainder, got_candidates, active_queue, end_active_queue, is_active, buffer_send, iter); } ////// Backup // // Prepare for parallel active_queue // // Don't need offsets_tmp_active_queue here, because the index i_queue is the offset already. // // Actually we still need offsets_tmp_active_queue, because collect_into_queue() needs it. 
// std::vector<VertexID> offsets_tmp_active_queue; // std::vector<VertexID> tmp_active_queue; // std::vector<VertexID> sizes_tmp_active_queue; // std::vector<EdgeID> offsets_tmp_buffer_send; // std::vector< std::pair<VertexID, VertexID> > tmp_buffer_send; // std::vector<EdgeID> sizes_tmp_buffer_send; // EdgeID total_send_labels; // // try { // offsets_tmp_active_queue.resize(end_got_candidates_queue); //#pragma omp parallel for // for (VertexID i_q = 0; i_q < end_got_candidates_queue; ++i_q) { // offsets_tmp_active_queue[i_q] = i_q; // } // tmp_active_queue.resize(end_got_candidates_queue); // sizes_tmp_active_queue.resize(end_got_candidates_queue, // 0); // Size will only be 0 or 1, but it will become offsets eventually. // // // Prepare for parallel buffer_send //// std::vector<EdgeID> offsets_tmp_buffer_send(end_got_candidates_queue); // offsets_tmp_buffer_send.resize(end_got_candidates_queue); //#pragma omp parallel for // for (VertexID i_q = 0; i_q < end_got_candidates_queue; ++i_q) { // VertexID v_id_local = got_candidates_queue[i_q]; // VertexID v_global_id = G.get_global_vertex_id(v_id_local); // if (v_global_id >= roots_start && v_global_id < roots_start + roots_size) { // // If v_global_id is root, its new labels should be put into buffer_send // offsets_tmp_buffer_send[i_q] = short_index[v_id_local].end_candidates_que; // } else { // offsets_tmp_buffer_send[i_q] = 0; // } // } // total_send_labels = PADO::prefix_sum_for_offsets(offsets_tmp_buffer_send); // tmp_buffer_send.resize(total_send_labels); // sizes_tmp_buffer_send.resize(end_got_candidates_queue, 0); // } // catch (const std::bad_alloc &) { // double memtotal = 0; // double memfree = 0; // PADO::Utils::system_memory(memtotal, memfree); // printf("L%u_tmp_buffer_send: bad_alloc " // "host_id: %d " // "iter: %u " // "end_got_candidates_queue: %u " // "total_send_labels: %u " // "L.size(): %.2fGB " // "memtotal: %.2fGB " // "memfree: %.2fGB\n", // __LINE__, // host_id, // iter, // 
end_got_candidates_queue, // total_send_labels, // get_index_size() * 1.0 / (1 << 30), // memtotal / 1024, // memfree / 1024); // exit(1); // } // //#pragma omp parallel for // for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) { // VertexID v_id_local = got_candidates_queue[i_queue]; // VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates // got_candidates[v_id_local] = 0; // reset got_candidates // // Traverse v_id's all candidates // VertexID bound_cand_i = short_index[v_id_local].end_candidates_que; // for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) { // VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i]; // short_index[v_id_local].is_candidate[cand_root_id] = 0; // // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance // if (distance_query( // cand_root_id, // v_id_local, // roots_start, // // L, // dist_table, // iter)) { // if (!is_active[v_id_local]) { // is_active[v_id_local] = 1; //// active_queue[end_active_queue++] = v_id_local; // tmp_active_queue[i_queue + sizes_tmp_active_queue[i_queue]++] = v_id_local; // } // ++inserted_count; // // The candidate cand_root_id needs to be added into v_id's label // insert_label_only_para( // cand_root_id, // v_id_local, // roots_start, // roots_size, // G, // tmp_buffer_send, // sizes_tmp_buffer_send[i_queue], // offsets_tmp_buffer_send[i_queue]); //// buffer_send); // } // } // short_index[v_id_local].end_candidates_que = 0; // if (0 != inserted_count) { // // Update other arrays in L[v_id] if new labels were inserted in this iteration // update_label_indices( // v_id_local, // inserted_count, // // L, //// short_index, //// b_id, // iter); // } // } // // {// Collect elements from tmp_active_queue to active_queue // VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_active_queue); // PADO::collect_into_queue( // tmp_active_queue, // offsets_tmp_active_queue, // 
sizes_tmp_active_queue, // total_new, // active_queue, // end_active_queue); // } // {// Collect elements from tmp_buffer_send to buffer_send // EdgeID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_buffer_send); // try { // buffer_send.resize(total_new); // } // catch (const std::bad_alloc &) { // double memtotal = 0; // double memfree = 0; // PADO::Utils::system_memory(memtotal, memfree); // printf("L%u_buffer_send: bad_alloc " // "iter: %u " // "host_id: %d " // "L.size(): %.2fGB " // "memtotal: %.2fGB " // "memfree: %.2fGB\n", // __LINE__, // iter, // host_id, // get_index_size() * 1.0 / (1 << 30), // memtotal / 1024, // memfree / 1024); // exit(1); // } // EdgeID zero_size = 0; // PADO::collect_into_queue( // tmp_buffer_send, // offsets_tmp_buffer_send, // sizes_tmp_buffer_send, // total_new, // buffer_send, // zero_size); // } } else { for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) { VertexID v_id_local = got_candidates_queue[i_queue]; VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates got_candidates[v_id_local] = 0; // reset got_candidates // Traverse v_id's all candidates VertexID bound_cand_i = short_index[v_id_local].end_candidates_que; for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) { VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i]; short_index[v_id_local].is_candidate[cand_root_id] = 0; // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance if (distance_query( cand_root_id, v_id_local, roots_start, // L, dist_table, iter)) { if (!is_active[v_id_local]) { is_active[v_id_local] = 1; active_queue[end_active_queue++] = v_id_local; } ++inserted_count; // The candidate cand_root_id needs to be added into v_id's label insert_label_only_seq( cand_root_id, v_id_local, roots_start, roots_size, G, // dist_table, buffer_send); // iter); } } short_index[v_id_local].end_candidates_que = 0; if (0 != inserted_count) { // 
Update other arrays in L[v_id] if new labels were inserted in this iteration update_label_indices( v_id_local, inserted_count, // L, // short_index, // b_id, iter); } } } // {//test // printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send)); // } end_got_candidates_queue = 0; // Set the got_candidates_queue empty // Sync the dist_table for (int root = 0; root < num_hosts; ++root) { std::vector<std::pair<VertexID, VertexID>> buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } EdgeID size_buffer_recv = buffer_recv.size(); try { if (size_buffer_recv >= THRESHOLD_PARALLEL) { // Get label number for every root std::vector<VertexID> sizes_recved_root_labels(roots_size, 0); #pragma omp parallel for for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) { const std::pair<VertexID, VertexID> &e = buffer_recv[i_l]; VertexID root_id = e.first; __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST); } // Resize the recved_dist_table for every root #pragma omp parallel for for (VertexID root_id = 0; root_id < roots_size; ++root_id) { VertexID old_size = recved_dist_table[root_id].size(); VertexID tmp_size = sizes_recved_root_labels[root_id]; if (tmp_size) { recved_dist_table[root_id].resize(old_size + tmp_size); sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size } // If tmp_size == 0, root_id has no received labels. 
// sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size } // Recorde received labels in recved_dist_table #pragma omp parallel for for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) { const std::pair<VertexID, VertexID> &e = buffer_recv[i_l]; VertexID root_id = e.first; VertexID cand_real_id = e.second; dist_table[root_id][cand_real_id] = iter; PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], cand_real_id); } } else { for (const std::pair<VertexID, VertexID> &e : buffer_recv) { VertexID root_id = e.first; VertexID cand_real_id = e.second; dist_table[root_id][cand_real_id] = iter; // Record the received element, for future reset recved_dist_table[root_id].push_back(cand_real_id); } } } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("recved_dist_table: bad_alloc " "host_id: %d " "iter: %u " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, iter, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } } // Sync the global_num_actives MPI_Allreduce(&end_active_queue, &global_num_actives, 1, V_ID_Type, MPI_MAX, // MPI_SUM, MPI_COMM_WORLD); // gather_time += WallTimer::get_time_mark(); } // {//test // if (0 == host_id) { // printf("iter: %u inserting labels finished.\n", iter); // } // } } // Reset the dist_table // clearup_time -= WallTimer::get_time_mark(); reset_at_end( G, // roots_start, // roots_master_local, dist_table, recved_dist_table, bp_labels_table, once_candidated_queue, end_once_candidated_queue); // clearup_time += WallTimer::get_time_mark(); // {//test // if (0 == host_id) { // printf("host_id: %u resetting finished.\n", host_id); // } // } } //// Sequential Version //template <VertexID BATCH_SIZE> //inline void DistBVCPLL<BATCH_SIZE>:: //batch_process( // const DistGraph &G, // VertexID b_id, // VertexID roots_start, // start id of roots // VertexID 
roots_size, // how many roots in the batch // const std::vector<uint8_t> &used_bp_roots, // std::vector<VertexID> &active_queue, // VertexID &end_active_queue, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<ShortIndex> &short_index, // std::vector< std::vector<UnweightedDist> > &dist_table, // std::vector< std::vector<VertexID> > &recved_dist_table, // std::vector<BPLabelType> &bp_labels_table, // std::vector<uint8_t> &got_candidates, //// std::vector<bool> &got_candidates, // std::vector<uint8_t> &is_active, //// std::vector<bool> &is_active, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<uint8_t> &once_candidated) //// std::vector<bool> &once_candidated) //{ // // At the beginning of a batch, initialize the labels L and distance buffer dist_table; // initializing_time -= WallTimer::get_time_mark(); // VertexID global_num_actives = initialization(G, // short_index, // dist_table, // recved_dist_table, // bp_labels_table, // active_queue, // end_active_queue, // once_candidated_queue, // end_once_candidated_queue, // once_candidated, // b_id, // roots_start, // roots_size, //// roots_master_local, // used_bp_roots); // initializing_time += WallTimer::get_time_mark(); // UnweightedDist iter = 0; // The iterator, also the distance for current iteration //// {//test //// printf("host_id: %u initialization finished.\n", host_id); //// } // // // while (global_num_actives) { ////#ifdef DEBUG_MESSAGES_ON //// {// //// if (0 == host_id) { //// printf("iter: %u global_num_actives: %u\n", iter, global_num_actives); //// } //// } ////#endif // ++iter; // // Traverse active vertices to push their labels as candidates // // Send masters' newly added labels to other hosts // { // scatter_time -= WallTimer::get_time_mark(); // std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(end_active_queue); // //.first: Vertex ID // //.second: size of labels // 
std::vector<VertexID> buffer_send_labels; // // Prepare masters' newly added labels for sending // for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) { // VertexID v_head_local = active_queue[i_q]; // is_active[v_head_local] = 0; // reset is_active // VertexID v_head_global = G.get_global_vertex_id(v_head_local); // const IndexType &Lv = L[v_head_local]; // // Prepare the buffer_send_indices // buffer_send_indices[i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size); // // These 2 index are used for traversing v_head's last inserted labels // VertexID l_i_start = Lv.distances.rbegin()->start_index; // VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size; // for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) { // VertexID label_root_id = Lv.vertices[l_i]; // buffer_send_labels.push_back(label_root_id); // } // } // end_active_queue = 0; // // for (int root = 0; root < num_hosts; ++root) { // // Get the indices // std::vector< std::pair<VertexID, VertexID> > indices_buffer; // one_host_bcasts_buffer_to_buffer(root, // buffer_send_indices, // indices_buffer); // if (indices_buffer.empty()) { // continue; // } // // Get the labels // std::vector<VertexID> labels_buffer; // one_host_bcasts_buffer_to_buffer(root, // buffer_send_labels, // labels_buffer); // // Push those labels // EdgeID start_index = 0; // for (const std::pair<VertexID, VertexID> e : indices_buffer) { // VertexID v_head_global = e.first; // EdgeID bound_index = start_index + e.second; // if (G.local_out_degrees[v_head_global]) { // local_push_labels( // v_head_global, // start_index, // bound_index, // roots_start, // labels_buffer, // G, // short_index, // got_candidates_queue, // end_got_candidates_queue, // got_candidates, // once_candidated_queue, // end_once_candidated_queue, // once_candidated, // bp_labels_table, // used_bp_roots, // iter); // } // start_index = bound_index; // } // } // scatter_time += WallTimer::get_time_mark(); // } // // // Traverse vertices in the 
got_candidates_queue to insert labels // { // gather_time -= WallTimer::get_time_mark(); // std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table // // pair.first: root id // // pair.second: label (global) id of the root // for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) { // VertexID v_id_local = got_candidates_queue[i_queue]; // VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates // got_candidates[v_id_local] = 0; // reset got_candidates // // Traverse v_id's all candidates // VertexID bound_cand_i = short_index[v_id_local].end_candidates_que; // for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) { // VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i]; // short_index[v_id_local].is_candidate[cand_root_id] = 0; // // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance // if ( distance_query( // cand_root_id, // v_id_local, // roots_start, // // L, // dist_table, // iter) ) { // if (!is_active[v_id_local]) { // is_active[v_id_local] = 1; // active_queue[end_active_queue++] = v_id_local; // } // ++inserted_count; // // The candidate cand_root_id needs to be added into v_id's label // insert_label_only( // cand_root_id, // v_id_local, // roots_start, // roots_size, // G, //// dist_table, // buffer_send); //// iter); // } // } // short_index[v_id_local].end_candidates_que = 0; // if (0 != inserted_count) { // // Update other arrays in L[v_id] if new labels were inserted in this iteration // update_label_indices( // v_id_local, // inserted_count, // // L, // short_index, // b_id, // iter); // } // } //// {//test //// printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send)); //// } // end_got_candidates_queue = 0; // Set the got_candidates_queue empty // // Sync the dist_table // for (int root = 0; root < 
num_hosts; ++root) { // std::vector<std::pair<VertexID, VertexID>> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const std::pair<VertexID, VertexID> &e : buffer_recv) { // VertexID root_id = e.first; // VertexID cand_real_id = e.second; // dist_table[root_id][cand_real_id] = iter; // // Record the received element, for future reset // recved_dist_table[root_id].push_back(cand_real_id); // } // } // // // Sync the global_num_actives // MPI_Allreduce(&end_active_queue, // &global_num_actives, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // gather_time += WallTimer::get_time_mark(); // } // } // // // Reset the dist_table // clearup_time -= WallTimer::get_time_mark(); // reset_at_end( //// G, //// roots_start, //// roots_master_local, // dist_table, // recved_dist_table, // bp_labels_table); // clearup_time += WallTimer::get_time_mark(); //} //// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // // Every host h_i broadcast to others // for (int root = 0; root < num_hosts; ++root) { // std::vector<E_T> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } //// uint64_t size_buffer_send = buffer_send.size(); //// // Sync the size_buffer_send. 
//// message_time -= WallTimer::get_time_mark(); //// MPI_Bcast(&size_buffer_send, //// 1, //// MPI_UINT64_T, //// root, //// MPI_COMM_WORLD); //// message_time += WallTimer::get_time_mark(); ////// {// test ////// printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send); ////// } //// if (!size_buffer_send) { //// continue; //// } //// message_time -= WallTimer::get_time_mark(); //// std::vector<E_T> buffer_recv(size_buffer_send); //// if (host_id == root) { //// buffer_recv.assign(buffer_send.begin(), buffer_send.end()); //// } //// uint64_t bytes_buffer_send = size_buffer_send * ETypeSize; //// if (bytes_buffer_send < static_cast<size_t>(INT_MAX)) { //// // Only need 1 broadcast //// //// MPI_Bcast(buffer_recv.data(), //// bytes_buffer_send, //// MPI_CHAR, //// root, //// MPI_COMM_WORLD); //// } else { //// const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1; //// const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1; //// size_t offset = 0; //// for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) { ////// size_t offset = b_i * unit_buffer_size; //// size_t size_unit_buffer = b_i == num_unit_buffers - 1 //// ? size_buffer_send - offset //// : unit_buffer_size; //// MPI_Bcast(buffer_recv.data() + offset, //// size_unit_buffer * ETypeSize, //// MPI_CHAR, //// root, //// MPI_COMM_WORLD); //// offset += unit_buffer_size; //// } //// } //// message_time += WallTimer::get_time_mark(); // for (const E_T &e : buffer_recv) { // fun(e); // } // } //} //// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // // Host processes locally. 
// for (const E_T &e : buffer_send) { // fun(e); // } // // // Every host sends to others // for (int src = 0; src < num_hosts; ++src) { // if (host_id == src) { // // Send from src // message_time -= WallTimer::get_time_mark(); // for (int hop = 1; hop < num_hosts; ++hop) { // int dst = hop_2_root_host_id(hop, host_id); // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // } // message_time += WallTimer::get_time_mark(); // } else { // // Receive from src // for (int hop = 1; hop < num_hosts; ++hop) { // int dst = hop_2_root_host_id(hop, src); // if (host_id == dst) { // message_time -= WallTimer::get_time_mark(); // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // for (const E_T &e : buffer_recv) { // fun(e); // } // } // } // } // } //} //// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // // Host processes locally. // for (const E_T &e : buffer_send) { // fun(e); // } // // Every host sends (num_hosts - 1) times // for (int hop = 1; hop < num_hosts; ++hop) { // int src = hop_2_me_host_id(-hop); // int dst = hop_2_me_host_id(hop); // if (src != dst) { // Normal case // // When host_id is odd, first receive, then send. // if (static_cast<uint32_t>(host_id) & 1U) { // message_time -= WallTimer::get_time_mark(); // // Receive first. 
// std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u recved_from: %u\n", host_id, src); // } // // Send then. // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u send_to: %u\n", host_id, dst); // } // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } else { // When host_id is even, first send, then receive. // // Send first. // message_time -= WallTimer::get_time_mark(); // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u send_to: %u\n", host_id, dst); // } // // Receive then. // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u recved_from: %u\n", host_id, src); // } // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } // } else { // If host_id is higher than dst, first send, then receive // // This is a special case. It only happens when the num_hosts is even and hop equals to num_hosts/2. 
// if (host_id < dst) { // // Send // message_time -= WallTimer::get_time_mark(); // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // // Receive // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } else { // Otherwise, if host_id is lower than dst, first receive, then send // // Receive // message_time -= WallTimer::get_time_mark(); // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // // Send // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } // } // } //} //// DEPRECATED version Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // const uint32_t UNIT_BUFFER_SIZE = 16U << 20U; // // Every host h_i broadcast to others // for (int h_i = 0; h_i < num_hosts; ++h_i) { // uint64_t size_buffer_send = buffer_send.size(); // // Sync the size_buffer_send. 
//        message_time -= WallTimer::get_time_mark();
//        MPI_Bcast(&size_buffer_send,
//                1,
//                MPI_UINT64_T,
//                h_i,
//                MPI_COMM_WORLD);
//        message_time += WallTimer::get_time_mark();
////        {// test
////            printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send);
////        }
//        if (!size_buffer_send) {
//            continue;
//        }
//        uint32_t num_unit_buffers = (size_buffer_send + UNIT_BUFFER_SIZE - 1) / UNIT_BUFFER_SIZE;
//
//        // Broadcast the buffer_send
//        for (uint32_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
//            // Prepare the unit buffer
//            message_time -= WallTimer::get_time_mark();
//            size_t offset = b_i * UNIT_BUFFER_SIZE;
//            size_t size_unit_buffer = b_i == num_unit_buffers - 1
//                    ? size_buffer_send - offset
//                    : UNIT_BUFFER_SIZE;
//            std::vector<E_T> unit_buffer(size_unit_buffer);
//            // Copy the messages from buffer_send to unit buffer.
//            if (host_id == h_i) {
//                unit_buffer.assign(buffer_send.begin() + offset, buffer_send.begin() + offset + size_unit_buffer);
//            }
//            // Broadcast the unit buffer
//            MPI_Bcast(unit_buffer.data(),
//                    MPI_Instance::get_sending_size(unit_buffer),
//                    MPI_CHAR,
//                    h_i,
//                    MPI_COMM_WORLD);
//            message_time += WallTimer::get_time_mark();
//            // Process every element of unit_buffer
//            for (const E_T &e : unit_buffer) {
//                fun(e);
//            }
//        }
//    }
//}

// Function: Host root broadcasts its sending buffer to a receiving buffer.
//
// Collective: every host in MPI_COMM_WORLD must call this with the same `root`.
// On host `root`, the contents of `buffer_send` are broadcast; on every host
// (including root) the payload ends up in `buffer_recv`, resized to the
// broadcast element count. An empty send buffer results in an empty
// `buffer_recv` on all hosts (callers check `buffer_recv.empty()`).
//
// NOTE(review): on the root host, `buffer_recv.swap(buffer_send)` CONSUMES the
// root's `buffer_send` (it is left holding the old `buffer_recv` contents).
// Callers that loop over all roots rely on each host's buffer only being
// needed on its own root iteration — confirm before reusing `buffer_send`.
template <VertexID BATCH_SIZE>
template <typename E_T>
inline void DistBVCPLL<BATCH_SIZE>::
one_host_bcasts_buffer_to_buffer(
        int root,
        std::vector<E_T> &buffer_send,
        std::vector<E_T> &buffer_recv)
{
    const size_t ETypeSize = sizeof(E_T);
    // NOTE(review): `volatile` here presumably works around a past
    // miscompilation/visibility issue around the MPI_Bcast cast below —
    // confirm whether it is still needed; it is not thread synchronization.
    volatile uint64_t size_buffer_send = 0;
    if (host_id == root) {
        size_buffer_send = buffer_send.size();
    }
    // Sync the size_buffer_send.
//    message_time -= WallTimer::get_time_mark();
//    {//test
//        if (0 == root && size_buffer_send == 16 && 1024 == caller_line) {
////        if (0 == root && size_buffer_send == 16 && 0 == host_id) {
//            printf("before: host_id: %d size_buffer_send: %lu\n",
//                    host_id,
//                    size_buffer_send);
//        }
//    }
    // Broadcast the element count first so every host can size buffer_recv.
    MPI_Bcast((void *) &size_buffer_send,
            1,
            MPI_UINT64_T,
            root,
            MPI_COMM_WORLD);
//    message_time += WallTimer::get_time_mark();
//    {//test
////        if (0 == root && size_buffer_send == 16 && 0 == host_id) {
//        if (0 == root && size_buffer_send == 16 && 1024 == caller_line) {
//            printf("after: host_id: %d size_buffer_send: %lu\n",
//                    host_id,
//                    size_buffer_send);
//        }
//    }
    try {
        buffer_recv.resize(size_buffer_send);
    }
    catch (const std::bad_alloc &) {
        // Out of memory: report how large the index already is, then abort.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("one_host_bcasts_buffer_to_buffer: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
                host_id,
                get_index_size() * 1.0 / (1 << 30),
                memtotal / 1024,
                memfree / 1024);
        exit(1);
    }
    if (!size_buffer_send) {
        // Nothing to broadcast; all hosts return with an empty buffer_recv.
        return;
    }
    // Broadcast the buffer_send
//    message_time -= WallTimer::get_time_mark();
    if (host_id == root) {
        // Move (not copy) the payload into buffer_recv on the root;
        // see the NOTE(review) in the function header about buffer_send.
//        buffer_recv.assign(buffer_send.begin(), buffer_send.end());
        buffer_recv.swap(buffer_send);
    }
    uint64_t bytes_buffer_send = size_buffer_send * ETypeSize;
    if (bytes_buffer_send <= static_cast<size_t>(INT_MAX)) {
        // Only need 1 broadcast
        MPI_Bcast(buffer_recv.data(),
                bytes_buffer_send,
                MPI_CHAR,
                root,
                MPI_COMM_WORLD);
    } else {
        // MPI_Bcast takes an `int` count, so payloads over INT_MAX bytes are
        // split into nearly-equal unit buffers and broadcast one at a time.
        // NOTE(review): with the ceiling divisions below, a unit's byte size
        // (unit_buffer_size * ETypeSize) can in principle exceed INT_MAX by a
        // small rounding margin for large ETypeSize — verify the bound.
        const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1;
        const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1;
        size_t offset = 0;
        for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
            // Last unit takes whatever remains; earlier units are full-size.
            size_t size_unit_buffer = b_i == num_unit_buffers - 1
                    ? size_buffer_send - offset
                    : unit_buffer_size;
            MPI_Bcast(buffer_recv.data() + offset,
                    size_unit_buffer * ETypeSize,
                    MPI_CHAR,
                    root,
                    MPI_COMM_WORLD);
            offset += unit_buffer_size;
        }
    }
//    message_time += WallTimer::get_time_mark();
}

}

#endif //PADO_DPADO_H
nodal_update_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ \. // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Original author: Ruben Zorrilla // #if !defined( KRATOS_NODAL_UPDATE_UTILITIES ) #define KRATOS_NODAL_UPDATE_UTILITIES /* System includes */ #include <set> /* External includes */ /* Project includes */ #include "includes/define.h" #include "includes/variables.h" #include "includes/mesh_moving_variables.h" #include "includes/fsi_variables.h" #include "containers/array_1d.h" #include "includes/model_part.h" #include "includes/communicator.h" #include "includes/ublas_interface.h" #include "utilities/openmp_utils.h" #include "utilities/variable_utils.h" namespace Kratos { /**@name Kratos Globals */ /*@{ */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ /**@name Enum's */ /*@{ */ /*@} */ /**@name Functions */ /*@{ */ /*@} */ /**@name Kratos Classes */ /*@{ */ /** Short class definition. Detail class definition. */ template <unsigned int TDim> class NodalUpdateBaseClass { public: /** Type Definitions */ /*@{ */ //~ /** Counted pointer of ClassName */ KRATOS_CLASS_POINTER_DEFINITION( NodalUpdateBaseClass ); /*@} */ /** Constructor. */ /** * Empty constructor */ NodalUpdateBaseClass() {} /*@} */ /** Copy constructor. */ /*@{ */ NodalUpdateBaseClass(const NodalUpdateBaseClass& Other); /*@{ */ /** Destructor. */ /*@{ */ virtual ~NodalUpdateBaseClass() {} /*@} */ /**@name Public Operators*/ /*@{ */ /** * Computes the displacement time derivatives according to the computed displacement values. * @param rInterfaceModelPart: modelpart in where the nodal update is to be performed * @param timeStep: time step value */ virtual void UpdateMeshTimeDerivatives(ModelPart& rModelPart, const double timeStep) { KRATOS_ERROR << "Calling the nodal update base class UpdateMeshTimeDerivatives() method. Call the proper time scheme derived one."; } /** * Sets the fluid interface time derivatives as the mesh displacement computed values. 
* @param rInterfaceModelPart: modelpart in where the nodal update is to be performed */ virtual void SetMeshTimeDerivativesOnInterface(ModelPart& rInterfaceModelPart) { auto& rLocalMesh = rInterfaceModelPart.GetCommunicator().LocalMesh(); ModelPart::NodeIterator local_mesh_nodes_begin = rLocalMesh.NodesBegin(); #pragma omp parallel for firstprivate(local_mesh_nodes_begin) for(int k=0; k<static_cast<int>(rLocalMesh.NumberOfNodes()); ++k) { ModelPart::NodeIterator it_node = local_mesh_nodes_begin+k; array_1d<double, 3>& v_node = it_node->FastGetSolutionStepValue(VELOCITY); // Current step interface velocity noalias(v_node) = it_node->FastGetSolutionStepValue(MESH_VELOCITY); // Set the current interface velocity as the mesh velocity; } rInterfaceModelPart.GetCommunicator().SynchronizeVariable(VELOCITY); } /*@} */ protected: /**@name Protected static Member Variables */ /*@{ */ /*@} */ /**@name Protected member Variables */ /*@{ */ /*@} */ /**@name Protected Operators*/ /*@{ */ /*@} */ /**@name Protected Operations*/ /*@{ */ /**@} */ /**@name Protected Access */ /*@{ */ /*@} */ /**@name Protected Inquiry */ /*@{ */ /*@} */ /**@name Protected LifeCycle */ /*@{ */ private: /*@} */ /**@name Static Member Variables */ /*@{ */ /*@} */ /**@name Member Variables */ /*@{ */ /*@} */ /**@name Private Operators*/ /*@{ */ /*@} */ /**@name Private Operations*/ /*@{ */ /*@} */ /**@name Private Access */ /*@{ */ /*@} */ /**@name Private Inquiry */ /*@{ */ /*@} */ /**@name Un accessible methods */ /*@{ */ /*@} */ }; /* Class NodalUpdateBaseClass */ /** Short class definition. Detail class definition. */ template <unsigned int TDim> class NodalUpdateNewmark : public NodalUpdateBaseClass<TDim> { public: /** Type Definitions */ /*@{ */ //~ /** Counted pointer of ClassName */ KRATOS_CLASS_POINTER_DEFINITION( NodalUpdateNewmark ); /*@} */ /** Constructor. 
*/ /** * Empty constructor */ NodalUpdateNewmark(const double BossakAlpha = -0.3) { const double bossak_f = 0.0; const double bossak_beta = 0.25; const double bossak_gamma = 0.5; mBossakBeta = std::pow((1.0 + bossak_f - BossakAlpha), 2) * bossak_beta; mBossakGamma = bossak_gamma + bossak_f - BossakAlpha; } /*@} */ /** Copy constructor. */ /*@{ */ NodalUpdateNewmark(const NodalUpdateNewmark& Other); /*@{ */ /** Destructor. */ /*@{ */ virtual ~NodalUpdateNewmark() {} /*@} */ /**@name Public Operators*/ /*@{ */ /** * Computes the displacement time derivatives according to the computed displacement values using the Newmark (Bossak) scheme. * @param rModelPart: modelpart in where the nodal update is to be performed * @param timeStep: time step value */ void UpdateMeshTimeDerivatives(ModelPart &rModelPart, const double timeStep) override{ auto& rLocalMesh = rModelPart.GetCommunicator().LocalMesh(); ModelPart::NodeIterator local_mesh_nodes_begin = rLocalMesh.NodesBegin(); #pragma omp parallel for firstprivate(local_mesh_nodes_begin) for(int k = 0; k < static_cast<int>(rLocalMesh.NumberOfNodes()); ++k) { const ModelPart::NodeIterator it_node = local_mesh_nodes_begin+k; const array_1d<double, 3>& umesh_n = it_node->FastGetSolutionStepValue(MESH_DISPLACEMENT, 1); // Previous step mesh displacement const array_1d<double, 3>& vmesh_n = it_node->FastGetSolutionStepValue(MESH_VELOCITY, 1); // Previous step mesh velocity const array_1d<double, 3>& amesh_n = it_node->FastGetSolutionStepValue(MESH_ACCELERATION, 1); // Previous step mesh acceleration const array_1d<double, 3>& umesh_n1 = it_node->FastGetSolutionStepValue(MESH_DISPLACEMENT); // Current step mesh displacement array_1d<double, 3>& vmesh_n1 = it_node->FastGetSolutionStepValue(MESH_VELOCITY); // Current step mesh velocity (to be updated) array_1d<double, 3>& amesh_n1 = it_node->FastGetSolutionStepValue(MESH_ACCELERATION); // Current step mesh acceleration (to be updated) const double const_u = mBossakGamma / (timeStep * 
mBossakBeta); const double const_v = 1.0 - mBossakGamma / mBossakBeta; const double const_a = timeStep * (1.0 - mBossakGamma / (2.0 * mBossakBeta)); for (unsigned int d=0; d<TDim; ++d) { vmesh_n1[d] = const_u * (umesh_n1[d] - umesh_n[d]) + const_v * vmesh_n[d] + const_a * amesh_n[d]; amesh_n1[d] = (1.0 / (timeStep * mBossakGamma)) * (vmesh_n1[d] - vmesh_n[d]) - ((1 - mBossakGamma) / mBossakGamma) * amesh_n[d]; } } rModelPart.GetCommunicator().SynchronizeVariable(MESH_VELOCITY); rModelPart.GetCommunicator().SynchronizeVariable(MESH_ACCELERATION); } /*@} */ protected: /**@name Protected static Member Variables */ /*@{ */ /*@} */ /**@name Protected member Variables */ /*@{ */ double mBossakBeta; double mBossakGamma; /*@} */ /**@name Protected Operators*/ /*@{ */ /*@} */ /**@name Protected Operations*/ /*@{ */ /**@} */ /**@name Protected Access */ /*@{ */ /*@} */ /**@name Protected Inquiry */ /*@{ */ /*@} */ /**@name Protected LifeCycle */ /*@{ */ private: /*@} */ /**@name Static Member Variables */ /*@{ */ /*@} */ /**@name Member Variables */ /*@{ */ /*@} */ /**@name Private Operators*/ /*@{ */ /*@} */ /**@name Private Operations*/ /*@{ */ /*@} */ /**@name Private Access */ /*@{ */ /*@} */ /**@name Private Inquiry */ /*@{ */ /*@} */ /**@name Un accessible methods */ /*@{ */ /*@} */ }; /* Class NodalUpdateNewmark */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ } /* namespace Kratos.*/ #endif /* KRATOS_PARTITIONED_FSI_UTILITIES defined */
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/fx-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Typedef declarations. 
*/ typedef enum { BitwiseAndAssignmentOperator = 0xd9U, BitwiseOrAssignmentOperator, LeftShiftAssignmentOperator, RightShiftAssignmentOperator, PowerAssignmentOperator, ModuloAssignmentOperator, PlusAssignmentOperator, SubtractAssignmentOperator, MultiplyAssignmentOperator, DivideAssignmentOperator, IncrementAssignmentOperator, DecrementAssignmentOperator, LeftShiftOperator, RightShiftOperator, LessThanEqualOperator, GreaterThanEqualOperator, EqualOperator, NotEqualOperator, LogicalAndOperator, LogicalOrOperator, ExponentialNotation } FxOperator; struct _FxInfo { const Image *images; char *expression; FILE *file; SplayTreeInfo *colors, *symbols; CacheView **view; RandomInfo *random_info; ExceptionInfo *exception; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireFxInfo() allocates the FxInfo structure. % % The format of the AcquireFxInfo method is: % % FxInfo *AcquireFxInfo(Image *images,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o expression: the expression. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
  ExceptionInfo *exception)
{
  const Image
    *next;

  FxInfo
    *fx_info;

  ssize_t
    i;

  unsigned char
    fx_op[2];

  fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
  (void) memset(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=images;
  /* splay trees own both key and value: both relinquished on destruction */
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /* one virtual cache view per image in the list */
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  next=GetFirstImageInList(fx_info->images);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    fx_info->view[i]=AcquireVirtualCacheView(next,exception);
    i++;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  /*
    Convert compound to simple operators.

    Each multi-character operator is rewritten in place as the matching
    single-byte FxOperator opcode.  The substitution order matters: longer
    operators (e.g. "<<=") must be rewritten before their prefixes ("<<",
    "<=") or the shorter pattern would consume them first.
  */
  fx_op[1]='\0';
  *fx_op=(unsigned char) BitwiseAndAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"&=",(char *) fx_op);
  *fx_op=(unsigned char) BitwiseOrAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"|=",(char *) fx_op);
  *fx_op=(unsigned char) LeftShiftAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"<<=",(char *) fx_op);
  *fx_op=(unsigned char) RightShiftAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,">>=",(char *) fx_op);
  *fx_op=(unsigned char) PowerAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"^=",(char *) fx_op);
  *fx_op=(unsigned char) ModuloAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"%=",(char *) fx_op);
  *fx_op=(unsigned char) PlusAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"+=",(char *) fx_op);
  *fx_op=(unsigned char) SubtractAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"-=",(char *) fx_op);
  *fx_op=(unsigned char) MultiplyAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"*=",(char *) fx_op);
  *fx_op=(unsigned char) DivideAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"/=",(char *) fx_op);
  *fx_op=(unsigned char) IncrementAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"++",(char *) fx_op);
  *fx_op=(unsigned char) DecrementAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"--",(char *) fx_op);
  *fx_op=(unsigned char) LeftShiftOperator;
  (void) SubstituteString(&fx_info->expression,"<<",(char *) fx_op);
  *fx_op=(unsigned char) RightShiftOperator;
  (void) SubstituteString(&fx_info->expression,">>",(char *) fx_op);
  *fx_op=(unsigned char) LessThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,"<=",(char *) fx_op);
  *fx_op=(unsigned char) GreaterThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,">=",(char *) fx_op);
  *fx_op=(unsigned char) EqualOperator;
  (void) SubstituteString(&fx_info->expression,"==",(char *) fx_op);
  *fx_op=(unsigned char) NotEqualOperator;
  (void) SubstituteString(&fx_info->expression,"!=",(char *) fx_op);
  *fx_op=(unsigned char) LogicalAndOperator;
  (void) SubstituteString(&fx_info->expression,"&&",(char *) fx_op);
  *fx_op=(unsigned char) LogicalOrOperator;
  (void) SubstituteString(&fx_info->expression,"||",(char *) fx_op);
  *fx_op=(unsigned char) ExponentialNotation;
  (void) SubstituteString(&fx_info->expression,"**",(char *) fx_op);
  /*
    Force right-to-left associativity for unary negation.

    "-" is first expanded to "-1.0*"; the follow-up substitutions undo that
    expansion where the '-' was part of an exponent ("^-") or scientific
    notation ("E-"/"e-") rather than a unary negation.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  (void) SubstituteString(&fx_info->expression," ","");  /* compact string */
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   +   D e s t r o y F x I n f o                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
%  The format of the DestroyFxInfo method is:
%
%      FxInfo *DestroyFxInfo(FxInfo *fx_info)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
% */ MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info) { ssize_t i; fx_info->exception=DestroyExceptionInfo(fx_info->exception); fx_info->expression=DestroyString(fx_info->expression); fx_info->symbols=DestroySplayTree(fx_info->symbols); fx_info->colors=DestroySplayTree(fx_info->colors); for (i=(ssize_t) GetImageListLength(fx_info->images)-1; i >= 0; i--) fx_info->view[i]=DestroyCacheView(fx_info->view[i]); fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view); fx_info->random_info=DestroyRandomInfo(fx_info->random_info); fx_info=(FxInfo *) RelinquishMagickMemory(fx_info); return(fx_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + F x E v a l u a t e C h a n n e l E x p r e s s i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FxEvaluateChannelExpression() evaluates an expression and returns the % results. % % The format of the FxEvaluateExpression method is: % % double FxEvaluateChannelExpression(FxInfo *fx_info, % const PixelChannel channel,const ssize_t x,const ssize_t y, % double *alpha,Exceptioninfo *exception) % double FxEvaluateExpression(FxInfo *fx_info, % double *alpha,Exceptioninfo *exception) % % A description of each parameter follows: % % o fx_info: the fx info. % % o channel: the channel. % % o x,y: the pixel position. % % o alpha: the result. % % o exception: return any errors or warnings in this structure. 
%
*/

/* Look up a cached symbol value; NULL when the symbol is not yet defined. */
static inline const double *GetFxSymbolValue(FxInfo *magick_restrict fx_info,
  const char *symbol)
{
  return((const double *) GetValueFromSplayTree(fx_info->symbols,symbol));
}

/*
  Set (or create) a symbol's cached value.  Returns MagickFalse only when
  allocating storage for a new symbol fails; the splay tree takes ownership
  of the duplicated key and the allocated value.
*/
static inline MagickBooleanType SetFxSymbolValue(
  FxInfo *magick_restrict fx_info,const char *magick_restrict symbol,
  double const value)
{
  double
    *object;

  object=(double *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (object != (double *) NULL)
    {
      *object=value;
      return(MagickTrue);
    }
  object=(double *) AcquireMagickMemory(sizeof(*object));
  if (object == (double *) NULL)
    {
      (void) ThrowMagickException(fx_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        fx_info->images->filename);
      return(MagickFalse);
    }
  *object=value;
  return(AddValueToSplayTree(fx_info->symbols,ConstantString(symbol),object));
}

/*
  Compute (and cache) a whole-image statistic such as "mean" or "maxima".
  A ".channel" suffix on the symbol selects a specific pixel channel by
  temporarily narrowing the image's channel mask.  Results are cached in the
  symbol table under a key derived from the image address, channel and
  symbol, so each statistic is computed at most once per image/channel.
  Returns the statistic scaled by QuantumScale.
*/
static double FxChannelStatistics(FxInfo *fx_info,Image *image,
  PixelChannel channel,const char *symbol,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  char
    key[MagickPathExtent];

  const double
    *value;

  double
    statistic;

  const char
    *p;

  channel_mask=UndefinedChannel;
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  if (*p == '.')
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1);
      if (option >= 0)
        {
          channel=(PixelChannel) option;
          channel_mask=SetPixelChannelMask(image,(ChannelType)
            (1UL << channel));
        }
    }
  (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image,
    (double) channel,symbol);
  value=GetFxSymbolValue(fx_info,key);
  if (value != (const double *) NULL)
    {
      /* cache hit: restore the channel mask before returning */
      if (channel_mask != UndefinedChannel)
        (void) SetPixelChannelMask(image,channel_mask);
      return(QuantumScale*(*value));
    }
  statistic=0.0;
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      size_t
        depth;

      depth=GetImageDepth(image,exception);
      statistic=(double) depth;
    }
  if (LocaleNCompare(symbol,"kurtosis",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      statistic=kurtosis;
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      statistic=maxima;
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      statistic=mean;
    }
  if (LocaleNCompare(symbol,"median",6) == 0)
    {
      double
        median;

      (void) GetImageMedian(image,&median,exception);
      statistic=median;
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      statistic=minima;
    }
  if (LocaleNCompare(symbol,"skewness",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      statistic=skewness;
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      statistic=standard_deviation;
    }
  if (channel_mask != UndefinedChannel)
    (void) SetPixelChannelMask(image,channel_mask);
  if (SetFxSymbolValue(fx_info,key,statistic) == MagickFalse)
    return(0.0);
  return(QuantumScale*statistic);
}

static double FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t,
  const ssize_t,const char *,const size_t,double *,ExceptionInfo *);

/*
  Return MagickTrue when `expression` begins with the function name `name`
  (of length `length`) followed by '(' or a non-space character, and the
  expression is long enough to contain it.
*/
static inline MagickBooleanType IsFxFunction(const char *expression,
  const char *name,const size_t length)
{
  int
    c;

  size_t
    i;

  for (i=0; i <= length; i++)
    if (expression[i] == '\0')
      return(MagickFalse);
  c=expression[length];
  if ((LocaleNCompare(expression,name,length) == 0) &&
      ((isspace((int) ((unsigned char) c)) == 0) || (c == '(')))
    return(MagickTrue);
  return(MagickFalse);
}

/*
  Greatest common divisor for doubles by recursive Euclidean reduction;
  values below the 0.001 tolerance are treated as zero.
*/
static inline double FxGCD(const double alpha,const double beta)
{
  if (alpha < beta)
    return(FxGCD(beta,alpha));
  if (fabs(beta) < 0.001)
    return(alpha);
  return(FxGCD(beta,alpha-beta*floor(alpha/beta)));
}

/*
  Advance to the ')' that closes the first parenthesized subexpression in
  `expression`; throws UnbalancedParenthesis when none is found.
*/
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  const char
    *subexpression;

  ssize_t
    level;

  level=0;
subexpression=expression;
  while ((*subexpression != '\0') &&
         ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL)))
  {
    if (strchr("(",(int) *subexpression) != (char *) NULL)
      level++;
    else
      if (strchr(")",(int) *subexpression) != (char *) NULL)
        level--;
    subexpression++;
  }
  if (*subexpression == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(subexpression);
}

/*
  Resolve one symbol of an fx expression at pixel (x,y): image selectors
  ("s", "u", "v", "u[i]"), pixel addressing ("p{x,y}" absolute, "p[x,y]"
  relative), named colors, per-pixel channel values ("r", "g", "b", ...),
  image attributes ("w", "h", "page.*", ...), image statistics (delegated
  to FxChannelStatistics), previously assigned variables, and image
  artifacts.  Unknown symbols raise UndefinedVariable and are defined as 0.
*/
static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel,
  const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
  ExceptionInfo *exception)
{
  char
    *q,
    symbol[MagickPathExtent];

  const char
    *artifact,
    *p;

  const double
    *value;

  double
    alpha,
    beta;

  Image
    *image;

  MagickBooleanType
    status;

  PixelInfo
    pixel;

  PointInfo
    point;

  ssize_t
    i;

  size_t
    level;

  p=expression;
  i=GetImageIndexInList(fx_info->images);
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  if (isalpha((int) ((unsigned char) *(p+1))) == 0)
    {
      char
        *subexpression;

      subexpression=AcquireString(expression);
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          /*
            Image selector: s = current image, u = first, v = second;
            an optional bracketed index (itself an expression) overrides i.
          */
          switch (*p)
          {
            case 's':
            default:
            {
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              i=(ssize_t) alpha;
              if (*p != '\0')
                p++;
            }
          if (*p == '.')
            p++;
        }
      if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
        {
          /*
            Pixel addressing: p{x,y} is absolute, p[x,y] is relative to
            the current pixel.  The coordinate pair is an expression whose
            two results arrive as alpha (x) and beta (y).
          */
          p++;
          if (*p == '{')
            {
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              point.x=alpha;
              point.y=beta;
              if (*p != '\0')
                p++;
            }
          else
            if (*p == '[')
              {
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  depth,&beta,exception);
                point.x+=alpha;
                point.y+=beta;
                if (*p != '\0')
                  p++;
              }
          if (*p == '.')
            p++;
        }
      subexpression=DestroyString(subexpression);
    }
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  i=GetImageIndexInList(image);
  GetPixelInfo(image,&pixel);
  status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  (void) status;
  if ((*p != '\0') && (*(p+1) != '\0') && (*(p+2) != '\0') &&
      (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) &&
      (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) &&
      (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      /*
        The remaining token may be a color name (e.g. "red.r"): strip any
        trailing ".channel" part, then try the color cache and finally a
        full color lookup; on success the interpolated pixel is replaced.
      */
      char
        name[MagickPathExtent];

      size_t
        length;

      (void) CopyMagickString(name,p,MagickPathExtent);
      length=strlen(name);
      for (q=name+length-1; q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      q=name;
      if ((*q != '\0') && (*(q+1) != '\0') && (*(q+2) != '\0') &&
          (GetFxSymbolValue(fx_info,name) == (const double *) NULL))
        {
          PixelInfo
            *color;

          color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name);
          if (color != (PixelInfo *) NULL)
            {
              pixel=(*color);
              p+=length;
            }
          else
            {
              MagickBooleanType
                status;

              status=QueryColorCompliance(name,AllCompliance,&pixel,
                fx_info->exception);
              if (status != MagickFalse)
                {
                  (void) AddValueToSplayTree(fx_info->colors,
                    ConstantString(name),ClonePixelInfo(&pixel));
                  p+=length;
                }
            }
        }
    }
  (void) CopyMagickString(symbol,p,MagickPathExtent);
  StripString(symbol);
  if (*symbol == '\0')
    {
      /* bare pixel reference: return the requested channel's value */
      switch (channel)
      {
        case RedPixelChannel: return(QuantumScale*pixel.red);
        case GreenPixelChannel: return(QuantumScale*pixel.green);
        case BluePixelChannel: return(QuantumScale*pixel.blue);
        case BlackPixelChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
        case AlphaPixelChannel:
        {
          if (pixel.alpha_trait == UndefinedPixelTrait)
            return(1.0);
          alpha=(double) (QuantumScale*pixel.alpha);
          return(alpha);
        }
        case CompositePixelChannel:
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
        case IndexPixelChannel:
          return(0.0);
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  /* named symbols, dispatched on the first character */
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((QuantumScale*pixel.alpha));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (IsFxFunction(symbol,"channel",7) != MagickFalse)
        {
          GeometryInfo
            channel_info;

          MagickStatusType
            flags;

          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanPixelChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaPixelChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowPixelChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackPixelChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case AlphaPixelChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedPixelChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenPixelChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BluePixelChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case BlackPixelChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            case AlphaPixelChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            default:
              return(0.0);
          }
        }
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'E':
    case 'e':
    {
      if (LocaleCompare(symbol,"extent") == 0)
        {
          if (image->extent != 0)
            return((double) image->extent);
          return((double) GetBlobSize(image));
        }
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((double) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"intensity") == 0)
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
      if (LocaleCompare(symbol,"i") == 0)
        return((double) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((double) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luma") == 0)
        {
          double
            luma;

          luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luma);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminence;

          luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luminence);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"median",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return((double) GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.alpha);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return((double) image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return((double) image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return((double) image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return((double) image->page.y);
      if (LocaleCompare(symbol,"printsize.x") == 0)
        return(PerceptibleReciprocal(image->resolution.x)*image->columns);
      if (LocaleCompare(symbol,"printsize.y") == 0)
        return(PerceptibleReciprocal(image->resolution.y)*image->rows);
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(symbol,"quality") == 0)
        return((double) image->quality);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"skewness",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return((double) GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((double) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        return((double) GetImageDepth(image,fx_info->exception));
      break;
    }
    default:
      break;
  }
  /* fall back to user-assigned variables, then image artifacts */
  value=GetFxSymbolValue(fx_info,symbol);
  if (value != (const double *) NULL)
    return(*value);
  artifact=GetImageArtifact(image,symbol);
  if (artifact != (const char *) NULL)
    return(StringToDouble(artifact,(char **) NULL));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UndefinedVariable","`%s'",symbol);
  (void) SetFxSymbolValue(fx_info,symbol,0.0);
  return(0.0);
}

static const char *FxOperatorPrecedence(const char *expression,
ExceptionInfo *exception)
{
  /*
    Scan the expression and return a pointer to the operator with the lowest
    binding precedence (the split point for the recursive-descent evaluator),
    or NULL when the expression contains no top-level operator.
  */
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;

  FxPrecedence
    precedence,
    target;

  const char
    *subexpression;

  int
    c;

  size_t
    level;

  c=(-1);  /* previous significant character; -1 means "none yet" */
  level=0;  /* {} / [] nesting depth; operators only count at level 0 */
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while ((c != '\0') && (*expression != '\0'))
  {
    precedence=UndefinedPrecedence;
    /* skip whitespace; '@' (image-index prefix) also suppresses a split here */
    if ((isspace((int) ((unsigned char) *expression)) != 0) ||
        (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      Skip over spellings that contain operator-looking characters so they
      are not mistaken for operators (the '2' in atan2, E+/E- exponents,
      Bessel names j0/j1, '#rrggbb' hex constants).
    */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (IsFxFunction(expression,"acosh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (IsFxFunction(expression,"asinh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (IsFxFunction(expression,"atanh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
        if (IsFxFunction(expression,"atan2",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        if ((isdigit((int) ((unsigned char) c)) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
      }  /* NOTE(review): falls through to 'J' when not scientific notation —
            looks intentional (no break in the original); confirm */
      case 'J':
      case 'j':
      {
        if ((IsFxFunction(expression,"j0",2) != MagickFalse) ||
            (IsFxFunction(expression,"j1",2) != MagickFalse))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        /* hexadecimal constant, e.g. #A0A0A0 */
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        default:
        {
          /*
            Implied multiplication, e.g. "2u" or ")(".  A default label in
            the middle of a switch is legal C: it matches any character the
            other case labels do not.
          */
          if (((c != 0) && ((isdigit((int) ((unsigned char) c)) != 0) ||
              (strchr(")",c) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
              (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
              ((isdigit((int) ((unsigned char) c)) == 0) &&
               (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /* a sign that follows an operator or '(' is unary, not addition */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha((int) ((unsigned char) c)) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case BitwiseAndAssignmentOperator:
        case BitwiseOrAssignmentOperator:
        case LeftShiftAssignmentOperator:
        case RightShiftAssignmentOperator:
        case PowerAssignmentOperator:
        case ModuloAssignmentOperator:
        case PlusAssignmentOperator:
        case SubtractAssignmentOperator:
        case MultiplyAssignmentOperator:
        case DivideAssignmentOperator:
        case IncrementAssignmentOperator:
        case DecrementAssignmentOperator:
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    /*
      Record the split point.  Right-associative operator classes keep the
      left-most occurrence at equal precedence (strict >); everything else
      keeps the right-most occurrence (>=, left-to-right).
    */
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);  /* skip (...) group */
    c=(int) (*expression++);
  }
  return(subexpression);
}

/*
  Recursively evaluate an FX (sub)expression for one pixel channel at (x,y)
  and return its value.  *beta receives the value of the right-hand operand
  of the last binary split; two-argument functions (atan2, pow, hypot, ...)
  read it back as their second argument.
*/
static double FxEvaluateSubexpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  const char *expression,const size_t depth,double *beta,
  ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth  58
#define FxMaxSubexpressionDepth  200
/* FxReturn releases the scratch buffer on every exit path. */
#define FxReturn(value) \
{ \
  subexpression=DestroyString(subexpression); \
  return(value); \
}
/*
  Split subexpression at the first top-level occurrence of the sentinel
  character: afterwards p points at the left part and q+1 at the right part.
  One parenthesized group is skipped while searching; an unterminated
  expression raises OptionError.  ("sentinal" spelling is historical.)
*/
#define FxParseConditional(subexpression,sentinal,p,q) \
{ \
  p=subexpression; \
  for (q=(char *) p; (*q != (sentinal)) && (*q != '\0'); q++) \
    if (*q == '(') \
      { \
        for (q++; (*q != ')') && (*q != '\0'); q++); \
        if (*q == '\0') \
          break; \
      } \
  if (*q == '\0') \
    { \
      (void) ThrowMagickException(exception,GetMagickModule(), \
        OptionError,"UnableToParseExpression","`%s'",subexpression); \
      FxReturn(0.0); \
    } \
  if (strlen(q) == 1) \
    *(q+1)='\0'; \
  *q='\0'; \
}

  char
    *q,
    *subexpression;

  double
    alpha,
    gamma,
    sans,
    value;

  const char
    *p;

  *beta=0.0;
  sans=0.0;
  subexpression=AcquireString(expression);
  *subexpression='\0';
  /* guard against runaway recursion on pathological expressions */
  if (depth > FxMaxSubexpressionDepth)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",expression);
      FxReturn(0.0);
    }
  if (exception->severity >= ErrorException)
    FxReturn(0.0);
  while (isspace((int) ((unsigned char) *expression)) != 0)
    expression++;
  if (*expression == '\0')
    FxReturn(0.0);
  /* split at the lowest-precedence operator and recurse on each side */
  p=FxOperatorPrecedence(expression,exception);
  if (p != (const char *) NULL)
    {
(void) CopyMagickString(subexpression,expression,(size_t) (p-expression+1)); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); switch ((unsigned char) *p) { case '~': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) (~(size_t) *beta); FxReturn(*beta); } case '!': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta == 0.0 ? 1.0 : 0.0); } case '^': { *beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p, depth+1,beta,exception)); FxReturn(*beta); } case '*': case ExponentialNotation: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha*(*beta)); } case '/': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(PerceptibleReciprocal(*beta)*alpha); } case '%': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fmod(alpha,*beta)); } case '+': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha+(*beta)); } case '-': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha-(*beta)); } case BitwiseAndAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(double) ((size_t) (alpha+0.5) & (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case BitwiseOrAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), 
OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(double) ((size_t) (alpha+0.5) | (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case LeftShiftAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (*beta+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } value=(double) ((size_t) (alpha+0.5) << (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case RightShiftAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (*beta+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } value=(double) ((size_t) (alpha+0.5) >> (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case PowerAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), 
OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=pow(alpha,*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case ModuloAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=fmod(alpha,*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case PlusAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha+(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case SubtractAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha-(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case MultiplyAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), 
OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha*(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case DivideAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha*PerceptibleReciprocal(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case IncrementAssignmentOperator: { if (*subexpression == '\0') alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha+1.0; if (*subexpression == '\0') { if (SetFxSymbolValue(fx_info,p,value) == MagickFalse) return(0.0); } else if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case DecrementAssignmentOperator: { if (*subexpression == '\0') alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha-1.0; if (*subexpression == '\0') { if (SetFxSymbolValue(fx_info,p,value) == MagickFalse) return(0.0); } else if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case LeftShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5)); FxReturn(*beta); } case RightShiftOperator: { 
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5)); FxReturn(*beta); } case '<': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha < *beta ? 1.0 : 0.0); } case LessThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha >= *beta ? 1.0 : 0.0); } case EqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5)); FxReturn(*beta); } case '|': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5)); FxReturn(*beta); } case LogicalAndOperator: { p++; if (alpha <= 0.0) { *beta=0.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 1.0 : 0.0; FxReturn(*beta); } case LogicalOrOperator: { p++; if (alpha > 0.0) { *beta=1.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 
1.0 : 0.0; FxReturn(*beta); } case '?': { (void) CopyMagickString(subexpression,++p,MagickPathExtent-1); FxParseConditional(subexpression,':',p,q); if (fabs(alpha) >= MagickEpsilon) gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); FxReturn(gamma); } case '=': { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case ',': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha); } case ';': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta); } default: { gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1, beta,exception); FxReturn(gamma); } } } if (strchr("(",(int) *expression) != (char *) NULL) { size_t length; if (depth >= FxMaxParenthesisDepth) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "ParenthesisNestedTooDeeply","`%s'",expression); length=CopyMagickString(subexpression,expression+1,MagickPathExtent); if (length != 0) subexpression[length-1]='\0'; gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); FxReturn(gamma); } switch (*expression) { case '+': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(1.0*gamma); } case '-': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(-1.0*gamma); } case '~': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); 
FxReturn((double) (~(size_t) (gamma+0.5))); } case 'A': case 'a': { if (IsFxFunction(expression,"abs",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(fabs(alpha)); } #if defined(MAGICKCORE_HAVE_ACOSH) if (IsFxFunction(expression,"acosh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(acosh(alpha)); } #endif if (IsFxFunction(expression,"acos",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(acos(alpha)); } #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"airy",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha); FxReturn(gamma*gamma); } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (IsFxFunction(expression,"asinh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(asinh(alpha)); } #endif if (IsFxFunction(expression,"asin",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(asin(alpha)); } if (IsFxFunction(expression,"alt",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(((ssize_t) alpha) & 0x01 ? 
-1.0 : 1.0); } if (IsFxFunction(expression,"atan2",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atan2(alpha,*beta)); } #if defined(MAGICKCORE_HAVE_ATANH) if (IsFxFunction(expression,"atanh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atanh(alpha)); } #endif if (IsFxFunction(expression,"atan",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(atan(alpha)); } if (LocaleCompare(expression,"a") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'B': case 'b': { if (LocaleCompare(expression,"b") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'C': case 'c': { if (IsFxFunction(expression,"ceil",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(ceil(alpha)); } if (IsFxFunction(expression,"clamp",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha < 0.0) FxReturn(0.0); if (alpha > 1.0) FxReturn(1.0); FxReturn(alpha); } if (IsFxFunction(expression,"cosh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(cosh(alpha)); } if (IsFxFunction(expression,"cos",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(cos(alpha)); } if (LocaleCompare(expression,"c") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'D': case 'd': { if (IsFxFunction(expression,"debug",5) != MagickFalse) { const char *type; size_t length; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); switch (fx_info->images->colorspace) { case CMYKColorspace: { switch 
(channel) { case CyanPixelChannel: type="cyan"; break; case MagentaPixelChannel: type="magenta"; break; case YellowPixelChannel: type="yellow"; break; case AlphaPixelChannel: type="alpha"; break; case BlackPixelChannel: type="black"; break; default: type="unknown"; break; } break; } case GRAYColorspace: { switch (channel) { case RedPixelChannel: type="gray"; break; case AlphaPixelChannel: type="alpha"; break; default: type="unknown"; break; } break; } default: { switch (channel) { case RedPixelChannel: type="red"; break; case GreenPixelChannel: type="green"; break; case BluePixelChannel: type="blue"; break; case AlphaPixelChannel: type="alpha"; break; default: type="unknown"; break; } break; } } *subexpression='\0'; length=1; if (strlen(expression) > 6) length=CopyMagickString(subexpression,expression+6, MagickPathExtent); if (length != 0) subexpression[length-1]='\0'; if (fx_info->file != (FILE *) NULL) (void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: " "%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type, subexpression,GetMagickPrecision(),alpha); FxReturn(alpha); } if (IsFxFunction(expression,"do",2) != MagickFalse) { size_t length; /* Parse do(expression,condition test). 
*/ length=CopyMagickString(subexpression,expression+3, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; } FxReturn(alpha); } if (IsFxFunction(expression,"drc",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((alpha/(*beta*(alpha-1.0)+1.0))); } break; } case 'E': case 'e': { if (LocaleCompare(expression,"epsilon") == 0) FxReturn(MagickEpsilon); #if defined(MAGICKCORE_HAVE_ERF) if (IsFxFunction(expression,"erf",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(erf(alpha)); } #endif if (IsFxFunction(expression,"exp",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(exp(alpha)); } if (LocaleCompare(expression,"e") == 0) FxReturn(2.7182818284590452354); break; } case 'F': case 'f': { if (IsFxFunction(expression,"floor",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression,"for",3) != MagickFalse) { double sans = 0.0; size_t length; /* Parse for(initialization, condition test, expression). 
*/ length=CopyMagickString(subexpression,expression+4, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); (void) CopyMagickString(subexpression,q+1,MagickPathExtent-1); FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); } FxReturn(alpha); } break; } case 'G': case 'g': { if (IsFxFunction(expression,"gauss",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI)); } if (IsFxFunction(expression,"gcd",3) != MagickFalse) { double gcd; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); gcd=FxGCD(alpha,*beta); FxReturn(gcd); } if (LocaleCompare(expression,"g") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'H': case 'h': { if (LocaleCompare(expression,"h") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (LocaleCompare(expression,"hue") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"hypot",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(hypot(alpha,*beta)); } break; } case 'K': case 'k': { if (LocaleCompare(expression,"k") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'I': case 'i': { if (IsFxFunction(expression,"if",2) != MagickFalse) { double sans = 0.0; size_t length; length=CopyMagickString(subexpression,expression+3, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); 
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); (void) CopyMagickString(subexpression,q+1,MagickPathExtent-1); FxParseConditional(subexpression,',',p,q); if (fabs(alpha) >= MagickEpsilon) alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); FxReturn(alpha); } if (LocaleCompare(expression,"intensity") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"int",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression,"isnan",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn((double) !!IsNaN(alpha)); } if (LocaleCompare(expression,"i") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'J': case 'j': { if (LocaleCompare(expression,"j") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); #if defined(MAGICKCORE_HAVE_J0) if (IsFxFunction(expression,"j0",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j0(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"j1",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j1(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"jinc",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); FxReturn((2.0*j1((MagickPI*alpha))/(MagickPI*alpha))); } #endif break; } case 'L': case 'l': { if (IsFxFunction(expression,"ln",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); 
FxReturn(log(alpha)); } if (IsFxFunction(expression,"logtwo",6) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn(log10(alpha)/log10(2.0)); } if (IsFxFunction(expression,"log",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(log10(alpha)); } if (LocaleCompare(expression,"lightness") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'M': case 'm': { if (LocaleCompare(expression,"MaxRGB") == 0) FxReturn(QuantumRange); if (LocaleNCompare(expression,"maxima",6) == 0) break; if (IsFxFunction(expression,"max",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha > *beta ? alpha : *beta); } if (LocaleNCompare(expression,"minima",6) == 0) break; if (IsFxFunction(expression,"min",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha < *beta ? 
alpha : *beta); } if (IsFxFunction(expression,"mod",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta)); } if (LocaleCompare(expression,"m") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'N': case 'n': { if (IsFxFunction(expression,"not",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((double) (alpha < MagickEpsilon)); } if (LocaleCompare(expression,"n") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'O': case 'o': { if (LocaleCompare(expression,"Opaque") == 0) FxReturn(1.0); if (LocaleCompare(expression,"o") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'P': case 'p': { if (LocaleCompare(expression,"phi") == 0) FxReturn(MagickPHI); if (LocaleCompare(expression,"pi") == 0) FxReturn(MagickPI); if (IsFxFunction(expression,"pow",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(pow(alpha,*beta)); } if (LocaleCompare(expression,"p") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Q': case 'q': { if (LocaleCompare(expression,"QuantumRange") == 0) FxReturn(QuantumRange); if (LocaleCompare(expression,"QuantumScale") == 0) FxReturn(QuantumScale); break; } case 'R': case 'r': { if (IsFxFunction(expression,"rand",4) != MagickFalse) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxEvaluateSubexpression) #endif alpha=GetPseudoRandomValue(fx_info->random_info); FxReturn(alpha); } if (IsFxFunction(expression,"round",5) != MagickFalse) { /* Round the fraction to nearest integer. 
*/ alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if ((alpha-floor(alpha)) < (ceil(alpha)-alpha)) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression,"r") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'S': case 's': { if (LocaleCompare(expression,"saturation") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"sign",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(alpha < 0.0 ? -1.0 : 1.0); } if (IsFxFunction(expression,"sinc",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0) FxReturn(1.0); FxReturn(sin((MagickPI*alpha))/(MagickPI*alpha)); } if (IsFxFunction(expression,"sinh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sinh(alpha)); } if (IsFxFunction(expression,"sin",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(sin(alpha)); } if (IsFxFunction(expression,"sqrt",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sqrt(alpha)); } if (IsFxFunction(expression,"squish",6) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn((1.0/(1.0+exp(-alpha)))); } if (LocaleCompare(expression,"s") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'T': case 't': { if (IsFxFunction(expression,"tanh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(tanh(alpha)); } if (IsFxFunction(expression,"tan",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, 
depth+1,beta,exception); FxReturn(tan(alpha)); } if (LocaleCompare(expression,"Transparent") == 0) FxReturn(0.0); if (IsFxFunction(expression,"trunc",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha >= 0.0) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression,"t") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'U': case 'u': { if (LocaleCompare(expression,"u") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'V': case 'v': { if (LocaleCompare(expression,"v") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'W': case 'w': { if (IsFxFunction(expression,"while",5) != MagickFalse) { size_t length; /* Parse while(condition test, expression). */ length=CopyMagickString(subexpression,expression+6, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1, beta,exception); } FxReturn(alpha); } if (LocaleCompare(expression,"w") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Y': case 'y': { if (LocaleCompare(expression,"y") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Z': case 'z': { if (LocaleCompare(expression,"z") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } default: break; } subexpression=DestroyString(subexpression); q=(char *) expression; alpha=InterpretSiPrefixValue(expression,&q); if (q == expression) alpha=FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception); FxReturn(alpha); } MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo 
*fx_info, double *alpha,ExceptionInfo *exception)
{
  /* Evaluate the expression for the gray channel at pixel (0,0). */
  MagickBooleanType
    status;

  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  return(status);
}

/*
  FxPreprocessExpression(): evaluate the expression once with the trace
  stream disabled so syntax errors are reported without emitting debug
  output.  Restores fx_info->file before returning.
*/
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *file;

  MagickBooleanType
    status;

  /* Temporarily suppress the debug/trace stream during the dry run. */
  file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  fx_info->file=file;
  return(status);
}

/*
  FxEvaluateChannelExpression(): evaluate the preprocessed expression for one
  channel at pixel (x,y); the result is stored in *alpha.
*/
MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  double *alpha,ExceptionInfo *exception)
{
  double
    beta;

  beta=0.0;
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0,
    &beta,exception);
  /* Only a parse failure (OptionError) is reported as MagickFalse. */
  return(exception->severity == OptionError ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F x I m a g e                                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxImage() applies a mathematical expression to the specified image.
%
%  The format of the FxImage method is:
%
%      Image *FxImage(const Image *image,const char *expression,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o expression: A mathematical expression.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  DestroyFxThreadSet(): release the per-thread FxInfo array allocated by
  AcquireFxThreadSet(); returns NULL.
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  ssize_t
    i;

  assert(fx_info != (FxInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (fx_info[i] != (FxInfo *) NULL)
      fx_info[i]=DestroyFxInfo(fx_info[i]);
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}

/*
  AcquireFxThreadSet(): allocate one FxInfo per worker thread so FxImage()
  can evaluate rows in parallel without sharing parser state.  Returns NULL
  (and destroys any partial set) if allocation or expression preprocessing
  fails.
*/
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  double
    alpha;

  FxInfo
    **fx_info;

  ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) memset(fx_info,0,number_threads*sizeof(*fx_info));
  /* A leading '@' means the expression is read from a file. */
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);
  /* NOTE(review): FileToString() can presumably return NULL on read failure;
     that NULL would flow into AcquireFxInfo() below -- confirm it is
     tolerated. */
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression,exception);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    /* Dry-run the expression once so syntax errors surface up front. */
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);
  return(fx_info);
}

MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define FxImageTag  "Fx/Image"

  CacheView
    *fx_view,
    *image_view;

  FxInfo
    **magick_restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A NULL expression degenerates to a plain clone. */
  if (expression == (const char *) NULL)
    return(CloneImage(image,0,0,MagickTrue,exception));
/* FxImage() continued: acquire per-thread state and the destination image. */
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    return((Image *) NULL);
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    Fx image: evaluate the expression for every pixel channel, row by row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(progress,status) \
    magick_number_threads(image,fx_image,fx_image->rows,1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    /* Each OpenMP thread uses its own FxInfo, selected by thread id. */
    const int
      id = GetOpenMPThreadId();

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;
        /* Channels flagged copy-through are transferred unmodified. */
        if ((fx_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(fx_image,channel,p[i],q);
            continue;
          }
        alpha=0.0;
        (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
          exception);
        /* Results are normalized [0,1]; scale back to the quantum range. */
        q[i]=ClampToQuantum(QuantumRange*alpha);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(fx_image);
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor !=
/* FxImage() continued: per-row progress reporting and cleanup. */
        (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FxImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  image_view=DestroyCacheView(image_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}
/* fx.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/annotate.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/decorate.h" #include "magick/distort.h" #include "magick/draw.h" #include "magick/effect.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/fx.h" #include "magick/fx-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/layer.h" #include "magick/list.h" #include "magick/log.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/opencl-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resize.h" #include "magick/resource_.h" #include "magick/splay-tree.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/token.h" #include "magick/transform.h" #include "magick/utility.h" /* Define declarations. 
*/

/*
  Compound operators are rewritten by AcquireFxInfo() into single bytes so
  the expression parser only ever deals with one-character operators.  The
  values start at 0xd9 to stay clear of printable ASCII.
*/
typedef enum
{
  BitwiseAndAssignmentOperator = 0xd9U,
  BitwiseOrAssignmentOperator,
  LeftShiftAssignmentOperator,
  RightShiftAssignmentOperator,
  PowerAssignmentOperator,
  ModuloAssignmentOperator,
  PlusAssignmentOperator,
  SubtractAssignmentOperator,
  MultiplyAssignmentOperator,
  DivideAssignmentOperator,
  IncrementAssignmentOperator,
  DecrementAssignmentOperator,
  LeftShiftOperator,
  RightShiftOperator,
  LessThanEqualOperator,
  GreaterThanEqualOperator,
  EqualOperator,
  NotEqualOperator,
  LogicalAndOperator,
  LogicalOrOperator,
  ExponentialNotation
} FxOperator;

struct _FxInfo
{
  const Image
    *images;        /* image sequence the expression operates on */

  char
    *expression;    /* preprocessed expression text */

  FILE
    *file;          /* trace/debug stream (may be NULL) */

  SplayTreeInfo
    *colors,        /* cache of named colors referenced by the expression */
    *symbols;       /* user variables and cached statistics */

  CacheView
    **view;         /* one virtual cache view per image in the sequence */

  RandomInfo
    *random_info;   /* state for the rand() function */

  ExceptionInfo
    *exception;
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   +   A c q u i r e F x I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireFxInfo() allocates the FxInfo structure.
%
%  The format of the AcquireFxInfo method is:
%
%      FxInfo *AcquireFxInfo(Image *images,const char *expression)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o expression: the expression.
%
*/
MagickExport FxInfo *AcquireFxInfo(const Image *images,const char *expression)
{
  const Image
    *next;

  FxInfo
    *fx_info;

  ssize_t
    i;

  unsigned char
    fx_op[2];

  fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
  (void) memset(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=images;
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /* One virtual cache view per image in the sequence. */
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  next=GetFirstImageInList(fx_info->images);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    fx_info->view[i]=AcquireVirtualCacheView(next,fx_info->exception);
    i++;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  /*
    Convert compound to simple operators.
*/
  /* Longer operators must be substituted before their shorter prefixes
     (e.g. "<<=" before "<<" before "<="). */
  fx_op[1]='\0';
  *fx_op=(unsigned char) BitwiseAndAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"&=",(char *) fx_op);
  *fx_op=(unsigned char) BitwiseOrAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"|=",(char *) fx_op);
  *fx_op=(unsigned char) LeftShiftAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"<<=",(char *) fx_op);
  *fx_op=(unsigned char) RightShiftAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,">>=",(char *) fx_op);
  *fx_op=(unsigned char) PowerAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"^=",(char *) fx_op);
  *fx_op=(unsigned char) ModuloAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"%=",(char *) fx_op);
  *fx_op=(unsigned char) PlusAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"+=",(char *) fx_op);
  *fx_op=(unsigned char) SubtractAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"-=",(char *) fx_op);
  *fx_op=(unsigned char) MultiplyAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"*=",(char *) fx_op);
  *fx_op=(unsigned char) DivideAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"/=",(char *) fx_op);
  *fx_op=(unsigned char) IncrementAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"++",(char *) fx_op);
  *fx_op=(unsigned char) DecrementAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"--",(char *) fx_op);
  *fx_op=(unsigned char) LeftShiftOperator;
  (void) SubstituteString(&fx_info->expression,"<<",(char *) fx_op);
  *fx_op=(unsigned char) RightShiftOperator;
  (void) SubstituteString(&fx_info->expression,">>",(char *) fx_op);
  *fx_op=(unsigned char) LessThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,"<=",(char *) fx_op);
  *fx_op=(unsigned char) GreaterThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,">=",(char *) fx_op);
  *fx_op=(unsigned char) EqualOperator;
  (void) SubstituteString(&fx_info->expression,"==",(char *) fx_op);
*fx_op=(unsigned char) NotEqualOperator;
  (void) SubstituteString(&fx_info->expression,"!=",(char *) fx_op);
  *fx_op=(unsigned char) LogicalAndOperator;
  (void) SubstituteString(&fx_info->expression,"&&",(char *) fx_op);
  *fx_op=(unsigned char) LogicalOrOperator;
  (void) SubstituteString(&fx_info->expression,"||",(char *) fx_op);
  *fx_op=(unsigned char) ExponentialNotation;
  (void) SubstituteString(&fx_info->expression,"**",(char *) fx_op);
  /*
    Force right-to-left associativity for unary negation: rewrite "-x" as
    "-1.0*x", then undo the rewrite where '-' was actually part of an
    exponent ("^-") or of scientific notation ("E-"/"e-").
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  (void) SubstituteString(&fx_info->expression," ","");  /* compact string */
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   +   D e s t r o y F x I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
%  The format of the DestroyFxInfo method is:
%
%      ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
*/
MagickExport FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  /*
    Release all resources owned by the FxInfo structure, in reverse order
    of acquisition, and return NULL.
  */
  ssize_t
    view;

  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  /* One cache view was acquired per image in the sequence. */
  view=(ssize_t) GetImageListLength(fx_info->images)-1;
  while (view >= 0)
  {
    fx_info->view[view]=DestroyCacheView(fx_info->view[view]);
    view--;
  }
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   +   F x E v a l u a t e C h a n n e l E x p r e s s i o n                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxEvaluateChannelExpression() evaluates an expression and returns the
%  results.
%
%  The format of the FxEvaluateExpression method is:
%
%      MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
%        const ChannelType channel,const ssize_t x,const ssize_t y,
%        double *alpha,Exceptioninfo *exception)
%      MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,double *alpha,
%        Exceptioninfo *exception)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
%    o channel: the channel.
%
%    o x,y: the pixel position.
%
%    o alpha: the result.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* GetFxSymbolValue(): look up a symbol in the symbol table; NULL if unset. */
static inline const double *GetFxSymbolValue(FxInfo *fx_info,const char *symbol)
{
  return((const double *) GetValueFromSplayTree(fx_info->symbols,symbol));
}

/* SetFxSymbolValue(): create or update a symbol-table entry. */
static inline MagickBooleanType SetFxSymbolValue(
  FxInfo *magick_restrict fx_info,const char *magick_restrict symbol,
  const double value)
{
  double
    *object;

  object=(double *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (object != (double *) NULL)
    {
      *object=value;
      return(MagickTrue);
    }
  object=(double *) AcquireMagickMemory(sizeof(*object));
  if (object == (double *) NULL)
    {
      (void) ThrowMagickException(fx_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        fx_info->images->filename);
      return(MagickFalse);
    }
  *object=value;
  return(AddValueToSplayTree(fx_info->symbols,ConstantString(symbol),object));
}

/*
  FxChannelStatistics(): compute a per-channel statistic (depth, kurtosis,
  maxima, mean, minima, skewness, standard_deviation).  Results are cached
  in the symbol table under a key built from image address, channel, and
  symbol, and returned scaled by QuantumScale.
*/
static double FxChannelStatistics(FxInfo *fx_info,const Image *image,
  ChannelType channel,const char *symbol,ExceptionInfo *exception)
{
  char
    channel_symbol[MaxTextExtent],
    key[MaxTextExtent];

  const double
    *value;

  double
    statistic;

  const char
    *p;

  /* A ".channel" suffix (e.g. "mean.r") overrides the current channel. */
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  *channel_symbol='\0';
  if (*p == '.')
    {
      ssize_t
        option;

      (void) CopyMagickString(channel_symbol,p+1,MaxTextExtent);
      option=ParseCommandOption(MagickChannelOptions,MagickTrue,channel_symbol);
      if (option >= 0)
        channel=(ChannelType) option;
    }
  (void) FormatLocaleString(key,MaxTextExtent,"%p.%.20g.%s",(void *) image,
    (double) channel,symbol);
  value=GetFxSymbolValue(fx_info,key);
  if (value != (const double *) NULL)
    return(QuantumScale*(*value));
  statistic=0.0;
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      size_t
        depth;

      depth=GetImageChannelDepth(image,channel,exception);
      statistic=(double) depth;
    }
  if (LocaleNCompare(symbol,"kurtosis",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness,
        exception);
      statistic=kurtosis;
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageChannelRange(image,channel,&minima,&maxima,exception);
      statistic=maxima;
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageChannelMean(image,channel,&mean,&standard_deviation,
        exception);
      statistic=mean;
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageChannelRange(image,channel,&minima,&maxima,exception);
      statistic=minima;
    }
  if (LocaleNCompare(symbol,"skewness",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness,
        exception);
      statistic=skewness;
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageChannelMean(image,channel,&mean,&standard_deviation,
        exception);
      statistic=standard_deviation;
    }
  if (SetFxSymbolValue(fx_info,key,statistic) == MagickFalse)
    return(0.0);
  return(QuantumScale*statistic);
}

static double FxEvaluateSubexpression(FxInfo *,const ChannelType,const ssize_t,
  const ssize_t,const char *,const size_t,double *,ExceptionInfo *);

/*
  IsFxFunction(): MagickTrue if expression begins with the given function
  name followed by a non-space character or '('.
*/
static inline MagickBooleanType IsFxFunction(const char *expression,
  const char *name,const size_t length)
{
  int
    c;

  size_t
    i;

  /* Reject expressions shorter than the function name itself. */
  for (i=0; i <= length; i++)
    if (expression[i] == '\0')
      return(MagickFalse);
  c=expression[length];
  if ((LocaleNCompare(expression,name,length) == 0) &&
      ((isspace((int) ((unsigned char) c)) == 0) || (c == '(')))
    return(MagickTrue);
  return(MagickFalse);
}

/* FxGCD(): greatest common divisor by Euclid's method, 0.001 tolerance. */
static inline double FxGCD(const double alpha,const double beta)
{
  if (alpha < beta)
    return(FxGCD(beta,alpha));
  if (fabs(beta) < 0.001)
    return(alpha);
  return(FxGCD(beta,alpha-beta*floor(alpha/beta)));
}

/*
  FxSubexpression(): advance to the closing parenthesis of the current
  subexpression; throws UnbalancedParenthesis if none is found.
*/
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  const char
    *subexpression;

  ssize_t
    level;

  level=0;
  subexpression=expression;
  while ((*subexpression != '\0') &&
         ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL)))
  {
    if (strchr("(",(int) *subexpression) != (char *) NULL)
      level++;
    else
      if
(strchr(")",(int) *subexpression) != (char *) NULL)
        level--;
    subexpression++;
  }
  if (*subexpression == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(subexpression);
}

/*
  FxGetSymbol(): resolve a symbol reference -- image selectors (s/u/v),
  pixel addressing (p{...}/p[...]), channel values, named colors, image
  statistics, image attributes, and user variables.
*/
static double FxGetSymbol(FxInfo *fx_info,const ChannelType channel,
  const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
  ExceptionInfo *exception)
{
  char
    *q,
    symbol[MaxTextExtent];

  const char
    *artifact,
    *p;

  const double
    *value;

  double
    alpha,
    beta;

  Image
    *image;

  MagickBooleanType
    status;

  MagickPixelPacket
    pixel;

  PointInfo
    point;

  ssize_t
    i;

  size_t
    level;

  p=expression;
  i=GetImageIndexInList(fx_info->images);
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  if (isalpha((int) ((unsigned char) *(p+1))) == 0)
    {
      char
        *subexpression;

      subexpression=AcquireString(expression);
      /* s/u/v select the source image: s=current, u=first, v=second. */
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          switch (*p)
          {
            case 's':
            default:
            {
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              /* u[n]: evaluate the bracketed image-index expression. */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              i=(ssize_t) alpha;
              if (*p != '\0')
                p++;
            }
          if (*p == '.')
            p++;
        }
      if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
        {
          p++;
          if (*p == '{')
            {
              /* p{x,y}: absolute pixel coordinates. */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              point.x=alpha;
              point.y=beta;
              if (*p != '\0')
                p++;
            }
          else
            if (*p == '[')
              {
                /* p[dx,dy]: coordinates relative to the current pixel. */
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  depth,&beta,exception);
                point.x+=alpha;
                point.y+=beta;
                if (*p != '\0')
                  p++;
              }
          if (*p == '.')
            p++;
        }
      subexpression=DestroyString(subexpression);
    }
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  i=GetImageIndexInList(image);
  GetMagickPixelPacket(image,&pixel);
  status=InterpolateMagickPixelPacket(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  (void) status;
  /* Try named-color resolution for symbols that are not channel keywords. */
  if ((*p != '\0') && (*(p+1) != '\0') && (*(p+2) != '\0') &&
      (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) &&
      (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) &&
      (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      char
        name[MaxTextExtent];

      size_t
        length;

      (void) CopyMagickString(name,p,MaxTextExtent);
      length=strlen(name);
      /* Trim a trailing ".channel" suffix (but stop at a ')'). */
      for (q=name+length-1; q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      q=name;
      if ((*q != '\0') && (*(q+1) != '\0') && (*(q+2) != '\0') &&
          (GetFxSymbolValue(fx_info,name) == (const double *) NULL))
        {
          MagickPixelPacket
            *color;

          /* Named-color lookup, cached in fx_info->colors. */
          color=(MagickPixelPacket *) GetValueFromSplayTree(fx_info->colors,
            name);
          if (color != (MagickPixelPacket *) NULL)
            {
              pixel=(*color);
              p+=length;
            }
          else
            if (QueryMagickColor(name,&pixel,fx_info->exception) != MagickFalse)
              {
                (void) AddValueToSplayTree(fx_info->colors,ConstantString(name),
                  CloneMagickPixelPacket(&pixel));
                p+=length;
              }
        }
    }
  (void) CopyMagickString(symbol,p,MaxTextExtent);
  StripString(symbol);
  if (*symbol == '\0')
    {
      /* Bare pixel reference: return the current channel's value. */
      switch (channel)
      {
        case RedChannel: return(QuantumScale*pixel.red);
        case GreenChannel: return(QuantumScale*pixel.green);
        case BlueChannel: return(QuantumScale*pixel.blue);
        case OpacityChannel:
        {
          double
            alpha;

          if (pixel.matte == MagickFalse)
            return(1.0);
          alpha=(double) (QuantumScale*GetPixelAlpha(&pixel));
          return(alpha);
        }
        case IndexChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.index);
        }
        case DefaultChannels:
          return(QuantumScale*GetMagickPixelIntensity(image,&pixel));
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  /* Dispatch on the symbol's first character. */
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((double) (QuantumScale*GetPixelAlpha(&pixel)));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (IsFxFunction(symbol,"channel",7) != MagickFalse)
        {
          GeometryInfo
            channel_info;

          MagickStatusType
            flags;

          /* channel(r,g,b,o) / channel(c,m,y,k,o): per-channel literals. */
          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case OpacityChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BlueChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case OpacityChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            case IndexChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            default:
              return(0.0);
          }
        }
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'E':
    case 'e':
    {
      if (LocaleCompare(symbol,"extent") == 0)
        {
          if (image->extent != 0)
            return((double) image->extent);
          return((double) GetBlobSize(image));
        }
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.index);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((double) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
            ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->x_resolution);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->y_resolution);
      if (LocaleCompare(symbol,"intensity") == 0)
        return(QuantumScale*GetMagickPixelIntensity(image,&pixel));
      if (LocaleCompare(symbol,"i") == 0)
        return((double) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((double) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
            ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luma") == 0)
        {
          double
            luma;

          /* Rec.709 luma coefficients. */
          luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luma);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminance;

          luminance=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luminance);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return((double) GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.opacity);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return((double) image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return((double) image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return((double) image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return((double) image->page.y);
      if (LocaleCompare(symbol,"printsize.x") == 0)
        return(PerceptibleReciprocal(image->x_resolution)*image->columns);
      if (LocaleCompare(symbol,"printsize.y") == 0)
        return(PerceptibleReciprocal(image->y_resolution)*image->rows);
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(symbol,"quality") == 0)
        return((double) image->quality);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->x_resolution);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->y_resolution);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
            ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"skewness",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return((double) GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((double) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        {
          double
            depth;

          depth=(double) GetImageChannelDepth(image,channel,fx_info->exception);
          return(depth);
        }
      break;
    }
    default:
      break;
  }
  /* Fall back to user variables, then image artifacts. */
  value=GetFxSymbolValue(fx_info,symbol);
  if (value != (const double *) NULL)
    return(*value);
  artifact=GetImageArtifact(image,symbol);
  if (artifact != (const char *) NULL)
    return(StringToDouble(artifact,(char **) NULL));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UndefinedVariable","`%s'",symbol);
  (void) SetFxSymbolValue(fx_info,symbol,0.0);
  return(0.0);
}

static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  }
FxPrecedence;  /* completes the typedef begun on the previous line */

  FxPrecedence
    precedence,
    target;

  const char
    *subexpression;

  int
    c;

  size_t
    level;

  /*
    Single left-to-right scan; c holds the previous character, level tracks
    {}/[] nesting, and subexpression remembers the best split point so far.
  */
  c=(-1);
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while ((c != '\0') && (*expression != '\0'))
  {
    precedence=UndefinedPrecedence;
    if ((isspace((int) ((unsigned char) *expression)) != 0) ||
        (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      Skip over multi-character tokens that contain operator-looking
      characters (e.g. the '2' in atan2, E+/E- exponents, hex colors).
    */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (IsFxFunction(expression,"acosh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (IsFxFunction(expression,"asinh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (IsFxFunction(expression,"atanh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
        if (IsFxFunction(expression,"atan2",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        if ((isdigit((int) ((unsigned char) c)) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
      }
      /* NOTE(review): no break above -- 'E'/'e' falls through to 'J'/'j'.
         Harmless since the j0/j1 tests cannot match an 'e', but it is an
         unannotated fallthrough worth confirming upstream. */
      case 'J':
      case 'j':
      {
        if ((IsFxFunction(expression,"j0",2) != MagickFalse) ||
            (IsFxFunction(expression,"j1",2) != MagickFalse))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        /* skip the digits of a hex constant */
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        default:
        {
          /*
            Implicit multiplication, e.g. "2u" or ")(", but never before
            the coordinate symbols x/y.
          */
          if (((c != 0) && ((isdigit((int) ((unsigned char) c)) != 0) ||
               (strchr(")",c) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
                (strchr("(",(int) ((unsigned char) *expression)) !=
                 (char *) NULL)) ||
               ((isdigit((int) ((unsigned char) c)) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) ==
               (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /* binary +/- only: a sign after another operator is unary */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha((int) ((unsigned char) c)) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case BitwiseAndAssignmentOperator:
        case BitwiseOrAssignmentOperator:
        case LeftShiftAssignmentOperator:
        case RightShiftAssignmentOperator:
        case PowerAssignmentOperator:
        case ModuloAssignmentOperator:
        case PlusAssignmentOperator:
        case SubtractAssignmentOperator:
        case MultiplyAssignmentOperator:
        case DivideAssignmentOperator:
        case IncrementAssignmentOperator:
        case DecrementAssignmentOperator:
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}

/*
  FxEvaluateSubexpression() recursively evaluates an fx expression for the
  given channel at pixel (x,y).  The secondary operand/result is returned
  in *beta; depth guards against runaway recursion.
*/
static double FxEvaluateSubexpression(FxInfo *fx_info,const ChannelType channel,
  const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
  double *beta,ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58
#define FxMaxSubexpressionDepth 200
/* every exit path must free the scratch buffer */
#define FxReturn(value) \
{ \
  subexpression=DestroyString(subexpression); \
  return(value); \
}
/* Split subexpression at the first `sentinal' outside parentheses:
   p keeps the head, q is left pointing at the (NUL-ed) terminator. */
#define FxParseConditional(subexpression,sentinal,p,q) \
{ \
  p=subexpression; \
  for (q=(char *) p; (*q != (sentinal)) && (*q != '\0'); q++) \
    if (*q == '(') \
      { \
        for (q++; (*q != ')') && (*q != '\0'); q++); \
        if (*q == '\0') \
          break; \
      } \
  if (*q == '\0') \
    { \
      (void) ThrowMagickException(exception,GetMagickModule(), \
        OptionError,"UnableToParseExpression","`%s'",subexpression); \
      FxReturn(0.0); \
    } \
  if (strlen(q) == 1) \
    *(q+1)='\0'; \
  *q='\0'; \
}

  char
    *q,
    *subexpression;

  double
    alpha,
    gamma,
    sans,
    value;

  const char
    *p;

  *beta=0.0;
  sans=0.0;
  subexpression=AcquireString(expression);
  *subexpression='\0';
  if (depth > FxMaxSubexpressionDepth)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",expression);
      FxReturn(0.0);
    }
  if (exception->severity >= ErrorException)
    FxReturn(0.0);
  while (isspace((int) ((unsigned char) *expression)) != 0)
    expression++;
  if (*expression == '\0')
    FxReturn(0.0);
  p=FxOperatorPrecedence(expression,exception);
  if (p != (const char *) NULL)
    {
      /*
        Split at the loosest-binding operator: evaluate the left side into
        alpha, then dispatch on the operator character at p.
      */
      (void) CopyMagickString(subexpression,expression,(size_t)
        (p-expression+1));
      alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
        beta,exception);
      switch ((unsigned char) *p)
      {
        case '~':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          *beta=(double) (~(size_t) *beta);
          FxReturn(*beta);
        }
        case '!':
        {
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta == 0.0 ? 1.0 : 0.0); } case '^': { *beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p, depth+1,beta,exception)); FxReturn(*beta); } case '*': case ExponentialNotation: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha*(*beta)); } case '/': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(PerceptibleReciprocal(*beta)*alpha); } case '%': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fmod(alpha,*beta)); } case '+': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha+(*beta)); } case '-': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha-(*beta)); } case BitwiseAndAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(double) ((size_t) (alpha+0.5) & (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case BitwiseOrAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(double) ((size_t) (alpha+0.5) | (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case LeftShiftAssignmentOperator: { 
q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (*beta+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } value=(double) ((size_t) (alpha+0.5) << (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case RightShiftAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (*beta+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } value=(double) ((size_t) (alpha+0.5) >> (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case PowerAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=pow(alpha,*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case ModuloAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != 
'\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=fmod(alpha,*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case PlusAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha+(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case SubtractAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha-(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case MultiplyAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha*(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case DivideAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) 
ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha*PerceptibleReciprocal(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case IncrementAssignmentOperator: { if (*subexpression == '\0') alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha+1.0; if (*subexpression == '\0') { if (SetFxSymbolValue(fx_info,p,value) == MagickFalse) return(0.0); } else if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case DecrementAssignmentOperator: { if (*subexpression == '\0') alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha-1.0; if (*subexpression == '\0') { if (SetFxSymbolValue(fx_info,p,value) == MagickFalse) return(0.0); } else if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case LeftShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5)); FxReturn(*beta); } case RightShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5)); FxReturn(*beta); } case '<': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha < *beta ? 
1.0 : 0.0); } case LessThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha >= *beta ? 1.0 : 0.0); } case EqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5)); FxReturn(*beta); } case '|': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5)); FxReturn(*beta); } case LogicalAndOperator: { p++; if (alpha <= 0.0) { *beta=0.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 1.0 : 0.0; FxReturn(*beta); } case LogicalOrOperator: { p++; if (alpha > 0.0) { *beta=1.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 
1.0 : 0.0; FxReturn(*beta); } case '?': { double gamma; (void) CopyMagickString(subexpression,++p,MaxTextExtent-1); FxParseConditional(subexpression,':',p,q); if (fabs(alpha) >= MagickEpsilon) gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); FxReturn(gamma); } case '=': { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case ',': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha); } case ';': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta); } default: { gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1, beta,exception); FxReturn(gamma); } } } if (strchr("(",(int) *expression) != (char *) NULL) { size_t length; if (depth >= FxMaxParenthesisDepth) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "ParenthesisNestedTooDeeply","`%s'",expression); length=CopyMagickString(subexpression,expression+1,MaxTextExtent); if (length != 0) subexpression[length-1]='\0'; gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); FxReturn(gamma); } switch (*expression) { case '+': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(1.0*gamma); } case '-': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(-1.0*gamma); } case '~': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); 
FxReturn((double) (~(size_t) (gamma+0.5))); } case 'A': case 'a': { if (IsFxFunction(expression,"abs",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(fabs(alpha)); } #if defined(MAGICKCORE_HAVE_ACOSH) if (IsFxFunction(expression,"acosh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(acosh(alpha)); } #endif if (IsFxFunction(expression,"acos",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(acos(alpha)); } #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"airy",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha); FxReturn(gamma*gamma); } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (IsFxFunction(expression,"asinh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(asinh(alpha)); } #endif if (IsFxFunction(expression,"asin",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(asin(alpha)); } if (IsFxFunction(expression,"alt",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(((ssize_t) alpha) & 0x01 ? 
-1.0 : 1.0); } if (IsFxFunction(expression,"atan2",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atan2(alpha,*beta)); } #if defined(MAGICKCORE_HAVE_ATANH) if (IsFxFunction(expression,"atanh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atanh(alpha)); } #endif if (IsFxFunction(expression,"atan",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(atan(alpha)); } if (LocaleCompare(expression,"a") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'B': case 'b': { if (LocaleCompare(expression,"b") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'C': case 'c': { if (IsFxFunction(expression,"ceil",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(ceil(alpha)); } if (IsFxFunction(expression,"clamp",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha < 0.0) FxReturn(0.0); if (alpha > 1.0) FxReturn(1.0); FxReturn(alpha); } if (IsFxFunction(expression,"cosh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(cosh(alpha)); } if (IsFxFunction(expression,"cos",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(cos(alpha)); } if (LocaleCompare(expression,"c") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'D': case 'd': { if (IsFxFunction(expression,"debug",5) != MagickFalse) { const char *type; size_t length; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); switch (fx_info->images->colorspace) { case CMYKColorspace: { switch 
(channel) { case CyanChannel: type="cyan"; break; case MagentaChannel: type="magenta"; break; case YellowChannel: type="yellow"; break; case AlphaChannel: type="alpha"; break; case BlackChannel: type="black"; break; default: type="unknown"; break; } break; } case GRAYColorspace: { switch (channel) { case RedChannel: type="gray"; break; case AlphaChannel: type="alpha"; break; default: type="unknown"; break; } break; } default: { switch (channel) { case RedChannel: type="red"; break; case GreenChannel: type="green"; break; case BlueChannel: type="blue"; break; case AlphaChannel: type="alpha"; break; default: type="unknown"; break; } break; } } *subexpression='\0'; length=1; if (strlen(expression) > 6) length=CopyMagickString(subexpression,expression+6,MaxTextExtent); if (length != 0) subexpression[length-1]='\0'; if (fx_info->file != (FILE *) NULL) (void) FormatLocaleFile(fx_info->file, "%s[%.20g,%.20g].%s: %s=%.*g\n",fx_info->images->filename, (double) x,(double) y,type,subexpression,GetMagickPrecision(), (double) alpha); FxReturn(alpha); } if (IsFxFunction(expression,"do",2) != MagickFalse) { size_t length; /* Parse do(expression,condition test). 
*/ length=CopyMagickString(subexpression,expression+6, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; } FxReturn(alpha); } if (IsFxFunction(expression,"drc",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((alpha/(*beta*(alpha-1.0)+1.0))); } break; } case 'E': case 'e': { if (LocaleCompare(expression,"epsilon") == 0) FxReturn(MagickEpsilon); #if defined(MAGICKCORE_HAVE_ERF) if (IsFxFunction(expression,"erf",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(erf(alpha)); } #endif if (IsFxFunction(expression,"exp",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(exp(alpha)); } if (LocaleCompare(expression,"e") == 0) FxReturn(2.7182818284590452354); break; } case 'F': case 'f': { if (IsFxFunction(expression,"floor",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression,"for",3) != MagickFalse) { double sans = 0.0; size_t length; /* Parse for(initialization, condition test, expression). 
*/ length=CopyMagickString(subexpression,expression+4, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); (void) CopyMagickString(subexpression,q+1,MagickPathExtent-1); FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); } FxReturn(alpha); } break; } case 'G': case 'g': { if (IsFxFunction(expression,"gauss",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI)); } if (IsFxFunction(expression,"gcd",3) != MagickFalse) { double gcd; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); if (IsNaN(alpha)) FxReturn(alpha); gcd=FxGCD(alpha,*beta); FxReturn(gcd); } if (LocaleCompare(expression,"g") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'H': case 'h': { if (LocaleCompare(expression,"h") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (LocaleCompare(expression,"hue") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"hypot",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(hypot(alpha,*beta)); } break; } case 'K': case 'k': { if (LocaleCompare(expression,"k") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'I': case 'i': { if (IsFxFunction(expression,"if",2) != MagickFalse) { double sans = 0.0; size_t length; /* Parse if(condition test, true-expression, false-expression). 
*/ length=CopyMagickString(subexpression,expression+3, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); (void) CopyMagickString(subexpression,q+1,MagickPathExtent-1); FxParseConditional(subexpression,',',p,q); if (fabs(alpha) >= MagickEpsilon) alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); FxReturn(alpha); } if (LocaleCompare(expression,"intensity") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"int",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression,"isnan",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn((double) !!IsNaN(alpha)); } if (LocaleCompare(expression,"i") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'J': case 'j': { if (LocaleCompare(expression,"j") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); #if defined(MAGICKCORE_HAVE_J0) if (IsFxFunction(expression,"j0",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j0(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"j1",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j1(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"jinc",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); FxReturn((2.0*j1((MagickPI*alpha))/(MagickPI*alpha))); } #endif break; } case 
'L': case 'l': { if (IsFxFunction(expression,"ln",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(log(alpha)); } if (IsFxFunction(expression,"logtwo",6) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn(log10(alpha)/log10(2.0)); } if (IsFxFunction(expression,"log",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(log10(alpha)); } if (LocaleCompare(expression,"lightness") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'M': case 'm': { if (LocaleCompare(expression,"MaxRGB") == 0) FxReturn((double) QuantumRange); if (LocaleNCompare(expression,"maxima",6) == 0) break; if (IsFxFunction(expression,"max",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha > *beta ? alpha : *beta); } if (LocaleNCompare(expression,"minima",6) == 0) break; if (IsFxFunction(expression,"min",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha < *beta ? 
alpha : *beta); } if (IsFxFunction(expression,"mod",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta)); } if (LocaleCompare(expression,"m") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'N': case 'n': { if (IsFxFunction(expression,"not",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((double) (alpha < MagickEpsilon)); } if (LocaleCompare(expression,"n") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'O': case 'o': { if (LocaleCompare(expression,"Opaque") == 0) FxReturn(1.0); if (LocaleCompare(expression,"o") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'P': case 'p': { if (LocaleCompare(expression,"phi") == 0) FxReturn(MagickPHI); if (LocaleCompare(expression,"pi") == 0) FxReturn(MagickPI); if (IsFxFunction(expression,"pow",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(pow(alpha,*beta)); } if (LocaleCompare(expression,"p") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Q': case 'q': { if (LocaleCompare(expression,"QuantumRange") == 0) FxReturn((double) QuantumRange); if (LocaleCompare(expression,"QuantumScale") == 0) FxReturn(QuantumScale); break; } case 'R': case 'r': { if (IsFxFunction(expression,"rand",4) != MagickFalse) { double alpha; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxEvaluateSubexpression) #endif alpha=GetPseudoRandomValue(fx_info->random_info); FxReturn(alpha); } if (IsFxFunction(expression,"round",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if ((alpha-floor(alpha)) < (ceil(alpha)-alpha)) 
FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression,"r") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'S': case 's': { if (LocaleCompare(expression,"saturation") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"sign",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(alpha < 0.0 ? -1.0 : 1.0); } if (IsFxFunction(expression,"sinc",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0) FxReturn(1.0); FxReturn(sin((MagickPI*alpha))/(MagickPI*alpha)); } if (IsFxFunction(expression,"sinh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sinh(alpha)); } if (IsFxFunction(expression,"sin",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(sin(alpha)); } if (IsFxFunction(expression,"sqrt",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sqrt(alpha)); } if (IsFxFunction(expression,"squish",6) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn((1.0/(1.0+exp(-alpha)))); } if (LocaleCompare(expression,"s") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'T': case 't': { if (IsFxFunction(expression,"tanh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(tanh(alpha)); } if (IsFxFunction(expression,"tan",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(tan(alpha)); } if (LocaleCompare(expression,"Transparent") == 0) FxReturn(0.0); if 
(IsFxFunction(expression,"trunc",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha >= 0.0) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression,"t") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'U': case 'u': { if (LocaleCompare(expression,"u") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'V': case 'v': { if (LocaleCompare(expression,"v") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'W': case 'w': { if (IsFxFunction(expression,"while",5) != MagickFalse) { size_t length; /* Parse while(condition,expression). */ length=CopyMagickString(subexpression,expression+6, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); if (fabs(gamma) < MagickEpsilon) break; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); } FxReturn(alpha); } if (LocaleCompare(expression,"w") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Y': case 'y': { if (LocaleCompare(expression,"y") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Z': case 'z': { if (LocaleCompare(expression,"z") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } default: break; } q=(char *) expression; alpha=InterpretSiPrefixValue(expression,&q); if (q == expression) alpha=FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception); FxReturn(alpha); } MagickExport MagickBooleanType FxEvaluateExpression(FxInfo *fx_info, double *alpha,ExceptionInfo *exception) { MagickBooleanType status; status=FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception); return(status); } 
/*
  FxPreprocessExpression(): dry-run the expression once at pixel (0,0) on the
  Gray channel with the FxInfo debug file handle temporarily detached, so any
  debug() output inside the expression is suppressed.  Used to validate an
  expression before the per-pixel pass (see AcquireFxThreadSet below).

  Returns the evaluation status; *alpha receives the computed value.
*/
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *file;

  MagickBooleanType
    status;

  /* Detach the file handle so debug() inside the expression stays silent. */
  file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception);
  fx_info->file=file;  /* restore the handle for normal evaluation */
  return(status);
}

/*
  FxEvaluateChannelExpression(): evaluate fx_info->expression for the given
  channel at pixel (x,y).  *alpha receives the result.  Returns MagickFalse
  only when the evaluation raised an OptionError in `exception'.
*/
MagickExport MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const ChannelType channel,const ssize_t x,const ssize_t y,double *alpha,
  ExceptionInfo *exception)
{
  double
    beta;

  beta=0.0;
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0,
    &beta,exception);
  return(exception->severity == OptionError ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F x I m a g e                                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxImage() applies a mathematical expression to the specified image.
%
%  The format of the FxImage method is:
%
%      Image *FxImage(const Image *image,const char *expression,
%        ExceptionInfo *exception)
%      Image *FxImageChannel(const Image *image,const ChannelType channel,
%        const char *expression,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o expression: A mathematical expression.
%
%    o exception: return any errors or warnings in this structure.
% */

/*
  DestroyFxThreadSet(): destroy every per-thread FxInfo in the set and release
  the array itself.  Always returns NULL so callers can write
  `fx_info=DestroyFxThreadSet(fx_info);'.
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  ssize_t
    i;

  assert(fx_info != (FxInfo **) NULL);
  /* The set is sized by the thread resource limit (see AcquireFxThreadSet). */
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (fx_info[i] != (FxInfo *) NULL)
      fx_info[i]=DestroyFxInfo(fx_info[i]);
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}

/*
  AcquireFxThreadSet(): allocate one FxInfo per worker thread for `expression'
  (or, when the expression starts with '@', for the contents of the named
  file).  Each FxInfo is preprocessed once; on any failure the whole set is
  destroyed and NULL is returned.
*/
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  double
    alpha;

  FxInfo
    **fx_info;

  ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) memset(fx_info,0,number_threads*sizeof(*fx_info));
  /* '@filename' means: read the expression from a file. */
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    /* Validate the expression once per thread before the per-pixel pass. */
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  /* Partial construction: tear everything down (DestroyFxThreadSet skips
     NULL slots, so a break above is safe). */
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);
  return(fx_info);
}

/* FxImage(): convenience wrapper — apply the expression to the Gray channel. */
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  Image
    *fx_image;

  fx_image=FxImageChannel(image,GrayChannel,expression,exception);
  return(fx_image);
}

/*
  FxImageChannel(): clone `image' and overwrite the selected channels of the
  clone with the per-pixel value of `expression'.  Rows are processed in
  parallel; each OpenMP thread uses its own preprocessed FxInfo.  Returns the
  new image, or NULL (with `exception' set) on failure.
*/
MagickExport Image *FxImageChannel(const Image *image,const ChannelType channel,
  const char *expression,ExceptionInfo *exception)
{
#define FxImageTag  "Fx/Image"

  CacheView
    *fx_view;

  FxInfo
    **magick_restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A NULL expression degenerates to a plain clone. */
  if (expression == (const char *) NULL)
    return(CloneImage(image,0,0,MagickTrue,exception));
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    return((Image *) NULL);
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(fx_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&fx_image->exception);
      fx_info=DestroyFxThreadSet(fx_info);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    Fx image.
  */
  status=MagickTrue;
  progress=0;
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* Run single-threaded when the expression contains debug(), so its output
     is not interleaved. */
  #pragma omp parallel for schedule(dynamic) shared(progress,status) \
    magick_number_threads(image,fx_image,fx_image->rows, \
      GlobExpression(fx_info[0]->expression,"debug(",MagickTrue) == 0 ? 1 : 0)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      alpha;

    IndexPacket
      *magick_restrict fx_indexes;

    ssize_t
      x;

    PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    fx_indexes=GetCacheViewAuthenticIndexQueue(fx_view);
    alpha=0.0;
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      /* Expression results are normalized [0,1]; scale to quantum range. */
      if ((channel & RedChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],RedChannel,x,y,
            &alpha,exception);
          SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*alpha));
        }
      if ((channel & GreenChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],GreenChannel,x,y,
            &alpha,exception);
          SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*alpha));
        }
      if ((channel & BlueChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],BlueChannel,x,y,
            &alpha,exception);
          SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*alpha));
        }
      if ((channel & OpacityChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],OpacityChannel,x,y,
            &alpha,exception);
          /* With a matte channel, alpha is inverted into opacity. */
          if (image->matte == MagickFalse)
            SetPixelOpacity(q,ClampToQuantum((MagickRealType) QuantumRange*
              alpha));
          else
            SetPixelOpacity(q,ClampToQuantum((MagickRealType) (QuantumRange-
              QuantumRange*alpha)));
        }
      if (((channel & IndexChannel) != 0) &&
          (fx_image->colorspace == CMYKColorspace))
        {
          (void) FxEvaluateChannelExpression(fx_info[id],IndexChannel,x,y,
            &alpha,exception);
          SetPixelIndex(fx_indexes+x,ClampToQuantum((MagickRealType)
            QuantumRange*alpha));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FxImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}
tool.c
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <dirent.h>
#include <limits.h>
#include <string.h>
#include <stdio.h>
#include <limits.h>
#include <time.h>
#include <sys/time.h>
#ifndef __clang__
#include <omp.h>
#endif
#include "tool.h"
#include "bloom.h"
#include "file_dir.h"

/*
In the following functions, there are two different read formats and two read
scanning mode
'q' means fastq format; 'a' means fasta format;
'n' means normal mode; 'r' means reverse compliment mode.
One read will be scaned in normal mode first and reverse complement mode next.
Only if the read has been identified as a hit read, the reverse complement
mode will be skipped.
*/

/* isodate(): write the current local time into buf as an ISO-8601 timestamp
   with millisecond precision (buf must hold at least 29 bytes). */
void isodate(char* buf)
{
  /* Borrowed from:
     https://raw.github.com/jordansissel/experiments/bd58235b99f608472212a5933b52fca9cf1cac8d/c/time/iso8601.c */
  struct timeval tv;
  struct tm tm;
  char timestamp[] = "YYYY-MM-ddTHH:mm:ss.SSS+0000";

  /* Get the current time at high precision; could also use clock_gettime() for
   * even higher precision times if we want it. */
  gettimeofday(&tv, NULL);

  /* convert to time to 'struct tm' for use with strftime */
  localtime_r(&tv.tv_sec, &tm);

  /* format the time */
  strftime(timestamp, sizeof(timestamp), "%Y-%m-%dT%H:%M:%S.000%z", &tm);

  /* but, since strftime() can't subsecond precision, we have to hack it
   * in manually. '20' is the string offset of the subsecond value in our
   * timestamp string. Also, because sprintf always writes a null, we have to
   * write the subsecond value as well as the rest of the string already there.
   */
  sprintf(timestamp + 20, "%03d%s", tv.tv_usec / 1000, timestamp + 23);
  sprintf(buf, "%s", timestamp);
}

/*sub function for quick pass*/
/* total_subscan(): probe the read at k-mer stride (zero overlap).  On the
   first bloom hit, escalate to total_full_check(); in normal mode a miss
   falls through to a reverse-complement rescan via the *_read_check entry
   points.  `begin' is the read start, `start_point' the scan cursor;
   `type' is 'q' (fastq) or 'a' (fasta).  Returns the match score, or 0. */
int total_subscan (bloom *bl, F_set *File_head, char *begin, char *start_point,
  int read_length, int true_length, float tole_rate, char mode, char type)
{
  if (mode == 'n')
  {
    /* Count total k-mers examined; shared counter, hence the atomic. */
    #pragma omp atomic
    File_head->all_k += (true_length);
  }
  int result = 0;
  while (read_length > 0)
  {
    if (read_length >= bl->k_mer)
    {
      read_length -= bl->k_mer;
    }
    else
    {
      /* Tail shorter than k: back the cursor up so the last k-mer is full. */
      start_point -= (bl->k_mer-read_length);
      read_length = 0;
    }
    if (bloom_check (bl, start_point))
    {
      result = total_full_check (bl, begin, true_length, tole_rate, File_head);
      if (result > 0)
      {
        if (mode == 'r')
          free(begin);  /* 'r' mode owns its copy of the read */
        return result;
      }
      else if (mode == 'n')
        break;  /* full check failed; go try reverse complement */
    }
    start_point+=bl->k_mer;
  }
  if (mode == 'r')
  {
    free(begin);
    return 0;
  }
  else
  {
    /* Normal-mode miss: rescan the read in reverse-complement ('r') mode. */
    if (type=='q')
      return fastq_read_check (begin, true_length, 'r', bl, tole_rate,
        File_head);
    else
      return fasta_read_check (begin, true_length, 'r', bl, tole_rate,
        File_head);
  }
}

/*quick pass for fastq reads using k-mer and 0 overlap
if one hit exists, pass the read to total_subscan to do full check
and return the value, other wise return 0 */
int fastq_read_check (char *begin, int length, char mode, bloom * bl,
  float tole_rate, F_set * File_head)
{
  /*
  begin is the start point of the read in the string. Hence length is read
  length.
  mode contains 'r' and 'n' as mentioned above.
  bl is a structor containing bloom filter.
  tole_rate is the threshold/match cutoff value
  File_head is a structor containing all the info for the query, including
  statistic info.
  Same principle applies to fasta_read_check function.
  */
  if (mode == 'r')  // make a copy of the read for reverse compliment process
  {
    char *re_compliment = (char *) calloc (length+1, sizeof (char));
    re_compliment[length]='\0';
    memcpy(re_compliment, begin, length);
    begin = re_compliment;  /* freed inside total_subscan in 'r' mode */
    rev_trans (begin,length);
  }
  // initialization
  int read_length = length;
  char *start_point = begin;
  if (mode == 'n')
  {
    normal_lower(start_point,length);  //normalize the whole read tddo the lower case
  }
  return total_subscan (bl, File_head, begin, start_point, read_length, length,
    tole_rate, mode, 'q');
}

/*full check for fastq or fasta sequence with k-mer and k-1 overlap
return a value that larger than match cut off, otherwise, return 0 */
int total_full_check (bloom * bl, char *start_point, int length,
  float tole_rate, F_set * File_head)
{
  /* Sliding-window scan: every k-mer (stride 1).  match_s accumulates the
     matched-base score, conse rewards consecutive hits, match_time counts
     raw bloom hits. */
  int read_length = length, count = 0, match_s = 0, mark = 1, prev = 0,
    conse = 0, match_time = 0;
  float result;
  while (read_length >= bl->k_mer)
  {
    if (count >= bl->k_mer)
    {
      /* A full k-mer of windows has passed; next hit scores a whole k-mer. */
      mark = 1;
      count = 0;
    }
    if (bloom_check (bl, start_point))
    {
      match_time++;
      if (prev == 1)
        conse++;
      else
      {
        conse += bl->k_mer;
        prev = 1;
      }
      if (mark == 1)
      {
        match_s += (bl->k_mer - 1);
        mark = 0;
      }
      else
        match_s++;
    }
    else
    {
      prev = 0;
    }
    count++;
    start_point++;
    read_length--;
  }  // end while
  /* Normalized match score; bl->dx is a filter-specific correction term. */
  result = (float)(match_time*bl->k_mer+conse)/(float)(bl->k_mer*length+length-bl->dx);
  //result = (float) (match_time * bl->k_mer + conse) / (float) (length * bl->k_mer - 2 * bl->dx + length - bl->k_mer + 1);
  #pragma omp atomic
  File_head->hits += match_time;
  if (result >= tole_rate)
    return match_s;
  else
    return 0;
}

/*fasta read quick check using k-mer and 0 overlap*/
int fasta_read_check (char *begin, int length, char mode, bloom * bl,
  float tole_rate, F_set * File_head)
{
  // skip id line
  char *start_point = NULL;
  //int result;
  int true_length = 0, read_length = 0;
  if (!begin || *begin == '>')
    return 1;  // in case the read is empty
  /* 'n' mode: strip newlines into a fresh buffer (fa_count allocates). */
  if (mode == 'n')
    start_point = fa_count (begin, length);
  else
    start_point = begin;
  true_length = strlen(start_point);
  read_length = true_length;
  if (mode == 'r')  // make a copy of the read for reverse compliment process
  {
    rev_trans (start_point,true_length);  // reverse compliment process
  }
  if (mode == 'n')
    begin = start_point;
  normal_lower(start_point,true_length);
  return total_subscan (bl, File_head, begin, start_point, read_length,
    true_length, tole_rate, mode, 'a');
}

/*Parallel job distribution*/
/* get_parainfo(): split the buffer `full' into roughly equal chunks, one per
   core, and append a Queue node per chunk after `head'.  `type' is '>' for
   fasta, anything else for fastq.  Returns `type'.
   NOTE(review): x->number = &add stores the address of a stack-local loop
   counter — every node ends up pointing at the same dead variable once this
   function returns; verify whether any caller dereferences `number'.
   NOTE(review): in the fastq branch, when previous==temp the freshly
   allocated Queue node is never linked and leaks — confirm intended. */
int get_parainfo (char *full, Queue * head, char type)
{
#ifdef DEBUG
  printf ("distributing...\n");
#endif
  char *previous = NULL;
  char *temp = full;
#ifndef __clang__
  int cores = omp_get_num_procs ();
#else
  int cores = 1;
#endif
  short add = 0;
  int offset = 0;
  Queue *pos = head;
  int length = 0;
  if (full != NULL)
  {
    offset = strlen(full) / cores;
  }
  if (type == '>')
  {
    for (add = 0; add < cores; add++)
    {
      Queue *x = NEW (Queue);
      if (add == 0 && *full != '>')
      {
        temp = strchr (full, '>');  //drop the possible fragment
      }
      if (add != 0)
        temp = strchr (full + offset * add, '>');  /* snap to a record start */
      x->location = temp;
      x->number = &add;
      x->next = pos->next;
      pos->next = x;
      pos = pos->next;
    }
  }
  else
  {
    for (add = 0; add < cores; add++)
    {
      /* Estimate the read length from the first record's sequence line. */
      char *tx = strchr(full,'\n');
      length = strchr(tx+1,'\n')-(tx+1);
      Queue *x = NEW (Queue);
      x->location = NULL;
      if (add != 0)
      {
        temp = fastq_relocate(full, offset*add, length);
      }
      /* Only link a node when the chunk start actually advanced. */
      if (previous!=temp)
      {
        previous = temp;
        x->location = temp;
        x->number = &add;
        x->next = pos->next;
        pos->next = x;
        pos = pos->next;
      }
    }
  }
  return type;
}

/*reads skipping process for proportional check*/
/* jump(): with probability (1 - sampling_rate), skip past the current record
   (fasta: next '>'; fastq: four newline-delimited lines).  Returns the new
   position, or `target' unchanged when the record is kept. */
char *jump (char *target, char type, float sampling_rate)
{
  float seed = rand () % 10;
  if (seed >= (float) sampling_rate * 10)
  {
    char *point;
    if (type == '>')
      point = strchr (target + 1, '>');  //point to >
    else
    {
      //point = strstr (target + 1, "\n+") + 1; //point to +
      //for (x=0;x<4;x++)
      point = strchr (target, '\n') + 1;
      point = strchr (point, '\n') + 1;
      point = strchr (point, '\n') + 1;
      point = strchr (point, '\n') + 1;  //point to quality line
    }
    if (point)
      target = point;
  }
  return target;
}

/*relocate the starting
points (correct @ positions) for fastq files*/
/* fastq_relocate(): given an approximate byte `offset' into `data', find the
   start of the next full fastq record by locating the "\n+" separator and
   stepping past the quality line.  `length' is the expected read length.
   Returns NULL when no separator is found or offset==0. */
char *fastq_relocate (char *data, int offset, int length)
{
  char *target = NULL;
  int current_length = 0, read_length = 0;
  if (data != NULL && offset != 0)
  {
    target = strstr (data + offset, "\n+");
    if (!target)
      return NULL;
    else
    {
      current_length = strchr (target + 1, '\n') - target + 1;
      read_length = fq_read_length (target - 1);
      /* If the '+' line length disagrees with the sequence line length we
         matched a "\n+" inside quality data; skip one more line. */
      if (read_length != current_length)
        target = strchr (target + 1, '\n') + 1;
      if (target != NULL)
        target = strchr (target + 1, '\n') + 1;
    }
  }
  return target;
}

/*get read length for fastq file*/
/* fq_read_length(): walk backwards from `data' to the previous '\n' and
   return the distance — i.e. the length of the line ending at `data'.
   NOTE(review): assumes a '\n' exists before `data'; no bound check. */
int fq_read_length (char *data)
{
  char *origin = data;
  while (*data != '\n')
    data--;
  return origin - data;
}

/*check the head of the file and see if it is standard*/
/* check_fmt(): verify the chunk at `info' starts with the expected record
   marker (`type'); return the start of the next chunk, or — for the last
   chunk — a pointer just past the final record, trimming trailing blank
   lines.  Returns NULL when the chunk is malformed. */
char *check_fmt (Queue *info, Queue *tail, char *start_point, char type)
{
  char *next_job = NULL;
  if(info->location[0] != type)
  {
    return next_job;
  }
  else if(info->next != tail && info->next->location != NULL)
  {
    next_job = info->next->location;
  }
  else
  {
    // XXX: Does this account for OSX-style newlines? next_job is always NULL on OSX fastq files.
    next_job = strchr (start_point, '\0');
    if (next_job[-1] == '\n' && next_job[-2] == '\n')
      next_job -= 1;
    else if (next_job[-4] == '\r' && next_job[-3] == '\n')
      next_job -= 2;
  }
  return next_job;
}

/*get the correct starting point*/
/* get_right_sp(): skip the id/header line and return the first sequence byte.
   `type' is currently unused. */
char *get_right_sp (char *start_point ,char type)
{
  start_point = strchr(start_point,'\n')+1;
  return start_point;
}

/*count useful characters for fasta reads*/
/* fa_count(): copy `length' bytes from `start' into a freshly allocated,
   NUL-terminated buffer, dropping newlines.  Caller frees the result. */
char *fa_count (char *start, int length)
{
  char *reads = (char *) calloc (length+1, sizeof(char));
  char *p = reads;
  // conservatively allocate memory
  while (length>0)
  {
    if (*start!='\n')
    {
      p[0]=start[0];
      p++;
    }
    start++;
    length--;
  }
  p[0] = '\0';
  return reads;
}
BlockMultiplierMatrixUtil.h
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full licence information.
//
#pragma once
#define NOMINMAX
#include <fstream>
#include <functional>
#include <iostream>
#include <limits>
#include <string.h>//for memset
#include "BlockMultiplierPlatform.h"

namespace Microsoft { namespace MSR { namespace CNTK {

// Print a row-major rows x cols matrix to pStream, optionally clipped to the
// top-left rowMax x colMax window.
template<typename ScalarT> void DumpMatrix(ScalarT* pDumpMe, int rows, int cols, std::ostream* pStream,
        int rowMax = std::numeric_limits<int>::max(), int colMax = std::numeric_limits<int>::max())
{
    for (int r = 0; r < std::min(rows, rowMax); ++r)
    {
        for (int c = 0; c < std::min(cols, colMax); ++c)
        {
            (*pStream) << pDumpMe[r * cols + c] << " ";
        }
        (*pStream) << std::endl;
    }
}

// Turn a row+col into an absolute offset
FORCEINLINE int RowColToOffset(int idxRow, int idxCol, int numCols)
{
    return idxRow * numCols + idxCol;
}

// Arguments for transposing a single row (r) of an origRows x origCols matrix.
template<typename ScalarT>struct TransposeArgs
{
    int r;
    ScalarT* transposeMe;
    ScalarT* transposed;
    int origRows;
    int origCols;
};

// Copy row ta.r of the source into column ta.r of the destination.
template<class ScalarT>void TransposeThread(TransposeArgs<ScalarT> ta)
{
    for (int c = 0; c < ta.origCols; ++c)
    {
        //new c,r = old r,c
        int oldOffset = RowColToOffset(ta.r, c, ta.origCols);
        int newOffset = RowColToOffset(c, ta.r, ta.origRows);
        ta.transposed[newOffset] = ta.transposeMe[oldOffset];
    }
}

// Functor wrapper around TransposeThread (for thread-pool style dispatch).
template<typename ScalarT> class TransposeThreadType
{
    public:
        void operator()(TransposeArgs<ScalarT> ta)
        {
            TransposeThread<ScalarT>(ta);
        }
};

// Out-of-place transpose of an origRows x origCols row-major matrix;
// rows are processed in parallel with OpenMP.
template<class ScalarT> void Transpose(ScalarT* transposeMe, ScalarT* transposed, int origRows, int origCols)
{
#pragma omp parallel for
    for (int r = 0; r < origRows; ++r)
    {
        for (int c = 0; c < origCols; ++c)
        {
            int oldOffset = RowColToOffset(r, c, origCols);
            int newOffset = RowColToOffset(c, r, origRows);
            transposed[newOffset] = transposeMe[oldOffset];
        }
    }
}

// Allocate an m x n matrix on an `alignment'-byte boundary, filled with
// initVal.  Free with FreeAlignedMatrix.
// NOTE(review): the ALIGNED_ALLOC result is not NULL-checked before use.
template<typename ScalarT> ScalarT* CreateAlignedMatrix(int m, int n, ScalarT initVal, int alignment = 64)
{
    ScalarT* ret = (ScalarT*)ALIGNED_ALLOC(sizeof(ScalarT) * (m * n), alignment);
    if (initVal != 0)
    {
        for (int i = 0; i < m * n; ++i)
        {
            ret[i] = initVal;// +i;
        }
    }
    else
    {
        memset(ret, 0, sizeof(ScalarT) * m * n);
    }
    return ret;
}

// Release a matrix allocated by CreateAlignedMatrix.
template<typename ScalarT> void FreeAlignedMatrix(ScalarT* destroyMe)
{
    ALIGNED_FREE(destroyMe);
}

// Mean of squared element-wise differences between two m x n matrices,
// accumulated in double regardless of ScalarT.
template<typename ScalarT> double MeanSquaredError(ScalarT* lhs, ScalarT* rhs, int m, int n)
{
    double accumulatedError = 0.0;
    for (int r = 0; r < m; ++r)
    {
        for(int c = 0; c < n; ++c)
        {
            double err = ((double)lhs[RowColToOffset(r, c, n)] - (double)rhs[RowColToOffset(r, c, n)]);
            err = err * err;
            accumulatedError += err;
        }
    }
    return accumulatedError / (double)(m * n);
}

// Fill an m x n matrix with pseudo-random integers in [0, bound).
// (Uses rand(); deterministic per srand seed, biased for large bounds.)
template<typename ScalarT> void RandInitIntMatrix(ScalarT* initMe, int m, int n, ScalarT bound)
{
    ScalarT* curr = initMe;
    for (int i = 0; i < m * n; ++i)
    {
        *curr++ = rand() % bound;
    }
}

//Helper fn for tests
// Fill an m x n matrix with pseudo-random values uniform in [min, max].
template<typename ScalarT>static void RandInitFloatMatrix(ScalarT* initMe, int m, int n, ScalarT min, ScalarT max)
{
    for (int i = 0; i < m * n; ++i)
    {
        initMe[i] = min + ((max - min) * ((ScalarT)rand() / RAND_MAX));
    }
}

//Viewing matrices and troubleshooting is a lot easier in Octave.
//Utility fn for exporting to Octave format
template<typename ScalarT>void DumpMatrixToOctaveFormat(const ScalarT* dumpMe, int rows, int cols, const char* fileName, const char* id)
{
    std::ofstream ofs(fileName);
    ofs << "# Created by gemmbenchmark" << std::endl <<
        "# name: " << id << std::endl <<
        "# type: matrix" << std::endl <<
        "# rows: " << rows << std::endl <<
        "# columns: " << cols << std::endl;
    for (int r = 0; r < rows; ++r)
    {
        for (int c = 0; c < cols; ++c)
        {
            ofs << ' ' << (ScalarT)(dumpMe[(cols * r) + c]);
        }
        ofs << std::endl;
    }
}

}}} //End namespaces
force.c
#include <stdio.h> #include <string.h> #include <stdint.h> #include <omp.h> #include <math.h> #include <ktime.h> #include <geometry.h> #ifdef __USE_HW_COUNTER #include <perf.h> #include <kperf.h> #endif #include <phy.h> /* Calculates the forces (Drag FORCE, LIFT FORCE, and the momentum) */ void compute_force(struct force *restrict f) { #ifdef __USE_HW_COUNTER const struct fd fd = f->perf_counters->fd; struct counters start; perf_read(fd, &start); const uint64_t icycle = __rdtsc(); #endif struct ktime ktime; setktime(&ktime); const struct geometry *restrict g = f->g; const struct ivals * iv = f->iv; const double *restrict q = f->q; double lift = 0.f; double drag = 0.f; double momn = 0.f; const uint32_t snfc = g->s->snfc; const uint32_t *restrict snfic = g->s->snfic; uint32_t i; for(i = 0; i < snfc; i++) { uint32_t if0 = snfic[i]; uint32_t if1 = snfic[i+1]; uint32_t j; #pragma omp parallel for reduction(+: lift, drag, momn) for(j = if0; j < if1; j++) { uint32_t n0 = g->b->snfptr->n0[j]; uint32_t n1 = g->b->snfptr->n1[j]; uint32_t n2 = g->b->snfptr->n2[j]; double x0 = g->n->xyz->x0[n0]; double y0 = g->n->xyz->x1[n0]; double z0 = g->n->xyz->x2[n0]; double x1 = g->n->xyz->x0[n1]; double y1 = g->n->xyz->x1[n1]; double z1 = g->n->xyz->x2[n1]; double x2 = g->n->xyz->x0[n2]; double y2 = g->n->xyz->x1[n2]; double z2 = g->n->xyz->x2[n2]; /* Delta coordinates in all directions */ double ax = x1 - x0; double ay = y1 - y0; double az = z1 - z0; double bx = x2 - x0; double by = y2 - y0; double bz = z2 - z0; /* Norm points outward, away from grid interior. Norm magnitude is area of surface triangle. 
*/ double xnorm = ay * bz; xnorm -= az * by; xnorm = -0.5f * xnorm; double ynorm = ax * bz; ynorm -= az * bx; ynorm = 0.5f * ynorm; /* Pressure values store at every face node */ double p0 = q[g->c->bsz * n0]; double p1 = q[g->c->bsz * n1]; double p2 = q[g->c->bsz * n2]; double press = (p0 + p1 + p2) / 3.f; double cp = 2.f * (press - 1.f); double dcx = cp * xnorm; double dcy = cp * ynorm; double xmid = x0 + x1 + x2; double ymid = y0 + y1 + y2; lift = lift - dcx * iv->v + dcy * iv->u; drag = drag + dcx * iv->u + dcy * iv->v; momn = momn + (xmid - 0.25f) * dcy - ymid * dcx; } } (* f->clift) = lift; (* f->cdrag) = drag; (* f->cmomn) = momn; compute_time(&ktime, &f->t->forces); #ifdef __USE_HW_COUNTER const uint64_t cycle = __rdtsc() - icycle; struct counters end; perf_read(fd, &end); struct tot tot; perf_calc(start, end, &tot); f->perf_counters->ctrs->forces.cycles += cycle; f->perf_counters->ctrs->forces.tot.imcR += tot.imcR; f->perf_counters->ctrs->forces.tot.imcW += tot.imcW; f->perf_counters->ctrs->forces.tot.edcR += tot.edcR; f->perf_counters->ctrs->forces.tot.edcW += tot.edcW; #endif }
GB_unaryop__identity_uint8_bool.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint8_bool
// op(A') function:  GB_tran__identity_uint8_bool

// C type:   uint8_t
// A type:   bool
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise pass over anz entries; embarrassingly parallel, so a static
// OpenMP schedule over nthreads threads is used.
GrB_Info GB_unop__identity_uint8_bool
(
    uint8_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Body is generated from the shared template GB_unaryop_transpose.c, driven
// by the GB_* macros defined above.
GrB_Info GB_tran__identity_uint8_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Sum_N_numbers_mp_CS.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>

#define n 100000

/*
 * Fill a[] with pseudo-random-scaled values and accumulate a sum in parallel:
 * each thread keeps a private partial sum and folds it into the shared total
 * inside a critical section.  Prints the elapsed wall-clock time.
 */
int main()
{
    double a[n];
    double sum = 0, privatesum;
    /* omp_get_wtime() returns double; the original stored it in float,
       silently truncating the timestamp.  Keep full precision. */
    double startTime, endTime, execTime;
    int i;

    srand(time(0));
    startTime = omp_get_wtime();
    #pragma omp parallel private (i, privatesum) shared (a, sum)
    {
        privatesum = 0;
        #pragma omp for
        for (i = 0; i < n; i++)
        {
            /* Fix: random_a was a shared variable written by every thread
               inside the worksharing loop — a data race.  Make it local to
               the iteration.  NOTE(review): rand() itself is not guaranteed
               thread-safe; rand_r() with a per-thread seed would be the
               fully portable fix. */
            double random_a = rand();
            a[i] = i * random_a;
            /* Adds a[i] (n-1) times, as in the original — presumably extra
               work to make the timing measurable; TODO confirm a[i] (not
               a[j]) is intended. */
            for (int j = 1; j < n; j++)
                privatesum = privatesum + a[i];
        }
        /* Serialize the fold into the shared accumulator. */
        #pragma omp critical
        {
            sum = sum + privatesum;
        }
    }
    endTime = omp_get_wtime();
    execTime = endTime - startTime;
    printf("%f \n", execTime);
    return (0);
}
jacobi.c
#include <stdio.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// Add timing support
#include <sys/time.h>

// Returns the current wall-clock time in seconds (microsecond resolution,
// via gettimeofday).  Used to time the solver in driver().
double time_stamp()
{
  struct timeval t;
  double time;
  gettimeofday(&t, NULL);
  time = t.tv_sec + 1.0e-6*t.tv_usec;
  return time;
}

// Solver start/end timestamps, set in driver().
double time1, time2;

void driver(void);
void initialize(void);
void jacobi(void);
void error_check(void);

/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
* This c version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve paralleism.
* All do loops are parallized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - Helmholtz constant (always greater than 0.0)
* tol - error tolerance for iterative solver
* relax - Successice over relaxation parameter
* mits - Maximum iterations for iterative solver
*
* On output
* : u(n,m) - Dependent variable (solutions)
* : f(n,m) - Right hand side function
*************************************************************/

#define MSIZE 500
int n,m,mits;
double tol,relax=1.0,alpha=0.0543;
// Solution, right-hand side, and previous-iteration grids (global, fixed size).
double u[MSIZE][MSIZE],f[MSIZE][MSIZE],uold[MSIZE][MSIZE];
double dx,dy;

// Entry point: problem parameters are hard-coded (interactive input is
// commented out); reports the OpenMP thread count, then runs the solver.
int main (void)
{
  float toler;
/*      printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE);
          scanf ("%d",&n);
          scanf ("%d",&m);
          printf("Input tol - error tolerance for iterative solver\n");
          scanf("%f",&toler);
          tol=(double)toler;
          printf("Input mits - Maximum iterations for solver\n");
          scanf("%d",&mits);
*/
  n=MSIZE;
  m=MSIZE;
  tol=0.0000000001;
  mits=5000;
#ifdef _OPENMP
#pragma omp parallel
  {
#pragma omp single
    printf("Running using %d threads...\n",omp_get_num_threads());
  }
#endif
  driver ( ) ;
  return 0;
}

/*************************************************************
* Subroutine driver ()
* This is where the arrays are allocated and initialized.
*
* Working variables/arrays
* dx - grid spacing in x direction
* dy - grid spacing in y direction
*************************************************************/
void driver( )
{
  initialize();

  time1 = time_stamp();
  /* Solve Helmholtz equation */
  jacobi ();
  time2 = time_stamp();

  printf("------------------------\n");
  printf("Execution time = %f\n",time2-time1);
  /* error_check (n,m,alpha,dx,dy,u,f)*/
  error_check ( );
}

/*      subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
void initialize( )
{
  int i,j, xx,yy;
  //double PI=3.1415926;

  dx = 2.0 / (n-1);
  dy = 2.0 / (m-1);

  /* Initialize initial condition and RHS */
#pragma omp parallel for private(xx,yy,j,i)
  for (i=0;i<n;i++)
    for (j=0;j<m;j++)
      {
        /* NOTE(review): xx,yy are ints, so -1.0+dx*(i-1) is truncated
           toward zero; the exact-solution coordinates become -1/0/1 only.
           error_check() uses doubles for the same expression — confirm
           this truncation is intended (it matches the distributed
           benchmark, but looks like a translation artifact). */
        xx =(int)( -1.0 + dx * (i-1));
        yy = (int)(-1.0 + dy * (j-1)) ;
        u[i][j] = 0.0;
        f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\
          - 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy);
      }
}

/*      subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
void jacobi( )
{
  double omega;
  int i,j,k;
  double error,resid,ax,ay,b;
  //      double  error_local;

  //      float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2;
  //      float te1,te2;
  //      float second;

  omega=relax;
  /*
   * Initialize coefficients */

  ax = 1.0/(dx*dx); /* X-direction coef */
  ay = 1.0/(dy*dy); /* Y-direction coef */
  b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */

  error = 10.0 * tol;
  k = 1;

  // Iterate until convergence (error <= tol) or the iteration cap is hit.
  while ((k<=mits)&&(error>tol))
    {
      error = 0.0;

      /* Copy new solution into old */
#pragma omp parallel
      {
#pragma omp for private(j,i)
        for(i=0;i<n;i++)
          for(j=0;j<m;j++)
            uold[i][j] = u[i][j];

        // Interior-point update; boundary rows/cols stay fixed (Dirichlet).
        // nowait is safe: the parallel region's closing barrier still
        // orders this loop before the serial use of 'error' below.
#pragma omp for private(resid,j,i) reduction(+:error) nowait
        for (i=1;i<(n-1);i++)
          for (j=1;j<(m-1);j++)
            {
              resid = (ax*(uold[i-1][j] + uold[i+1][j])\
                       + ay*(uold[i][j-1] + uold[i][j+1])+ b * uold[i][j] - f[i][j])/b;

              u[i][j] = uold[i][j] - omega * resid;
              error = error + resid*resid ;
            }

      }
      /*  omp end parallel */

      /* Error check */

      k = k + 1;
      if (k%500==0)
        printf("Finished %d iteration.\n",k);
      error = sqrt(error)/(n*m);

    }          /*  End iteration loop */

  printf("Total Number of Iterations:%d\n",k);
  printf("Residual:%E\n", error);

}

/*      subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
void error_check ( )
{
  int i,j;
  double xx,yy,temp,error;

  dx = 2.0 / (n-1);
  dy = 2.0 / (m-1);
  error = 0.0 ;

  // RMS difference between computed u and the exact solution
  // (1-x^2)(1-y^2); here xx,yy are doubles (contrast with initialize()).
#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error)
  for (i=0;i<n;i++)
    for (j=0;j<m;j++)
      {
        xx = -1.0 + dx * (i-1);
        yy = -1.0 + dy * (j-1);
        temp  = u[i][j] - (1.0-xx*xx)*(1.0-yy*yy);
        error = error + temp*temp;
      }

  error = sqrt(error)/(n*m);

  printf("Solution Error :%E \n",error);

}
LPfold.c
/* Last changed Time-stamp: <2009-02-18 14:19:51 ivo> */ /* local pair probabilities for RNA secondary structures Stephan Bernhart, Ivo L Hofacker Vienna RNA package */ /* todo: compute energy z-score for each window */ #include <config.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <float.h> /* #defines FLT_MAX ... */ #include "utils.h" #include "energy_par.h" #include "fold_vars.h" #include "pair_mat.h" #include "PS_dot.h" #include "part_func.h" #include "params.h" #include "loop_energies.h" #include "LPfold.h" #include "Lfold.h" #ifdef _OPENMP #include <omp.h> #endif /*@unused@*/ PRIVATE char rcsid[] UNUSED = "$Id: LPfold.c,v 1.8 2009/02/18 20:34:38 ivo Exp $"; #define ISOLATED 256.0 /* ################################# # GLOBAL VARIABLES # ################################# */ /* ################################# # PRIVATE VARIABLES # ################################# */ PRIVATE float cutoff; PRIVATE int num_p=0; /* for counting basepairs in pairlist pl, can actually be moved into pfl_fold */ PRIVATE FLT_OR_DBL *expMLbase=NULL; PRIVATE FLT_OR_DBL **q=NULL, **qb=NULL, **qm=NULL, *qqm=NULL, *qqm1=NULL, *qq=NULL, *qq1=NULL, **pR=NULL, **qm2=NULL, **QI5=NULL, **q2l=NULL, **qmb=NULL;/*,**QI3,*/ PRIVATE FLT_OR_DBL *prml=NULL, *prm_l=NULL, *prm_l1=NULL, *q1k=NULL, *qln=NULL; PRIVATE FLT_OR_DBL *scale=NULL; PRIVATE char **ptype=NULL; /* precomputed array of pair types */ PRIVATE int *jindx=NULL; PRIVATE int *my_iindx=NULL; PRIVATE int init_length = 0; /* length in last call to init_pf_fold() */ PRIVATE pf_paramT *pf_params=NULL; PRIVATE short *S=NULL, *S1=NULL; PRIVATE int unpaired; PRIVATE int ulength; PRIVATE int pUoutput; PRIVATE double alpha = 1.0; #ifdef _OPENMP /* NOTE: all variables are assumed to be uninitialized if they are declared as threadprivate */ #pragma omp threadprivate(cutoff, num_p, scale, ptype, jindx, my_iindx, init_length, pf_params,\ expMLbase, q, qb, qm, qqm, qqm1, qq, qq1, pR, qm2, QI5, q2l, qmb,\ prml, 
prm_l, prm_l1, q1k, qln,\ S, S1, unpaired, ulength, pUoutput, alpha) #endif /* ################################# # PRIVATE FUNCTION DECLARATIONS # ################################# */ PRIVATE void init_partfunc_L(int length, pf_paramT *parameters); PRIVATE void get_arrays_L(unsigned int length); PRIVATE void free_pf_arrays_L(void); PRIVATE void scale_pf_params(unsigned int length, pf_paramT *parameters); PRIVATE void GetPtype(int j, int pairsize, const short *S, int n); PRIVATE void FreeOldArrays(int i); PRIVATE void GetNewArrays(int j, int winSize); PRIVATE void printpbar(FLT_OR_DBL **prb,int winSize, int i, int n); PRIVATE plist *get_deppp(struct plist *pl, int start, int pairsize, int length); PRIVATE plist *get_plistW(struct plist *pl, int length, int start, FLT_OR_DBL **Tpr, int winSize); PRIVATE void print_plist(int length, int start, FLT_OR_DBL **Tpr, int winSize, FILE *fp); PRIVATE void compute_pU(int k, int ulength, double **pU, int winSize, int n, char *sequence); PRIVATE void putoutpU(double **pU,int k, int ulength, FILE *fp); /*PRIVATE void make_ptypes(const short *S, const char *structure);*/ PRIVATE void putoutpU_splitup(double **pUx, int k, int ulength, FILE *fp, char ident); PRIVATE void compute_pU_splitup(int k, int ulength, double **pU, double **pUO, double **pUH, double **pUI, double **pUM, int winSize,int n, char *sequence); /* ################################# # BEGIN OF FUNCTION DEFINITIONS # ################################# */ PRIVATE void init_partfunc_L(int length, pf_paramT *parameters){ if (length<1) nrerror("init_partfunc_L: length must be greater 0"); #ifdef _OPENMP /* Explicitly turn off dynamic threads */ omp_set_dynamic(0); free_pf_arrays_L(); /* free previous allocation */ #else if (init_length>0) free_pf_arrays_L(); /* free previous allocation */ #endif #ifdef SUN4 nonstandard_arithmetic(); #else #ifdef HP9 fpsetfastmode(1); #endif #endif make_pair_matrix(); get_arrays_L((unsigned) length); scale_pf_params((unsigned) length, 
parameters); #ifndef _OPENMP init_length = length; #endif } PRIVATE void get_arrays_L(unsigned int length){ /*arrays in 2 dimensions*/ q = (FLT_OR_DBL **) space(sizeof(FLT_OR_DBL *)*(length+1)); qb = (FLT_OR_DBL **) space(sizeof(FLT_OR_DBL *)*(length+1)); qm = (FLT_OR_DBL **) space(sizeof(FLT_OR_DBL *)*(length+1)); pR = (FLT_OR_DBL **) space(sizeof(FLT_OR_DBL *)*(length+1)); q1k = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL) *(length+1)); qln = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL) *(length+2)); qq = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL) *(length+2)); qq1 = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL) *(length+2)); qqm = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL) *(length+2)); qqm1 = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL) *(length+2)); prm_l = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL) *(length+2)); prm_l1 = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL) *(length+2)); prml = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL) *(length+2)); expMLbase = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL) *(length+1)); scale = (FLT_OR_DBL *) space(sizeof(FLT_OR_DBL) *(length+1)); ptype = (char **) space(sizeof(char *) *(length+2)); if (ulength>0) { /* QI3 = (FLT_OR_DBL **) space((length+1)*sizeof(FLT_OR_DBL *));*/ QI5 = (FLT_OR_DBL **) space((length+1)*sizeof(FLT_OR_DBL *)); qmb = (FLT_OR_DBL **) space((length+1)*sizeof(FLT_OR_DBL *)); qm2 = (FLT_OR_DBL **) space((length+1)*sizeof(FLT_OR_DBL *)); q2l = (FLT_OR_DBL **) space((length+1)*sizeof(FLT_OR_DBL *)); } my_iindx = get_iindx(length); iindx = get_iindx(length); /* for backward compatibility and Perl wrapper */ jindx = get_indx(length); } PRIVATE void free_pf_arrays_L(void){ if(q) free(q); if(qb) free(qb); if(qm) free(qm); if(pR) free(pR); if(qm2) free(qm2); if(qq) free(qq); if(qq1) free(qq1); if(qqm) free(qqm); if(qqm1) free(qqm1); if(q1k) free(q1k); if(qln) free(qln); if(prm_l) free(prm_l); if(prm_l1) free(prm_l1); if(prml) free(prml); if(expMLbase) free(expMLbase); if(scale) free(scale); if(my_iindx) free(my_iindx); if(iindx) free(iindx); /* for 
backward compatibility and Perl wrapper */ if(jindx) free(jindx); if(ptype) free(ptype); if(QI5) free(QI5); if(qmb) free(qmb); if(q2l) free(q2l); if(pf_params) free(pf_params); q = qb = qm = pR = QI5 = qmb = qm2 = q2l = NULL; qq = qq1 = qqm = qqm1 = q1k = qln = prml = prm_l = prm_l1 = expMLbase = NULL; my_iindx = jindx = iindx = NULL; pf_params = NULL; ptype = NULL; scale = NULL; #ifdef SUN4 standard_arithmetic(); #else #ifdef HP9 fpsetfastmode(0); #endif #endif #ifndef _OPENMP init_length=0; #endif } PUBLIC void update_pf_paramsLP(int length){ update_pf_paramsLP_par(length, NULL); } PUBLIC void update_pf_paramsLP_par(int length, pf_paramT *parameters){ #ifdef _OPENMP init_partfunc_L(length, parameters); #else if(parameters) init_partfunc_L(length, parameters); else if (length > init_length) init_partfunc_L(length, parameters); else { /* make_pair_matrix();*/ scale_pf_params((unsigned) length, parameters); } #endif } PUBLIC plist *pfl_fold( char *sequence, int winSize, int pairSize, float cutoffb, double **pU, struct plist **dpp2, FILE *pUfp, FILE *spup){ return pfl_fold_par(sequence, winSize, pairSize, cutoffb, pU, dpp2, pUfp, spup, NULL); } PUBLIC plist *pfl_fold_par( char *sequence, int winSize, int pairSize, float cutoffb, double **pU, struct plist **dpp2, FILE *pUfp, FILE *spup, pf_paramT *parameters){ int n, m, i, j, k, l, u, u1, ii, type, type_2, tt, ov, do_dpp, simply_putout, noGUclosure; double max_real; FLT_OR_DBL temp, Qmax, prm_MLb, prmt, prmt1, qbt1, *tmp, expMLclosing; plist *dpp, *pl; int split=0; ov = 0; Qmax = 0; do_dpp = 0; simply_putout = 0; dpp = NULL; pl = NULL; pUoutput = 0; ulength = 0; cutoff = cutoffb; if(pU != NULL) ulength = (int)pU[0][0]+0.49; if(spup !=NULL) simply_putout = 1; /*can't have one without the other*/ if(pUfp!=NULL) pUoutput = 1; else if((pUoutput)&&(ulength!=0)){ fprintf(stderr, "There was a problem with non existing File Pointer for unpaireds, terminating process\n"); return pl; } dpp = *dpp2; if(dpp !=NULL) do_dpp=1; n = 
(int) strlen(sequence); if (n<TURN+2) return 0; #ifdef _OPENMP /* always init everything since all global static variables are uninitialized when entering a thread */ init_partfunc_L(n, parameters); #else if(parameters) init_partfunc_L(n, parameters); else if (n > init_length) init_partfunc_L(n, parameters); else if (fabs(pf_params->temperature - temperature)>1e-6) update_pf_paramsLP_par(n, parameters); #endif expMLclosing = pf_params->expMLclosing; noGUclosure = pf_params->model_details.noGUclosure; max_real = (sizeof(FLT_OR_DBL) == sizeof(float)) ? FLT_MAX : DBL_MAX; S = encode_sequence(sequence, 0); S1 = encode_sequence(sequence, 1); /* make_ptypes(S, structure); das machmadochlieber lokal, ey!*/ /*here, I allocate memory for pU, if has to be saved, I allocate all in one go, if pU is put out and freed, I only allocate what I really need*/ if (ulength>0){ if (pUoutput) { for (i=1; i<=ulength; i++) pU[i]=(double *)space((MAX2(MAXLOOP,ulength)+2)*sizeof(double)); } else { for (i=1; i<=n; i++) pU[i]=(double *)space((MAX2(MAXLOOP,ulength)+2)*sizeof(double)); } } /*array initialization ; qb,qm,q qb,qm,q (i,j) are stored as ((n+1-i)*(n-i) div 2 + n+1-j */ num_p = 0; pl = (struct plist *)space(1000*sizeof(struct plist)); /*ALWAYS q[i][j] => i>j!!*/ for (j=1; j<MIN2(TURN+2,n); j++) { /*allocate start*/ GetNewArrays(j, winSize); GetPtype(j,pairSize,S,n); for (i=1; i<=j; i++) q[i][j]=scale[(j-i+1)]; } for (j=TURN+2;j<=n+winSize; j++) { if (j<=n) { GetNewArrays(j, winSize); GetPtype(j,pairSize,S,n); for (i=MAX2(1,j-winSize); i<=j/*-TURN*/; i++) q[i][j]=scale[(j-i+1)]; for (i=j-TURN-1;i>=MAX2(1,(j-winSize+1)); i--) { /* construction of partition function of segment i,j*/ /*firstly that given i bound to j : qb(i,j) */ u = j-i-1; type = ptype[i][j]; if (type!=0) { /*hairpin contribution*/ if (((type==3)||(type==4))&&noGUclosure) qbt1 = 0; else qbt1 = exp_E_Hairpin(u, type, S1[i+1], S1[j-1], sequence+i-1, pf_params) * scale[u+2]; /* interior loops with interior pair k,l */ for 
(k=i+1; k<=MIN2(i+MAXLOOP+1,j-TURN-2); k++) { u1 = k-i-1; for (l=MAX2(k+TURN+1,j-1-MAXLOOP+u1); l<j; l++) { type_2 = ptype[k][l]; if (type_2) { type_2 = rtype[type_2]; qbt1 += qb[k][l] * exp_E_IntLoop(u1, j-l-1, type, type_2, S1[i+1], S1[j-1], S1[k-1], S1[l+1], pf_params) * scale[k-i+j-l]; } } } /*multiple stem loop contribution*/ ii = my_iindx[i+1]; /* ii-k=[i+1,k-1] */ temp = 0.0; for (k=i+2; k<=j-1; k++) temp += qm[i+1][k-1]*qqm1[k]; tt = rtype[type]; qbt1 += temp * expMLclosing * exp_E_MLstem(tt, S1[j-1], S1[i+1], pf_params) * scale[2]; qb[i][j] = qbt1; } /* end if (type!=0) */ else qb[i][j] = 0.0; /* construction of qqm matrix containing final stem contributions to multiple loop partition function from segment i,j */ qqm[i] = qqm1[i]*expMLbase[1]; if (type) { qbt1 = qb[i][j] * exp_E_MLstem(type, (i>1) ? S1[i-1] : -1, (j<n) ? S1[j+1] : -1, pf_params); qqm[i] += qbt1; } /*construction of qm matrix containing multiple loop partition function contributions from segment i,j */ temp = 0.0; /*ii = my_iindx[i]; ii-k=[i,k-1] */ /*new qm2 computation done here*/ for (k=i+1; k<=j; k++) temp += (qm[i][k-1])*qqm[k]; if (ulength>0) qm2[i][j]=temp;/*new qm2 computation done here*/ for (k=i+1; k<=j; k++) temp += expMLbase[k-i] * qqm[k]; qm[i][j] = (temp + qqm[i]); /*auxiliary matrix qq for cubic order q calculation below */ qbt1 = qb[i][j]; if (type) { qbt1 *= exp_E_ExtLoop(type, (i>1) ? S1[i-1] : -1, (j < n) ? S1[j+1] : -1, pf_params); } qq[i] = qq1[i]*scale[1] + qbt1; /*construction of partition function for segment i,j */ temp = 1.0*scale[1+j-i] + qq[i]; for (k=i; k<=j-1; k++) temp += q[i][k]*qq[k+1]; q[i][j] = temp; if (temp>Qmax) { Qmax = temp; if (Qmax>max_real/10.) 
fprintf(stderr, "Q close to overflow: %d %d %g\n", i,j,temp); } if (temp>=max_real) { PRIVATE char msg[128]; snprintf(msg, 128, "overflow in pf_fold while calculating q[%d,%d]\n" "use larger pf_scale", i,j); nrerror(msg); } } /*end for i*/ tmp = qq1; qq1 =qq; qq =tmp; tmp = qqm1; qqm1=qqm; qqm=tmp; } /* just as a general service, I save here the free energy of the windows no output is generated, however,... */ if ((j>=winSize) && (j<=n) && (ulength) && !(pUoutput)) { double Fwindow=0.; Fwindow=(-log(q[j-winSize+1][j])-winSize*log(pf_params->pf_scale))*pf_params->kT/1000.0; pU[j][0]=Fwindow; /* if (ulength>=winSize) pU[j][winSize]=scale[winSize]/q[j-winSize+1][j]; */ } if (j>winSize) { Qmax=0; /* i=j-winSize; */ /* initialize multiloopfs */ for (k=j-winSize; k<=MIN2(n,j); k++) { prml[k]=0; prm_l[k]=0; /* prm_l1[k]=0; others stay*/ } prm_l1[j-winSize]=0; k=j-winSize; for (l=k+TURN+1; l<=MIN2(n,k+winSize-1); l++) { int a; pR[k][l] = 0; /* set zero at start */ type=ptype[k][l]; if (qb[k][l]==0) continue; for (a=MAX2(1,l-winSize+2); a<MIN2(k,n-winSize+2);a++) pR[k][l]+=q[a][k-1]*q[l+1][a+winSize-1]/q[a][a+winSize-1]; if (l-k+1==winSize) pR[k][l]+=1./q[k][l]; else { if (k+winSize-1<=n) /* k outermost */ pR[k][l]+=q[l+1][k+winSize-1]/q[k][k+winSize-1]; if (l-winSize+1>=1) /*l outermost*/ pR[k][l]+=q[l-winSize+1][k-1]/q[l-winSize+1][l]; } pR[k][l] *= exp_E_ExtLoop(type, (k>1) ? S1[k-1] : -1, (l<n) ? 
S1[l+1] : -1, pf_params); type_2 = ptype[k][l]; type_2 = rtype[type_2]; for (i=MAX2(MAX2(l-winSize+1,k-MAXLOOP-1),1); i<=k-1; i++) { for (m=l+1; m<=MIN2(MIN2(l+ MAXLOOP -k+i+2,i+winSize-1),n); m++) { type = ptype[i][m]; if ((pR[i][m]>0)) pR[k][l] += pR[i][m]*exp_E_IntLoop(k-i-1, m-l-1, type, type_2, S1[i+1], S1[m-1], S1[k-1], S1[l+1], pf_params) * scale[k-i+m-l]; } } if (ulength) { /* NOT IF WITHIN INNER LOOP */ for (i=MAX2(MAX2(l-winSize+1,k-MAXLOOP-1),1); i<=k-1; i++) { for (m=l+1; m<=MIN2(MIN2(l+ MAXLOOP -k+i+2,i+winSize-1),n); m++) { type = ptype[i][m]; if ((pR[i][m]>0)){ temp=pR[i][m]*qb[k][l]*exp_E_IntLoop(k-i-1, m-l-1, type, type_2, S1[i+1], S1[m-1], S1[k-1], S1[l+1], pf_params) * scale[k-i+m-l]; QI5[l][m-l-1]+=temp; QI5[i][k-i-1]+=temp; } } } } } /* 3. bonding k,l as substem of multi-loop enclosed by i,m */ prm_MLb = 0.; if(k>1) /*sonst nix!*/ for (l=MIN2(n-1,k+winSize-2); l>=k+TURN+1; l--) { /* opposite direction */ m=l+1; prmt = prmt1 = 0.0; tt = ptype[k-1][m]; tt=rtype[tt]; prmt1 = pR[k-1][m] * expMLclosing * exp_E_MLstem(tt, S1[l], S1[k], pf_params); for (i=MAX2(1,l-winSize+2); i<k-1/*TURN*/; i++) { tt = ptype[i][m]; tt = rtype[tt]; prmt += pR[i][m] * exp_E_MLstem(tt, S1[m-1], S1[i+1], pf_params) * qm[i+1][k-1]; } tt = ptype[k][l]; prmt *= expMLclosing; prml[ m] = prmt; prm_l[m] = prm_l1[m]*expMLbase[1]+prmt1; prm_MLb = prm_MLb*expMLbase[1] + prml[m]; /* same as: prm_MLb = 0; for (i=n; i>k; i--) prm_MLb += prml[i]*expMLbase[k-i-1]; */ prml[m] = prml[ m] + prm_l[m]; if (qb[k][l] == 0.) continue; temp = prm_MLb; if (ulength) { double dang; /* coefficient for computations of unpairedarrays */ dang = qb[k][l] * exp_E_MLstem(tt, S1[k-1], S1[l+1], pf_params) * scale[2]; for (m=MIN2(k+winSize-2,n);m>=l+2; m--){ qmb[l][m-l-1] += prml[m]*dang; q2l[l][m-l-1] += (prml[m]-prm_l[m])*dang; } } for (m=MIN2(k+winSize-2,n);m>=l+2; m--) temp += prml[m]*qm[l+1][m-1]; temp *= exp_E_MLstem(tt, (k>1) ? S1[k-1] : -1, (l<n) ? 
S1[l+1] : -1, pf_params) * scale[2]; pR[k][l] += temp; if (pR[k][l]>Qmax) { Qmax = pR[k][l]; if (Qmax>max_real/10.) fprintf(stderr, "P close to overflow: %d %d %g %g\n", i, m, pR[k][l], qb[k][l]); } if (pR[k][l]>=max_real) { ov++; pR[k][l]=FLT_MAX; } } /* end for (l=..) */ tmp = prm_l1; prm_l1=prm_l; prm_l=tmp; /* end for (l=..) */ if ((ulength)&&(k-MAXLOOP-1>0)){ /* if (pUoutput) pU[k-MAXLOOP-1]=(double *)space((ulength+2)*sizeof(double)); */ if(split){ /*generate the new arrays, if you want them somewhere else, you have to generate them and overgive them ;)*/ double **pUO; double **pUI; double **pUM; double **pUH; pUO= (double **) space((n+1)*sizeof(double *)); pUI= (double **) space((n+1)*sizeof(double *)); pUM= (double **) space((n+1)*sizeof(double *)); pUH= (double **) space((n+1)*sizeof(double *)); if (pUoutput) { for (i=1; i<=ulength; i++) { pUH[i]=(double *)space((MAX2(MAXLOOP,ulength)+2)*sizeof(double)); pUI[i]=(double *)space((MAX2(MAXLOOP,ulength)+2)*sizeof(double)); pUO[i]=(double *)space((MAX2(MAXLOOP,ulength)+2)*sizeof(double)); pUM[i]=(double *)space((MAX2(MAXLOOP,ulength)+2)*sizeof(double)); } } //dont want to have that yet? 
/* else { for (i=1; i<=n; i++) pU[i]=(double *)space((MAX2(MAXLOOP,ulength)+2)*sizeof(double)); }*/ compute_pU_splitup(k-MAXLOOP-1,ulength,pU,pUO,pUH, pUI, pUM, winSize, n, sequence); if (pUoutput) { putoutpU_splitup(pUO,k-MAXLOOP-1, ulength, pUfp,'E'); putoutpU_splitup(pUH,k-MAXLOOP-1, ulength, pUfp,'H'); putoutpU_splitup(pUI,k-MAXLOOP-1, ulength, pUfp,'I'); putoutpU_splitup(pUM,k-MAXLOOP-1, ulength, pUfp,'M'); } } else { compute_pU(k-MAXLOOP-1,ulength,pU, winSize, n, sequence); /* here, we put out and free pUs not in use any more (hopefully) */ if (pUoutput) putoutpU(pU,k-MAXLOOP-1, ulength, pUfp); } } if (j-(2*winSize+MAXLOOP+1)>0) { printpbar(pR,winSize,j-(2*winSize+MAXLOOP+1),n); if (simply_putout) { print_plist(n, j-(2*winSize+MAXLOOP+1), pR, winSize, spup); } else{ pl=get_plistW(pl, n, j-(2*winSize+MAXLOOP+1), pR, winSize); } if (do_dpp)dpp=get_deppp(dpp,j-(2*winSize-MAXLOOP),pairSize, n); FreeOldArrays(j-(2*winSize+MAXLOOP+1)); } } /* end if (do_backtrack)*/ }/* end for j */ /* finish output and free */ for (j=MAX2(1,n-MAXLOOP); j<=n;j++) { /* if (pUoutput) pU[j]=(double *)space((ulength+2)*sizeof(double)); */ if (ulength) compute_pU(j,ulength,pU, winSize, n, sequence); /*here, we put out and free pUs not in use any more (hopefully)*/ if (pUoutput) putoutpU(pU,j, ulength, pUfp); } for (j=MAX2(n-winSize-MAXLOOP,1); j<=n; j++) { printpbar(pR,winSize,j,n); if (simply_putout) { print_plist(n, j, pR, winSize, spup); } else { pl=get_plistW(pl, n, j, pR, winSize); } if ((do_dpp)&&j<n) dpp=get_deppp(dpp,j,pairSize, n); FreeOldArrays(j); } /* free_pf_arrays_L(); */ free(S); free(S1); S = S1 = NULL; if (ov>0) fprintf(stderr, "%d overflows occurred while backtracking;\n" "you might try a smaller pf_scale than %g\n", ov, pf_params->pf_scale); *dpp2=dpp; return pl; } PRIVATE void scale_pf_params(unsigned int length, pf_paramT *parameters){ unsigned int i; double kT, scaling_factor; if(pf_params) free(pf_params); if(parameters){ pf_params = 
get_boltzmann_factor_copy(parameters); } else { model_detailsT md; set_model_details(&md); pf_params = get_boltzmann_factors(temperature, alpha, md, pf_scale); } scaling_factor = pf_params->pf_scale; kT = pf_params->kT; /* kT in cal/mol */ /* scaling factors (to avoid overflows) */ if (scaling_factor == -1) { /* mean energy for random sequences: 184.3*length cal */ scaling_factor = exp(-(-185+(pf_params->temperature-37.)*7.27)/kT); if (scaling_factor<1) scaling_factor=1; pf_params->pf_scale = scaling_factor; } scale[0] = 1.; scale[1] = 1./scaling_factor; expMLbase[0] = 1; expMLbase[1] = pf_params->expMLbase/scaling_factor; for (i=2; i<=length; i++) { scale[i] = scale[i/2]*scale[i-(i/2)]; expMLbase[i] = pow(pf_params->expMLbase, (double)i) * scale[i]; } } PRIVATE void printpbar(FLT_OR_DBL **prb,int winSize, int i, int n) { int j; int howoften=0; /* how many samples do we have for this pair */ int pairdist; for (j=i+TURN; j<MIN2(i+winSize,n+1); j++) { pairdist=(j-i+1); /*4cases*/ howoften=MIN2(winSize-pairdist+1,i); /*pairdist,start*/ howoften=MIN2(howoften,n-j+1); /*end*/ howoften=MIN2(howoften,n-winSize+1); /*windowsize*/ prb[i][j] *= qb[i][j]/howoften; } return; } PRIVATE void FreeOldArrays(int i) { /*free arrays no longer needed*/ free(pR[i]+i); free(q[i]+i); free(qb[i]+i); free(qm[i]+i); if (ulength!=0) { free(qm2[i]+i); free(QI5[i]); free(qmb[i]); free(q2l[i]); } free(ptype[i]+i); return; } PRIVATE void GetNewArrays(int j, int winSize) { /*allocate new part of arrays*/ pR[j]=(FLT_OR_DBL *)space((winSize+1)*sizeof(FLT_OR_DBL)); pR[j]-=j; q[j]=(FLT_OR_DBL *)space((winSize+1)*sizeof(FLT_OR_DBL)); q[j]-=j; qb[j]=(FLT_OR_DBL *)space((winSize+1)*sizeof(FLT_OR_DBL)); qb[j]-=j; qm[j]=(FLT_OR_DBL *)space((winSize+1)*sizeof(FLT_OR_DBL)); qm[j]-=j; if (ulength!=0) { qm2[j]=(FLT_OR_DBL *)space((winSize+1)*sizeof(FLT_OR_DBL)); qm2[j]-=j; QI5[j]=(FLT_OR_DBL *)space((winSize+1)*sizeof(FLT_OR_DBL)); qmb[j]=(FLT_OR_DBL *)space((winSize+1)*sizeof(FLT_OR_DBL)); q2l[j]=(FLT_OR_DBL 
*)space((winSize+1)*sizeof(FLT_OR_DBL)); } ptype[j]=(char *)space((winSize+1)*sizeof(char)); ptype[j]-=j; return; } PRIVATE void GetPtype(int i, int winSize,const short *S,int n) { /*make new entries in ptype array*/ int j; int type; for (j=i; j<=MIN2(i+winSize,n); j++) { type = pair[S[i]][S[j]]; ptype[i][j] = (char) type; } return; } PRIVATE plist *get_plistW(plist *pl, int length, int start, FLT_OR_DBL **Tpr, int winSize) { /* get pair probibilities out of pr array */ int j, max_p; max_p=1000; while (max_p<num_p) max_p*=2; for (j=start+1; j<=MIN2(start+winSize, length); j++) { if (Tpr[start][j]<cutoff) continue; if (num_p==max_p-1) { max_p*=2; pl=(plist *)xrealloc(pl,max_p*sizeof(plist)); } pl[num_p].i=start; pl[num_p].j=j; pl[num_p++].p=Tpr[start][j]; } /* mark end of data with zeroes */ pl[num_p].i=0; pl[num_p].j=0; pl[num_p].p=0.; /* pl=(struct plist *)xrealloc(pl,(count)*sizeof(struct plist)); */ return pl; } PRIVATE plist *get_deppp(plist *pl, int start, int pairsize, int length) { /* compute dependent pair probabilities */ int i, j, count=0; double tmp; plist *temp; temp=(plist *)space(pairsize*sizeof(plist)); /* holds temporary deppp */ for (j=start+TURN; j<MIN2(start+pairsize,length); j++) { if ((qb[start][j]*qb[start-1][(j+1)])>10e-200) { int type=ptype[start-1][j+1]; int type_2=rtype[ptype[start][j]]; tmp=qb[start][j]/qb[start-1][(j+1)]*exp_E_IntLoop(0, 0, type, type_2, S1[start], S1[j], S1[start-1], S1[j+1], pf_params) * scale[2]; temp[count].i=start; temp[count].j=j; temp[count++].p=tmp; } } /* write it to list of deppps */ for (i=0; pl[i].i!=0; i++); pl=(plist *)xrealloc(pl,(i+count+1)*sizeof(plist)); for (j=0; j<count; j++) { pl[i+j].i=temp[j].i; pl[i+j].j=temp[j].j; pl[i+j].p=temp[j].p; } pl[i+count].i=0; pl[i+count].j=0; pl[i+count].p=0; free(temp); return pl; } PRIVATE void print_plist(int length,int start, FLT_OR_DBL **Tpr, int winSize, FILE *fp) { /* print out of pr array, do not save */ int j; for (j=start+1; j<=MIN2(start+winSize, length); 
j++) { if (Tpr[start][j]<cutoff) continue; fprintf(fp,"%d %d %g\n",start,j,Tpr[start][j]); } /* mark end of data with zeroes */ return ; } PRIVATE void compute_pU(int k, int ulength, double **pU, int winSize,int n, char *sequence) { /* here, we try to add a function computing all unpaired probabilities starting at some i, going down to $unpaired, to be unpaired, i.e. a list with entries from 1 to unpaired for every i, with the probability of a stretch of length x, starting at i-x+1, to be unpaired */ int startu; int i5; int j3, len, obp; double temp; double *QBE; FLT_OR_DBL expMLclosing = pf_params->expMLclosing; QBE=(double *) space((MAX2(ulength,MAXLOOP)+2)*sizeof(double)); /* first, we will */ /* for k<=ulength, pU[k][k]=0, because no bp can enclose it */ if (pUoutput&&k+ulength<=n) pU[k+ulength]=(double *)space((ulength+2)*sizeof(double)); /*compute pu[k+ulength][ulength] */ for (i5=MAX2(k+ulength-winSize+1,1);i5<=k;i5++) { for (j3=k+ulength+1; j3<=MIN2(n,i5+winSize-1); j3++) { /* if (k>400) { printf("i%d j%d ",i5,j3); fflush(stdout); } */ if (ptype[i5][j3]!=0) {/**/ /* (.. >-----|..........) i5 j j+ulength j3 */ /*Multiloops*/ temp = (i5<k) ? qm2[i5+1][k] * expMLbase[j3-k-1] : 0.; /* (..{}{}-----|......) */ if(j3-1>k+ulength) temp += qm2[k+ulength+1][j3-1] * expMLbase[k+ulength-i5]; /* (..|-----|{}{}) */ if((i5<k)&&(j3-1>k+ulength)) temp += qm[i5+1][k] * qm[k+ulength+1][j3-1] * expMLbase[ulength]; /* ({}|-----|{}) */ /* add dangles, multloopclosing etc. 
*/ temp *= exp_E_MLstem(rtype[ptype[i5][j3]], S1[j3-1], S1[i5+1], pf_params) * scale[2] * expMLclosing; /*add hairpins*/ temp += exp_E_Hairpin(j3-i5-1, ptype[i5][j3], S1[i5+1], S1[j3-1], sequence+i5-1, pf_params) * scale[j3-i5+1]; /*add outer probability*/ temp *= pR[i5][j3]; pU[k+ulength][ulength] += temp; } } } /* code doubling to avoid if within loop */ #if 0 /*initialization for interior loops, it is not recomended to have verysmall ulengths!!*/ if (ulength<MAXLOOP) { int k5; int l3; int outype; /* kl bp is 5' */ /* MAXLOOP>((l5-k5-1)+(j3-l3-1) k-winSize+ulength<i5<k-TURN-1; k+ulength<j3<=k+MAXLOOP+1 if i then use l3, it is easier by far: j3-MAXLOOP<=l3<=k i5<k5<k-TURN k5<=i5+l3+2+MAXLOOP-j3 k5+TURN<l3<=k */ for (i5=MAX2(k+ulength-winSize,1);i5<k-TURN-1;i5++) { for (j3=k+ulength+1; j3<=MIN2(n,MIN2(i5+winSize-1,k+MAXLOOP+1)); j3++) { double temp=0; if (outype=ptype[i5][j3]>0) /* oder so halt */ for (l3=MAX2(i5+TURN+1,j3-MAXLOOP-1); l3<=k; l3++){ for (k5=i5+1; k5<=MIN2(l3-TURN-1,MAXLOOP+i5+l3+2-j3); k5++){ if (ptype[k5][l3]) { temp+= qb[k5][l3]*expLoopEnergy(k5-i5-1, j3-l3-1, outype, rtype[ptype[k5][l3]], S1[i5+1], S1[j3-1], S1[k5-1], S1[l3+1]); } } } temp*=pR[i5][j3]; pU[k+ulength][ulength]+= temp; } } /* kl bp is 3' */ /* k+ulength-MAXLOOP<=i5<=k k+ulength+1+TURN<j3<i5+winSize k+ulength+1<=k5<i5+MAXLOOP+2 || k5<j3-TURN k5<l3<j3 || j3-k5-i5-2-ML<=l3<j3 */ for (i5=MAX2(1,MAX2(k+ulength-winSize,k+ulength-MAXLOOP));i5<=k; i5++){ for (j3=k+ulength+TURN+2; j3<MIN2(n+1,i5+winSize); j3++) { double temp = 0; if (outype=ptype[i5][j3]>0) /* oder so halt */ for (k5=k+ulength+1; k5<MIN2(j3-TURN-1,i5+MAXLOOP+2); k5++) { for (l3=MAX2(k5+TURN+1,j3+k5-i5-2-MAXLOOP); l3<j3; l3++) { if (ptype[k5][l3]) temp += qb[k5][l3]*expLoopEnergy(k5-i5-1, j3-l3-1, outype, rtype[ptype[k5][l3]], S1[i5+1], S1[j3-1], S1[k5-1], S1[l3+1]); } } temp*=pR[i5][j3]; pU[k+ulength][ulength]+= temp; } } } /* Add up Is QI5[l][m-l-1] QI3 */ /* Add up Interior loop terms */ temp=0.; for (len=winSize; 
len>=ulength; len--) temp+=QI3[k][len]; for (;len>0; len--) { temp += QI3[k][len]; QBE[len] += temp; } #endif temp=0.; for (len=winSize; len>=MAX2(ulength,MAXLOOP); len--) temp+=QI5[k][len]; for (;len>0; len--) { temp += QI5[k][len]; QBE[len] += temp; /* replace QBE with QI */ } /* Add Hairpinenergy to QBE */ temp=0.; for(obp = MIN2(n, k + winSize - 1); obp > k + ulength; obp--) if(ptype[k][obp]) temp += pR[k][obp] * exp_E_Hairpin(obp-k-1, ptype[k][obp], S1[k+1], S1[obp-1], sequence+k-1, pf_params) * scale[obp-k+1]; for(obp = MIN2(n, MIN2(k + winSize - 1, k + ulength)); obp > k + 1; obp--){ if (ptype[k][obp]) temp += pR[k][obp] * exp_E_Hairpin(obp-k-1, ptype[k][obp], S1[k+1], S1[obp-1], sequence+k-1, pf_params) * scale[obp-k+1]; QBE[obp-k-1] += temp; /* add hairpins to QBE (all in one array) */ } /* doubling the code to get the if out of the loop */ /* Add up Multiloopterms qmb[l][m]+=prml[m]*dang; q2l[l][m]+=(prml[m]-prm_l[m])*dang; */ temp=0.; for(len = winSize; len >= ulength; len--) temp += q2l[k][len] * expMLbase[len]; for( ; len > 0; len--){ temp += q2l[k][len] * expMLbase[len]; QBE[len] += temp; /* add (()()____) type cont. 
to I3 */ } for(len = 1; len < ulength; len++){ for(obp = k + len + TURN; obp <= MIN2(n, k + winSize - 1); obp++){ /* add (()___()) */ QBE[len] += qmb[k][obp-k-1] * qm[k+len+1/*2*/][obp-1] * expMLbase[len]; } } for (len=1; len<ulength; len++) { for (obp=k+len+TURN+TURN; obp<=MIN2(n,k+winSize-1); obp++) { if (ptype[k][obp]) { temp = exp_E_MLstem(rtype[ptype[k][obp]], S1[obp-1], S1[k+1], pf_params) * scale[2] * expMLbase[len] * expMLclosing; /* k:obp */ QBE[len] += pR[k][obp] * temp * qm2[k+len+1][obp-1]; /* add (___()()) */ } } } /* After computing all these contributions in QBE[len], that k is paired and the unpaired stretch is AT LEAST len long, we start to add that to the old unpaired thingies; */ for(len = 1; len < MIN2(MAX2(ulength, MAXLOOP), n - k); len++){ pU[k+len][len] += pU[k+len][len+1] + QBE[len]; } /*open chain*/ if ((ulength>=winSize)&&(k>=ulength)) { pU[k][winSize]=scale[winSize]/q[k-winSize+1][k]; } /* now the not enclosed by any base pair terms for whatever it is we do not need anymore... ... 
which should be e.g; k, again */ for(startu = MIN2(ulength, k); startu > 0; startu--){ temp=0.; for(i5 = MAX2(1, k - winSize + 2); i5 <= MIN2(k - startu, n - winSize + 1); i5++){ temp += q[i5][k - startu] * q[k + 1][i5 + winSize - 1] * scale[startu]/q[i5][i5 + winSize - 1]; } /* the 2 Cases where the borders are on the edge of the interval */ if((k >= winSize) && (startu + 1 <= winSize)) temp += q[k - winSize + 1][k - startu]*scale[startu]/q[k - winSize + 1][k]; if((k <= n - winSize+ startu) && (k - startu >= 0) && (k < n) && (startu + 1 <= winSize)) temp += q[k + 1][k - startu + winSize] * scale[startu] / q[k - startu + 1][k - startu + winSize]; /* Divide by number of possible windows */ pU[k][startu] += temp; { int leftmost, rightmost; leftmost = MAX2(1, k - winSize + 1); rightmost = MIN2(n - winSize + 1, k - startu + 1); pU[k][startu] /= (rightmost - leftmost + 1); } } free(QBE); return; } PRIVATE void putoutpU(double **pUx, int k, int ulength, FILE *fp) { /*put out unpaireds for k, and free pU[k], make sure we don't need pU[k] any more!!*/ /*could use that for hairpins, also!*/ int i; fprintf(fp,"%d\t",k); for (i=1; i<=MIN2(ulength,k); i++) { fprintf(fp,"%.5g\t",pUx[k][i]); } fprintf(fp,"\n"); free(pUx[k]); } PRIVATE void putoutpU_splitup(double **pUx, int k, int ulength, FILE *fp, char ident) { /*put out unpaireds for k, and free pU[k], make sure we don't need pU[k] any more!!*/ /*could use that for hairpins, also!*/ int i; fprintf(fp,"%d\t",k); for (i=1; i<=MIN2(ulength,k); i++) { fprintf(fp,"%.5g\t",pUx[k][i]); } fprintf(fp,"\t%s\n",ident); free(pUx[k]); } PUBLIC void putoutpU_prob(double **pU,int length, int ulength, FILE *fp, int energies) { putoutpU_prob_par(pU, length, ulength, fp, energies, pf_params); } PUBLIC void putoutpU_prob_par(double **pU,int length, int ulength, FILE *fp, int energies, pf_paramT *parameters){ /*put out unpaireds */ int i,k; double kT = parameters->kT/1000.0; double temp; if (energies) fprintf(fp,"#opening energies\n #i$\tl="); 
else fprintf(fp,"#unpaired probabilities\n #i$\tl="); for (i=1; i<=ulength; i++) { fprintf(fp,"%d\t", i); } fprintf(fp,"\n"); for (k=1; k<=length; k++){ fprintf(fp,"%d\t",k); for (i=1; i<=ulength; i++) { if (i>k) { fprintf(fp,"NA\t"); continue; } if (energies) temp=-log(pU[k][i])*kT; else temp=pU[k][i]; fprintf(fp,"%.7g\t",temp); } fprintf(fp,"\n"); free(pU[k]); } fflush(fp); } PUBLIC void putoutpU_prob_bin(double **pU,int length, int ulength, FILE *fp, int energies) { putoutpU_prob_bin_par(pU, length, ulength, fp, energies, pf_params); } PUBLIC void putoutpU_prob_bin_par(double **pU,int length, int ulength, FILE *fp, int energies, pf_paramT *parameters) { /*put out unpaireds */ int i,k; double kT= parameters->kT/1000.0; double temp; int *p; p = (int*) space(sizeof(int)*1); /* write first line */ p[0]=ulength; /* u length */ fwrite(p,sizeof(int),1,fp); p[0]=length; /* seq length */ fwrite(p,sizeof(int),1,fp); for (k=3; k<=(length+20); k++){ /* all the other lines are set to 1000000 because we are at ulength=0 */ p[0]=1000000; fwrite(p,sizeof(int),1,fp); } /* data */ for (i=1; i<=ulength; i++) { for (k=1; k<=11; k++){/* write first ten entries to 1000000 */ p[0]=1000000; fwrite(p,sizeof(int),1,fp); } for (k=1; k<=length; k++){/* write data now */ if (i>k) { p[0]=1000000; /* check if u > pos */ fwrite(p,sizeof(int),1,fp); continue; } else{ p[0]= (int) rint(100 *(-log(pU[k][i])*kT)); fwrite(p,sizeof(int),1,fp); } } for (k=1; k<=9; k++){/* finish by writing the last 10 entries */ p[0]=1000000; fwrite(p,sizeof(int),1,fp); } } /* free pU array; */ for (k=1; k<=length; k++){ free(pU[k]); } free(p); fflush(fp); } /* Here: Space for questions... */ PRIVATE void compute_pU_splitup(int k, int ulength, double **pU, double **pUO, double **pUH, double **pUI, double **pUM, int winSize,int n, char *sequence) { /* here, we try to add a function computing all unpaired probabilities starting at some i, going down to $unpaired, to be unpaired, i.e. 
a list with entries from 1 to unpaired for every i, with the probability of a stretch of length x, starting at i-x+1, to be unpaired */ int startu; int i5; int j3, len, obp; double temp; double *QBE; double *QBI; double *QBM; double *QBH; FLT_OR_DBL expMLclosing = pf_params->expMLclosing; QBE=(double *) space((MAX2(ulength,MAXLOOP)+2)*sizeof(double)); QBM=(double *) space((MAX2(ulength,MAXLOOP)+2)*sizeof(double)); QBI=(double *) space((MAX2(ulength,MAXLOOP)+2)*sizeof(double)); QBH=(double *) space((MAX2(ulength,MAXLOOP)+2)*sizeof(double)); /* first, we will */ /* for k<=ulength, pU[k][k]=0, because no bp can enclose it */ if (pUoutput&&k+ulength<=n) pU[k+ulength]=(double *)space((ulength+2)*sizeof(double)); /*compute pu[k+ulength][ulength] */ for (i5=MAX2(k+ulength-winSize+1,1);i5<=k;i5++) { for (j3=k+ulength+1; j3<=MIN2(n,i5+winSize-1); j3++) { /* if (k>400) { printf("i%d j%d ",i5,j3); fflush(stdout); } */ if (ptype[i5][j3]!=0) {/**/ /* (.. >-----|..........) i5 j j+ulength j3 */ /*Multiloops*/ temp = (i5<k) ? qm2[i5+1][k] * expMLbase[j3-k-1] : 0.; /* (..{}{}-----|......) */ if(j3-1>k+ulength) temp += qm2[k+ulength+1][j3-1] * expMLbase[k+ulength-i5]; /* (..|-----|{}{}) */ if((i5<k)&&(j3-1>k+ulength)) temp += qm[i5+1][k] * qm[k+ulength+1][j3-1] * expMLbase[ulength]; /* ({}|-----|{}) */ /* add dangles, multloopclosing etc. 
*/ temp *= exp_E_MLstem(rtype[ptype[i5][j3]], S1[j3-1], S1[i5+1], pf_params) * scale[2] * expMLclosing; /*add hairpins*/ temp += exp_E_Hairpin(j3-i5-1, ptype[i5][j3], S1[i5+1], S1[j3-1], sequence+i5-1, pf_params) * scale[j3-i5+1]; /*add outer probability*/ temp *= pR[i5][j3]; pU[k+ulength][ulength] += temp; } } } /* code doubling to avoid if within loop */ temp=0.; for (len=winSize; len>=MAX2(ulength,MAXLOOP); len--) temp+=QI5[k][len]; for (;len>0; len--) { temp += QI5[k][len]; QBI[len] += temp; QBE[len] += temp; /* replace QBE with QI */ } /* Add Hairpinenergy to QBE */ temp=0.; for(obp = MIN2(n, k + winSize - 1); obp > k + ulength; obp--) if(ptype[k][obp]) temp += pR[k][obp] * exp_E_Hairpin(obp-k-1, ptype[k][obp], S1[k+1], S1[obp-1], sequence+k-1, pf_params) * scale[obp-k+1]; for(obp = MIN2(n, MIN2(k + winSize - 1, k + ulength)); obp > k + 1; obp--){ if (ptype[k][obp]) temp += pR[k][obp] * exp_E_Hairpin(obp-k-1, ptype[k][obp], S1[k+1], S1[obp-1], sequence+k-1, pf_params) * scale[obp-k+1]; QBH[obp-k-1] += temp; QBE[obp-k-1] += temp; /* add hairpins to QBE (all in one array) */ } /* doubling the code to get the if out of the loop */ /* Add up Multiloopterms qmb[l][m]+=prml[m]*dang; q2l[l][m]+=(prml[m]-prm_l[m])*dang; */ temp=0.; for(len = winSize; len >= ulength; len--) temp += q2l[k][len] * expMLbase[len]; for( ; len > 0; len--){ temp += q2l[k][len] * expMLbase[len]; QBM[len] += temp; QBE[len] += temp; /* add (()()____) type cont. 
to I3 */ } for(len = 1; len < ulength; len++){ for(obp = k + len + TURN; obp <= MIN2(n, k + winSize - 1); obp++){ /* add (()___()) */ QBM[len] += qmb[k][obp-k-1] * qm[k+len+1/*2*/][obp-1] * expMLbase[len]; QBE[len] += qmb[k][obp-k-1] * qm[k+len+1/*2*/][obp-1] * expMLbase[len]; } } for (len=1; len<ulength; len++) { for (obp=k+len+TURN+TURN; obp<=MIN2(n,k+winSize-1); obp++) { if (ptype[k][obp]) { temp = exp_E_MLstem(rtype[ptype[k][obp]], S1[obp-1], S1[k+1], pf_params) * scale[2] * expMLbase[len] * expMLclosing; /* k:obp */ QBE[len] += pR[k][obp] * temp * qm2[k+len+1][obp-1]; /* add (___()()) */ QBM[len] += pR[k][obp] * temp * qm2[k+len+1][obp-1]; /* add (___()()) */ } } } /* After computing all these contributions in QBE[len], that k is paired and the unpaired stretch is AT LEAST len long, we start to add that to the old unpaired thingies; */ for(len = 1; len < MIN2(MAX2(ulength, MAXLOOP), n - k); len++){ pU[k+len][len] += pU[k+len][len+1] + QBE[len]; pUH[k+len][len] += pUH[k+len][len+1] + QBH[len]; pUM[k+len][len] += pUM[k+len][len+1] + QBM[len]; pUI[k+len][len] += pUI[k+len][len+1] + QBI[len]; } /* open chain */ if ((ulength>=winSize)&&(k>=ulength)) { pUO[k][winSize]=scale[winSize]/q[k-winSize+1][k]; } /*open chain*/ if ((ulength>=winSize)&&(k>=ulength)) { pU[k][winSize]=scale[winSize]/q[k-winSize+1][k]; } /* now the not enclosed by any base pair terms for whatever it is we do not need anymore... ... 
which should be e.g; k, again */ for(startu = MIN2(ulength, k); startu > 0; startu--){ temp=0.; for(i5 = MAX2(1, k - winSize + 2); i5 <= MIN2(k - startu, n - winSize + 1); i5++){ temp += q[i5][k - startu] * q[k + 1][i5 + winSize - 1] * scale[startu]/q[i5][i5 + winSize - 1]; } /* the 2 Cases where the borders are on the edge of the interval */ if((k >= winSize) && (startu + 1 <= winSize)) temp += q[k - winSize + 1][k - startu]*scale[startu]/q[k - winSize + 1][k]; if((k <= n - winSize+ startu) && (k - startu >= 0) && (k < n) && (startu + 1 <= winSize)) temp += q[k + 1][k - startu + winSize] * scale[startu] / q[k - startu + 1][k - startu + winSize]; /* Divide by number of possible windows */ pU[k][startu] += temp; pUO[k][startu] += temp; { int leftmost, rightmost; leftmost = MAX2(1, k - winSize + 1); rightmost = MIN2(n - winSize + 1, k - startu + 1); pU[k][startu] /= (rightmost - leftmost + 1); /*Do we want to make a distinction between those?*/ pUH[k][startu] /= (rightmost - leftmost + 1); pUO[k][startu] /= (rightmost - leftmost + 1); pUI[k][startu] /= (rightmost - leftmost + 1); pUM[k][startu] /= (rightmost - leftmost + 1); } } free(QBE); free(QBI); free(QBH); free(QBM); return; } PUBLIC void putoutpU_prob_splitup(double **pU, double **pUO, double **pUH, double **pUI, double **pUM, int length, int ulength, FILE *fp, int energies) { /*put out unpaireds */ int i,k; double kT= (temperature+K0)*GASCONST/1000.0; double temp; if (energies) fprintf(fp,"#opening energies\n #i$\tl="); else fprintf(fp,"#unpaired probabilities\n #i$\tl="); fprintf(fp,"Total\n"); for (i=1; i<=ulength; i++) { fprintf(fp,"%d\t", i); } fprintf(fp,"\n"); for (k=1; k<=length; k++){ fprintf(fp,"%d\t",k); for (i=1; i<=ulength; i++) { if (i>k) { fprintf(fp,"NA\t"); continue; } if (energies) temp=-log(pU[k][i])*kT; else temp=pU[k][i]; fprintf(fp,"%.7g\t",temp); } fprintf(fp,"\tT\n"); free(pU[k]); } fprintf(fp,"\n###################################################################\nHairpin\n"); for (i=1; 
i<=ulength; i++) { fprintf(fp,"%d\t", i); } fprintf(fp,"\n"); for (k=1; k<=length; k++){ fprintf(fp,"%d\t",k); for (i=1; i<=ulength; i++) { if (i>k) { fprintf(fp,"NA\t"); continue; } if (energies) temp=-log(pUH[k][i])*kT; else temp=pUH[k][i]; fprintf(fp,"%.7g\t",temp); } fprintf(fp,"\tH\n"); free(pUH[k]); } fprintf(fp,"\n###################################################################\nInterior\n"); for (i=1; i<=ulength; i++) { fprintf(fp,"%d\t", i); } fprintf(fp,"\n"); for (k=1; k<=length; k++){ fprintf(fp,"%d\t",k); for (i=1; i<=ulength; i++) { if (i>k) { fprintf(fp,"NA\t"); continue; } if (energies) temp=-log(pUI[k][i])*kT; else temp=pUI[k][i]; fprintf(fp,"%.7g\t",temp); } fprintf(fp,"\tI\n"); free(pUI[k]); } fprintf(fp,"\n###################################################################\nMultiloop\n"); for (i=1; i<=ulength; i++) { fprintf(fp,"%d\t", i); } fprintf(fp,"\n"); for (k=1; k<=length; k++){ fprintf(fp,"%d\t",k); for (i=1; i<=ulength; i++) { if (i>k) { fprintf(fp,"NA\t"); continue; } if (energies) temp=-log(pUM[k][i])*kT; else temp=pUM[k][i]; fprintf(fp,"%.7g\t",temp); } fprintf(fp,"\tM\n"); free(pUM[k]); } fprintf(fp,"\n###################################################################\nExterior\n"); for (i=1; i<=ulength; i++) { fprintf(fp,"%d\t", i); } fprintf(fp,"\t E\n"); for (k=1; k<=length; k++){ fprintf(fp,"%d\t",k); for (i=1; i<=ulength; i++) { if (i>k) { fprintf(fp,"NA\t"); continue; } if (energies) temp=-log(pUO[k][i])*kT; else temp=pUO[k][i]; fprintf(fp,"%.7g\t",temp); } fprintf(fp,"\n"); free(pU[k]); } fflush(fp); } /*###########################################*/ /*# deprecated functions below #*/ /*###########################################*/ PUBLIC void init_pf_foldLP(int length){ /* DO NOTHING */}
csrmv_merge.h
#ifndef __CSRMV_MERGE_H__
#define __CSRMV_MERGE_H__

#include <algorithm>
#include "complex_ops.h"
#include "openmp.h"
#include "numpy/ndarraytypes.h"

// Merge-path CSR sparse-matrix/dense-vector multiply (y += alpha * A * x).
// See work by Merrill et al. (http://ieeexplore.ieee.org/abstract/document/7877136/)
// for original work and implementation.
// This code contains modified versions of algorithms 2 and 3.
//
// NOTE(review): csrmv_merge / csrmv_merge_strided use bare `#pragma omp for`,
// so they are presumably intended to be called from inside an existing
// `#pragma omp parallel` region — verify against callers.

// Read-only iterator that yields init, init+1, init+2, ... on indexing.
// Stands in for the "natural numbers" merge list B without materializing it.
template<class I>
class CountingInputIterator{
	const I init;
public:
	CountingInputIterator(I _init) : init(_init) {}
	I operator[](I i){return init+i;}
};

// A 2-D coordinate on the merge path: x indexes list A (rows),
// y indexes list B (nonzeros).
template<class I>
struct CoordinateT{
	I x,y;
	CoordinateT(I _x,I _y) : x(_x), y(_y) {}
};

// Binary-search the given cross-diagonal of the 2-D merge grid for the point
// where the merge path crosses it.  `a` holds row end-offsets, `b` the
// (virtual) nonzero indices.  Returns the (x, y) split of `diagonal`.
template<class I,class AIteratorT,class BIteratorT>
CoordinateT<I> MergePathSearch(I diagonal, I a_len, I b_len, AIteratorT a, BIteratorT b)
{
	// Diagonal search range (in x coordinate space)
	I zero = 0;
	I x_min = std::max(diagonal - b_len, zero);
	I x_max = std::min(diagonal, a_len);

	// 2D binary-search along the diagonal search range
	while (x_min < x_max) {
		I pivot = (x_min + x_max) >> 1;
		if (a[pivot] <= b[diagonal - pivot - 1]) {
			// Keep top-right half of diagonal range
			x_min = pivot + 1;
		}
		else {
			// Keep bottom-left half of diagonal range
			x_max = pivot;
		}
	}

	return CoordinateT<I>(
		std::min(x_min, a_len),  // x coordinate in A
		diagonal - x_min);       // y coordinate in B
}

// y (+)= alpha * A * x for a CSR matrix, with work split evenly across threads
// by total merge items (rows + nonzeros) rather than by rows, so skewed row
// lengths stay balanced.
//
// overwrite_y      - if true, y is zeroed first; otherwise accumulated into.
// row_carry_out /
// value_carry_out  - per-thread scratch (>= num_threads entries) for partial
//                    rows that straddle a thread boundary; fixed up at the end.
template<class I,class T1,class T2,class T3>
void csrmv_merge(const bool overwrite_y, const I num_rows, const I row_offsets[],
				 const I column_indices[], const T1 values[], const T2 alpha,
				 const T3 x[], I row_carry_out[], T3 value_carry_out[], T3 y[])
{
	const I* row_end_offsets = row_offsets + 1;  // Merge list A: row end-offsets
	const I num_nonzeros = row_offsets[num_rows];
	int num_threads = omp_get_num_threads();
	CountingInputIterator<I> nz_indices(0);  // Merge list B: Natural numbers(NZ indices)
	I num_merge_items = num_rows + num_nonzeros;  // Merge path total length
	I items_per_thread = (num_merge_items + num_threads - 1) / num_threads;  // Merge items per thread

	if(overwrite_y){
		#pragma omp for schedule(static)
		for(I i=0;i<num_rows;i++){
			y[i] = T3(0);
		}
	}

	// Spawn parallel threads: schedule(static,1) gives exactly one merge-path
	// slice per thread id.
	#pragma omp for schedule(static,1)
	for (int tid = 0; tid < num_threads; tid++)
	{
		// Find starting and ending MergePath coordinates (row-idx, nonzero-idx) for each thread
		I diagonal = std::min(items_per_thread * tid, num_merge_items);
		I diagonal_end = std::min(diagonal + items_per_thread, num_merge_items);
		CoordinateT<I> thread_coord = MergePathSearch(diagonal, num_rows, num_nonzeros, row_end_offsets, nz_indices);
		CoordinateT<I> thread_coord_end = MergePathSearch(diagonal_end, num_rows, num_nonzeros,row_end_offsets, nz_indices);

		// Consume merge items, whole rows first
		T3 running_total = 0.0;
		for (; thread_coord.x < thread_coord_end.x; ++thread_coord.x)
		{
			for (; thread_coord.y < row_end_offsets[thread_coord.x]; ++thread_coord.y)
				running_total += values[thread_coord.y] * x[column_indices[thread_coord.y]];
			y[thread_coord.x] += alpha * running_total;
			running_total = 0.0;
		}

		// Consume partial portion of thread's last row
		for (; thread_coord.y < thread_coord_end.y; ++thread_coord.y)
			running_total += values[thread_coord.y] * x[column_indices[thread_coord.y]];

		// Save carry-outs: the row this slice ended in, and the partial dot
		// product accumulated for it so far.
		row_carry_out[tid] = thread_coord_end.x;
		value_carry_out[tid] = running_total;
	}

	// Carry-out fix-up (rows spanning multiple threads); done by one thread so
	// the += on shared y entries is race-free.
	#pragma omp single
	{
		for (int tid = 0; tid < num_threads - 1; ++tid)
			if (row_carry_out[tid] < num_rows)
				y[row_carry_out[tid]] += alpha * value_carry_out[tid];
	}
}

// Same algorithm as csrmv_merge, but x and y are accessed with element
// strides (stride_x / stride_y), e.g. for columns of C-ordered 2-D arrays.
template<class I,class T1,class T2,class T3>
void csrmv_merge_strided(const bool overwrite_y, const I num_rows, const I row_offsets[],
						 const I column_indices[], const T1 values[], const T2 alpha,
						 const npy_intp stride_x, const T3 x[], I row_carry_out[],
						 T3 value_carry_out[], const npy_intp stride_y, T3 y[])
{
	const I* row_end_offsets = row_offsets + 1;  // Merge list A: row end-offsets
	const I num_nonzeros = row_offsets[num_rows];
	int num_threads = omp_get_num_threads();
	CountingInputIterator<I> nz_indices(0);  // Merge list B: Natural numbers(NZ indices)
	I num_merge_items = num_rows + num_nonzeros;  // Merge path total length
	I items_per_thread = (num_merge_items + num_threads - 1) / num_threads;  // Merge items per thread

	if(overwrite_y){
		#pragma omp for schedule(static)
		for(I i=0;i<num_rows;i++){
			y[i * stride_y] = 0;
		}
	}

	// Spawn parallel threads
	#pragma omp for schedule(static,1)
	for (int tid = 0; tid < num_threads; tid++)
	{
		// Find starting and ending MergePath coordinates (row-idx, nonzero-idx) for each thread
		I diagonal = std::min(items_per_thread * tid, num_merge_items);
		I diagonal_end = std::min(diagonal + items_per_thread, num_merge_items);
		CoordinateT<I> thread_coord = MergePathSearch(diagonal, num_rows, num_nonzeros, row_end_offsets, nz_indices);
		CoordinateT<I> thread_coord_end = MergePathSearch(diagonal_end, num_rows, num_nonzeros,row_end_offsets, nz_indices);

		// Consume merge items, whole rows first
		T3 running_total = 0.0;
		for (; thread_coord.x < thread_coord_end.x; ++thread_coord.x)
		{
			for (; thread_coord.y < row_end_offsets[thread_coord.x]; ++thread_coord.y)
				running_total += values[thread_coord.y] * x[column_indices[thread_coord.y] * stride_x];
			y[thread_coord.x * stride_y] += alpha * running_total;
			running_total = 0.0;
		}

		// Consume partial portion of thread's last row
		for (; thread_coord.y < thread_coord_end.y; ++thread_coord.y)
			running_total += values[thread_coord.y] * x[column_indices[thread_coord.y] * stride_x];

		// Save carry-outs
		row_carry_out[tid] = thread_coord_end.x;
		value_carry_out[tid] = running_total;
	}

	// Carry-out fix-up (rows spanning multiple threads)
	#pragma omp single
	{
		for (int tid = 0; tid < num_threads - 1; ++tid)
			if (row_carry_out[tid] < num_rows)
				y[row_carry_out[tid] * stride_y] += alpha * value_carry_out[tid];
	}
}

#endif
multi_reduction.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> int main() { int res1 = 0; double res2 = 0.0; #pragma omp parallel { #pragma omp for reduction(+:res1) for(int i = 0; i < omp_get_num_threads(); i++) { int rank = omp_get_thread_num(); res1 = rank; } #pragma omp for reduction(+:res2) for(int i = 0; i < omp_get_num_threads(); i++) { double rank = omp_get_thread_num() + 0.5; res2 = rank; } } printf("%d, %f\n", res1, res2); }
relu1_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: bzhang@openailab.com
 */

#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"

/*
 * Reference ReLU1 kernel for fp32 tensors: clamps each element into [-1, 1],
 * channel planes processed in parallel.
 *
 * Fix vs. original: h was read from output_tensor->dims[2] while w and
 * channels came from the input; all spatial dims now come from the input
 * tensor (reshape() copies the input dims to the output, so the values are
 * identical, but mixing sources invites bugs).  The redundant size/c_step
 * duplicate and the store-then-fix clamp were also tidied.
 *
 * Assumes NCHW layout, dims = {n, c, h, w}, and batch == 1 (dims[0] is never
 * consulted) -- TODO confirm against callers.
 *
 * Returns 0 on success.
 */
int ref_relu1_fp32(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, int num_thread)
{
    int w = input_tensor->dims[3];
    int h = input_tensor->dims[2]; /* was output_tensor->dims[2]; see note above */
    int channels = input_tensor->dims[1];
    int c_step = h * w; /* elements per channel plane */

    float* input_data = (float*)input_tensor->data;
    float* out_data = (float*)output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
    for (int q = 0; q < channels; q++)
    {
        float* src = input_data + c_step * q;
        float* dst = out_data + c_step * q;

        for (int i = 0; i < c_step; i++)
        {
            /* clamp to [-1, 1] */
            float v = src[i];
            if (v > 1.f)
                v = 1.f;
            if (v < -1.f)
                v = -1.f;
            dst[i] = v;
        }
    }

    return 0;
}

/* No per-node state to set up. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* No per-node state to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Fetch the node's single input/output tensors and run the kernel. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;
    struct ir_tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    ref_relu1_fp32(input_tensor, output_tensor, exec_graph->num_thread);

    return 0;
}

/* ReLU1 is elementwise: the output shape always mirrors the input shape. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* node = exec_node->ir_node;
    struct ir_graph* ir_graph = node->graph;
    struct ir_tensor* input = get_ir_graph_tensor(ir_graph, node->input_tensors[0]);
    struct ir_tensor* output = get_ir_graph_tensor(ir_graph, node->output_tensors[0]);

    int ret = set_ir_tensor_shape(output, input->dims, input->dim_num);

    return ret;
}

/* Reference implementation: can run anywhere, at baseline priority. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    return OPS_SCORE_CANDO;
}

static struct node_ops hcl_node_ops = {.prerun = NULL,
                                       .run = run,
                                       .reshape = reshape,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

static int reg_relu1_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_RELU1, &hcl_node_ops);
}

static int unreg_relu1_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_RELU1, &hcl_node_ops);
}

AUTO_REGISTER_OPS(reg_relu1_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_relu1_hcl_ops);
GB_unaryop__ainv_fp64_int32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_fp64_int32
// op(A') function:  GB_tran__ainv_fp64_int32

// C type:   double
// A type:   int32_t
// cast:     double cij = (double) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, aij) \
    double z = (double) aij ;

// cij = op (cast (aij)): the cast to double happens BEFORE the negation, so
// the negation is done in double and -INT32_MIN cannot overflow.
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_FP64 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_fp64_int32
(
    double *Cx,         // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // p declared outside the loop (signed 64-bit) for the OpenMP canonical
    // loop form; anz may exceed 32-bit range.
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_fp64_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose body is shared: GB_unaryop_transpose.c expands using the
    // GB_* macros defined above.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__identity_fp64_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_fp64_uint64
// op(A') function:  GB_tran__identity_fp64_uint64

// C type:   double
// A type:   uint64_t
// cast:     double cij = (double) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: identity (the only work here is the uint64->double cast)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_fp64_uint64
(
    double *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // signed 64-bit loop index: anz may exceed 32-bit range
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_fp64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose body is shared: GB_unaryop_transpose.c expands using the
    // GB_* macros defined above.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ImageWaveformUtils.h
#ifndef CAPTURE3_IMAGE_WAVEFORM_UTILS_H
#define CAPTURE3_IMAGE_WAVEFORM_UTILS_H

#include <cmath>
#include <vector>
#include <omp.h>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <QtGui/QImage>
#include "../engine/objects/image/ImageSize.h"
#include "../engine/objects/image/ImageChannel.h"

namespace Capture3
{
	// Renders per-channel waveform (column histogram) displays of a 3-channel
	// image into three grayscale QImages, one per channel (X/Y/Z).
	//
	// Pipeline: (1) accumulate inverted channel values into a CV_64FC3 height
	// map, where a pixel's value chooses its row; (2) downscale the map to the
	// output size with INTER_AREA; (3) normalize by each channel's maximum,
	// apply a sine ease, and write 4-byte gray pixels into the outputs.
	//
	// Assumes imageChannel.getData() is interleaved 3-channel double data of
	// imageWidth*imageHeight pixels, and that outputX/Y/Z share the same size
	// and use a 4-bytes-per-pixel QImage format — TODO confirm with callers.
	static void generateWaveform(
		const ImageSize &imageSize,
		const ImageChannel &imageChannel,
		QImage &outputX,
		QImage &outputY,
		QImage &outputZ
	)
	{
		// Get output data
		const auto outputWidth = (unsigned int) outputX.width();
		const auto outputHeight = (unsigned int) outputX.height();
		const unsigned int outputArea = outputWidth * outputHeight;
		const cv::Size outputSize(outputWidth, outputHeight);
		unsigned char *outputDataX = outputX.bits();
		unsigned char *outputDataY = outputY.bits();
		unsigned char *outputDataZ = outputZ.bits();

		// Get image data
		const unsigned int imageWidth = imageSize.getWidth();
		const unsigned int imageHeight = imageSize.getHeight();
		const double *imageData = imageChannel.getData();

		// Create height map (at most 4x the output resolution, never larger
		// than the source image)
		const unsigned int mapWidth = std::min(imageWidth, outputWidth * 4);
		const unsigned int mapHeight = std::min(imageHeight, outputHeight * 4);
		const unsigned int mapMax = mapHeight - 1;
		const double mapScaleX = imageWidth / (double) mapWidth;
		const double mapScaleY = imageHeight / (double) mapHeight;
		const cv::Size mapSize(mapWidth, mapHeight);
		cv::Mat map(mapSize, CV_64FC3, cv::Scalar(0));
		auto *mapData = (double *) map.data;

		// Iterate over pixels and convert pixel value to position.
		// Parallel over x: every write below lands in column x of the map, so
		// threads never touch the same element.
		#pragma omp parallel for schedule(static)
		for (unsigned int x = 0; x < mapWidth; x++) {
			for (unsigned int y = 0; y < mapHeight; y++) {

				// Calculate position of pixel to sample
				const auto imageX = (unsigned int) lround(x * mapScaleX);
				const auto imageY = (unsigned int) lround(y * mapScaleY);

				// Fetch values from channels (inverted so larger values sit
				// higher in the display) and clamp to [0, 1]
				const unsigned int index = (imageY * imageWidth + imageX) * 3;
				double valueX = 1.0 - imageData[index + 0];
				double valueY = 1.0 - imageData[index + 1];
				double valueZ = 1.0 - imageData[index + 2];
				valueX = valueX < 0 ? 0 : valueX > 1 ? 1 : valueX;
				valueY = valueY < 0 ? 0 : valueY > 1 ? 1 : valueY;
				valueZ = valueZ < 0 ? 0 : valueZ > 1 ? 1 : valueZ;

				// Convert color to height (row in the map)
				const auto heightX = (unsigned int) lround(valueX * mapMax);
				const auto heightY = (unsigned int) lround(valueY * mapMax);
				const auto heightZ = (unsigned int) lround(valueZ * mapMax);

				// Store value (accumulate intensity at the chosen row)
				mapData[(heightX * mapWidth + x) * 3 + 0] += valueX;
				mapData[(heightY * mapWidth + x) * 3 + 1] += valueY;
				mapData[(heightZ * mapWidth + x) * 3 + 2] += valueZ;
			}
		}

		// Scale map to output size
		cv::Mat scaled;
		cv::resize(map, scaled, outputSize, 0, 0, cv::INTER_AREA);
		const double *scaledData = (double *) scaled.data;

		// Find max value per channel (seeded > 0 to avoid division by zero)
		double maxX = 0.0001;
		double maxY = 0.0001;
		double maxZ = 0.0001;
		for (unsigned int i = 0; i < outputArea; i++) {
			const unsigned int index = i * 3;
			maxX = scaledData[index + 0] > maxX ? scaledData[index + 0] : maxX;
			maxY = scaledData[index + 1] > maxY ? scaledData[index + 1] : maxY;
			maxZ = scaledData[index + 2] > maxZ ? scaledData[index + 2] : maxZ;
		}

		// Normalize, ease, and write the output pixels (one pixel per i, so
		// the parallel iterations are independent)
		#pragma omp parallel for schedule(static)
		for (unsigned int i = 0; i < outputArea; i++) {

			// Calculate input and output index (3 doubles in, 4 bytes out)
			const unsigned int indexInput = i * 3;
			const unsigned int indexOutput = i * 4;

			// Fetch and normalize values, apply a sine ease, clamp to [0, 1]
			double valueX = scaledData[indexInput + 0] / maxX;
			double valueY = scaledData[indexInput + 1] / maxY;
			double valueZ = scaledData[indexInput + 2] / maxZ;
			valueX = std::sin(valueX * M_PI_2);
			valueY = std::sin(valueY * M_PI_2);
			valueZ = std::sin(valueZ * M_PI_2);
			valueX = valueX < 0 ? 0 : valueX > 1 ? 1 : valueX;
			valueY = valueY < 0 ? 0 : valueY > 1 ? 1 : valueY;
			valueZ = valueZ < 0 ? 0 : valueZ > 1 ? 1 : valueZ;

			// Calculate output colors: map [0, 1] onto the gray range [40, 170]
			const auto colorX = (unsigned int) lround((valueX * 130.0) + 40.0);
			const auto colorY = (unsigned int) lround((valueY * 130.0) + 40.0);
			const auto colorZ = (unsigned int) lround((valueZ * 130.0) + 40.0);

			// Store them (gray RGB + opaque alpha-like 4th byte)
			outputDataX[indexOutput + 0] = (unsigned char) colorX;
			outputDataX[indexOutput + 1] = (unsigned char) colorX;
			outputDataX[indexOutput + 2] = (unsigned char) colorX;
			outputDataX[indexOutput + 3] = 255;
			outputDataY[indexOutput + 0] = (unsigned char) colorY;
			outputDataY[indexOutput + 1] = (unsigned char) colorY;
			outputDataY[indexOutput + 2] = (unsigned char) colorY;
			outputDataY[indexOutput + 3] = 255;
			outputDataZ[indexOutput + 0] = (unsigned char) colorZ;
			outputDataZ[indexOutput + 1] = (unsigned char) colorZ;
			outputDataZ[indexOutput + 2] = (unsigned char) colorZ;
			outputDataZ[indexOutput + 3] = 255;
		}

		map.release();
		scaled.release();
	}
}

#endif // CAPTURE3_IMAGE_WAVEFORM_UTILS_H
GB_sparse_masker_template.c
//------------------------------------------------------------------------------
// GB_sparse_masker_template: R = masker (C, M, Z) where R is sparse/hyper
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Computes C<M>=Z or C<!M>=Z, returning the result in R, which is sparse or
// hypersparse.  The input matrix C is not modified.  Effectively, this
// computes R=C and then R<M>=Z or R<!M>=Z.  If the C_replace descriptor is
// enabled, then C has already been cleared, and is an empty (but non-NULL)
// matrix.

// phase1: does not compute R itself, but just counts the # of entries in each
// vector of R.  Fine tasks compute the # of entries in their slice of a
// single vector of R, and the results are cumulatively summed.

// phase2: computes R, using the counts computed by phase1.

// C is sparse or hypersparse.  M and Z can have any sparsity structure.
// ------------------------------------------ // C <!M> = Z R // ------------------------------------------ // sparse sparse sparse sparse // sparse bitmap sparse sparse // sparse full sparse sparse // ------------------------------------------ // C <M> = Z R // ------------------------------------------ // sparse sparse sparse sparse // sparse sparse bitmap sparse // sparse sparse full sparse // sparse bitmap sparse sparse // sparse full sparse sparse // FUTURE:: add special cases for C==Z, C==M, and Z==M aliases //------------------------------------------------------------------------------ // R(i,j) = Z(i,j) when Z is sparse or hypersparse //------------------------------------------------------------------------------ #undef GB_COPY_Z #if defined ( GB_PHASE_1_OF_2 ) #define GB_COPY_Z \ { \ rjnz++ ; \ } #elif defined ( GB_ISO_MASKER ) #define GB_COPY_Z \ { \ Ri [pR] = i ; \ pR++ ; \ } #else #define GB_COPY_Z \ { \ Ri [pR] = i ; \ memcpy (Rx +(pR)*rsize, Zx +(Z_iso ? 0:(pZ)*rsize), rsize) ; \ pR++ ; \ } #endif //------------------------------------------------------------------------------ // R(i,j) = Z(i,j) when Z is bitmap or full //------------------------------------------------------------------------------ #undef GB_COPY_Z_BITMAP_OR_FULL #if defined ( GB_PHASE_1_OF_2 ) #define GB_COPY_Z_BITMAP_OR_FULL \ { \ rjnz += GBB (Zb, pZ_start + i - iZ_first) ; \ } #elif defined ( GB_ISO_MASKER ) #define GB_COPY_Z_BITMAP_OR_FULL \ { \ int64_t pZ = pZ_start + i - iZ_first ; \ if (GBB (Zb, pZ)) \ { \ Ri [pR] = i ; \ pR++ ; \ } \ } #else #define GB_COPY_Z_BITMAP_OR_FULL \ { \ int64_t pZ = pZ_start + i - iZ_first ; \ if (GBB (Zb, pZ)) \ { \ Ri [pR] = i ; \ memcpy (Rx +(pR)*rsize, Zx +(Z_iso ? 
0:(pZ)*rsize), rsize) ; \ pR++ ; \ } \ } #endif //------------------------------------------------------------------------------ // R(i,j) = C(i,j) //------------------------------------------------------------------------------ #undef GB_COPY_C #if defined ( GB_PHASE_1_OF_2 ) #define GB_COPY_C \ { \ rjnz++ ; \ } #elif defined ( GB_ISO_MASKER ) #define GB_COPY_C \ { \ Ri [pR] = i ; \ pR++ ; \ } #else #define GB_COPY_C \ { \ Ri [pR] = i ; \ memcpy (Rx +(pR)*rsize, Cx +(C_iso ? 0:(pC)*rsize), rsize) ; \ pR++ ; \ } #endif //------------------------------------------------------------------------------ // template for R = masker (C, M, Z) when R is sparse or hypersparse //------------------------------------------------------------------------------ { //-------------------------------------------------------------------------- // phase1: count entries in each C(:,j) // phase2: compute C //-------------------------------------------------------------------------- ASSERT (C_is_sparse || C_is_hyper) ; #pragma omp parallel for num_threads(R_nthreads) schedule(dynamic,1) for (taskid = 0 ; taskid < R_ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- int64_t kfirst = TaskList [taskid].kfirst ; int64_t klast = TaskList [taskid].klast ; bool fine_task = (klast == -1) ; int64_t len ; if (fine_task) { // a fine task operates on a slice of a single vector klast = kfirst ; len = TaskList [taskid].len ; } else { // a coarse task operates on one or more whole vectors len = vlen ; } //---------------------------------------------------------------------- // compute all vectors in this task //---------------------------------------------------------------------- for (int64_t k = kfirst ; k <= klast ; k++) { //------------------------------------------------------------------ // get j, the kth vector of R 
//------------------------------------------------------------------ int64_t j = GBH (Rh, k) ; #if defined ( GB_PHASE_1_OF_2 ) int64_t rjnz = 0 ; #else int64_t pR, pR_end ; if (fine_task) { // A fine task computes a slice of R(:,j) pR = TaskList [taskid ].pC ; pR_end = TaskList [taskid+1].pC ; ASSERT (Rp [k] <= pR && pR <= pR_end && pR_end <= Rp [k+1]) ; } else { // The vectors of R are never sliced for a coarse task. pR = Rp [k] ; pR_end = Rp [k+1] ; } int64_t rjnz = pR_end - pR ; if (rjnz == 0) { continue ; } #endif //------------------------------------------------------------------ // get C(:,j) //------------------------------------------------------------------ int64_t pC = -1, pC_end = -1 ; if (fine_task) { // A fine task operates on Ci,Cx [pC...pC_end-1], which is // a subset of the vector C(:,j) pC = TaskList [taskid].pA ; pC_end = TaskList [taskid].pA_end ; } else { // A coarse task operates on the entire vector C(:,j) int64_t kC = (R_to_C == NULL) ? j : R_to_C [k] ; if (kC >= 0) { pC = Cp [kC] ; pC_end = Cp [kC+1] ; } } int64_t cjnz = pC_end - pC ; // nnz in C(:,j) for this slice bool cdense = (cjnz == len) && (cjnz > 0) ; #if defined ( GB_PHASE_2_OF_2 ) || defined ( GB_DEBUG ) // get the first index in C(:,j) for this vector int64_t iC_first = -1 ; if (cjnz > 0) iC_first = Ci [pC] ; #endif #ifdef GB_DEBUG int64_t iC_last = -1 ; if (cjnz > 0) iC_last = Ci [pC_end-1] ; #endif //------------------------------------------------------------------ // get Z(:,j) //------------------------------------------------------------------ int64_t pZ = -1, pZ_end = -1 ; if (fine_task) { // A fine task operates on Zi,Zx [pZ...pZ_end-1], which is // a subset of the vector Z(:,j) pZ = TaskList [taskid].pB ; pZ_end = TaskList [taskid].pB_end ; } else { // A coarse task operates on the entire vector Z(:,j) int64_t kZ = (R_to_Z == NULL) ? 
j : R_to_Z [k] ; if (kZ >= 0) { pZ = GBP (Zp, kZ, vlen) ; pZ_end = GBP (Zp, kZ+1, vlen) ; } } int64_t zjnz = pZ_end - pZ ; // nnz in Z(:,j) for this slice int64_t pZ_start = pZ ; bool zdense = (zjnz == len) && (zjnz > 0) ; int64_t iZ_first = -1, iZ_last = -1 ; if (zjnz > 0) { iZ_first = GBI (Zi, pZ, vlen) ; iZ_last = GBI (Zi, pZ_end-1, vlen) ; } //------------------------------------------------------------------ // get M(:,j) //------------------------------------------------------------------ int64_t pM = -1, pM_end = -1 ; if (fine_task) { // A fine task operates on Mi,Mx [pM...pM_end-1], which is // a subset of the vector M(:,j) pM = TaskList [taskid].pM ; pM_end = TaskList [taskid].pM_end ; } else { // A coarse task operates on the entire vector M (:,j) int64_t kM = (R_to_M == NULL) ? j : R_to_M [k] ; if (kM >= 0) { pM = GBP (Mp, kM, vlen) ; pM_end = GBP (Mp, kM+1, vlen) ; } } int64_t mjnz = pM_end - pM ; // nnz (M (:,j)) bool mdense = (mjnz == len) && (mjnz > 0) ; // get the first index in M(:,j) for this vector int64_t iM_first = -1 ; int64_t pM_first = pM ; if (mjnz > 0) iM_first = GBI (Mi, pM_first, vlen) ; //------------------------------------------------------------------ // R(:,j) = masker (C (:,j), M (:,j), Z (:,j)) //------------------------------------------------------------------ if (Z_is_bitmap || Z_is_full) { //-------------------------------------------------------------- // Method01: Z is bitmap or full; M is sparse or hypersparse //-------------------------------------------------------------- // ------------------------------------------ // C <M> = Z R // ------------------------------------------ // sparse sparse bitmap sparse // sparse sparse full sparse // M is sparse or hypersparse, and not complemented. // Otherwise, R is bitmap and not computed here, but in // GB_bitmap_masker_template instead. 
ASSERT (M_is_sparse || M_is_hyper) ; ASSERT (!Mask_comp) ; // 2-way merge of C(:,j) and M(:,j) and direct lookup of Z while (pC < pC_end && pM < pM_end) { int64_t iC = Ci [pC] ; int64_t iM = Mi [pM] ; if (iC < iM) { // C(i,j) is present but M(i,j) is not // R(i,j) = C(i,j) int64_t i = iC ; GB_COPY_C ; pC++ ; } else if (iC > iM) { // M(i,j) is present but C(i,j) is not int64_t i = iM ; bool mij = GB_mcast (Mx, pM, msize) ; if (mij) { // R(i,j) = Z(i,j) GB_COPY_Z_BITMAP_OR_FULL ; } pM++ ; } else { // both C(i,j) and M(i,j) are present int64_t i = iM ; bool mij = GB_mcast (Mx, pM, msize) ; if (mij) { // R(i,j) = Z(i,j) GB_COPY_Z_BITMAP_OR_FULL ; } else { // R(i,j) = C(i,j) GB_COPY_C ; } pC++ ; pM++ ; } } // if M(:,j) is exhausted ; continue scanning all of C(:,j) #if defined ( GB_PHASE_1_OF_2 ) rjnz += (pC_end - pC) ; #else for ( ; pC < pC_end ; pC++) { // C(i,j) is present but M(i,j) is not int64_t i = Ci [pC] ; GB_COPY_C ; } #endif // if C(:,j) is exhausted ; continue scanning all of M(:,j) for ( ; pM < pM_end ; pM++) { // M(i,j) is present but C(i,j) is not int64_t i = Mi [pM] ; bool mij = GB_mcast (Mx, pM, msize) ; if (mij) { // R(i,j) = Z(i,j) GB_COPY_Z_BITMAP_OR_FULL ; } } } else if (mjnz == 0) { //-------------------------------------------------------------- // Z is sparse or hypersparse, M(:,j) is empty //-------------------------------------------------------------- // ------------------------------------------ // C <!M> = Z R // ------------------------------------------ // sparse sparse sparse sparse // ------------------------------------------ // C <M> = Z R // ------------------------------------------ // sparse sparse sparse sparse // Z must be sparse or hypersparse ASSERT (Z_is_sparse || Z_is_hyper) ; if (!Mask_comp) { //---------------------------------------------------------- // Method02: M(:,j) is empty and not complemented //---------------------------------------------------------- // R(:,j) = C(:,j), regardless of Z(:,j) #if defined ( 
GB_PHASE_1_OF_2 ) rjnz = cjnz ; #else ASSERT (rjnz == cjnz) ; memcpy (Ri +(pR), Ci +(pC), cjnz * sizeof (int64_t)) ; #ifndef GB_ISO_MASKER if (C_iso) { for (int64_t k = 0 ; k < cjnz ; k++) { memcpy (Rx +(pR+k)*rsize, Cx, rsize) ; } } else { memcpy (Rx +(pR)*rsize, Cx +(pC)*rsize, cjnz*rsize) ; } #endif #endif } else { //---------------------------------------------------------- // Method03: M(:,j) is empty and complemented //---------------------------------------------------------- // R(:,j) = Z(:,j), regardless of C(:,j) #if defined ( GB_PHASE_1_OF_2 ) rjnz = zjnz ; #else ASSERT (rjnz == zjnz) ; memcpy (Ri +(pR), Zi +(pZ), zjnz * sizeof (int64_t)) ; #ifndef GB_ISO_MASKER if (Z_iso) { for (int64_t k = 0 ; k < zjnz ; k++) { memcpy (Rx +(pR+k)*rsize, Zx, rsize) ; } } else { memcpy (Rx +(pR)*rsize, Zx +(pZ)*rsize, zjnz*rsize) ; } #endif #endif } } else if (cdense && zdense) { //-------------------------------------------------------------- // Method03: C(:,j) and Z(:,j) dense: thus R(:,j) dense //-------------------------------------------------------------- // ------------------------------------------ // C <!M> = Z R // ------------------------------------------ // sparse sparse sparse sparse // sparse bitmap sparse sparse // sparse full sparse sparse // ------------------------------------------ // C <M> = Z R // ------------------------------------------ // sparse sparse sparse sparse // sparse bitmap sparse sparse // sparse full sparse sparse // Both C(:,j) and Z(:,j) are dense (that is, all entries // present), but both C and Z are stored in a sparse or // hypersparse sparsity structure. M has any sparsity. ASSERT (Z_is_sparse || Z_is_hyper) ; ASSERT (cjnz == zjnz) ; ASSERT (iC_first == iZ_first) ; ASSERT (iC_last == iZ_last ) ; #if defined ( GB_PHASE_1_OF_2 ) rjnz = cjnz ; #else ASSERT (rjnz == cjnz) ; for (int64_t p = 0 ; p < cjnz ; p++) { int64_t i = p + iC_first ; Ri [pR + p] = i ; int64_t iM = (pM < pM_end) ? 
GBI (Mi, pM, vlen) : INT64_MAX; bool mij = false ; if (i == iM) { mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ; pM++ ; } if (Mask_comp) mij = !mij ; #ifndef GB_ISO_MASKER if (mij) { // R(i,j) = Z (i,j) memcpy (Rx +(pR+p)*rsize, Zx +(Z_iso? 0:(pZ+p)*rsize), rsize) ; } else { // R(i,j) = C (i,j) memcpy (Rx +(pR+p)*rsize, Cx +(C_iso? 0:(pC+p)*rsize), rsize) ; } #endif } #endif } else { //-------------------------------------------------------------- // Method04: 2-way merge of C(:,j) and Z(:,j) //-------------------------------------------------------------- // Z is sparse or hypersparse; M has any sparsity structure ASSERT (Z_is_sparse || Z_is_hyper) ; //-------------------------------------------------------------- // Z is sparse or hypersparse, M has any sparsity //-------------------------------------------------------------- // ------------------------------------------ // C <!M> = Z R // ------------------------------------------ // sparse sparse sparse sparse // sparse bitmap sparse sparse // sparse full sparse sparse // ------------------------------------------ // C <M> = Z R // ------------------------------------------ // sparse sparse sparse sparse // sparse bitmap sparse sparse // sparse full sparse sparse while (pC < pC_end && pZ < pZ_end) { //---------------------------------------------------------- // get the next i for R(:,j) //---------------------------------------------------------- int64_t iC = Ci [pC] ; int64_t iZ = Zi [pZ] ; int64_t i = GB_IMIN (iC, iZ) ; //---------------------------------------------------------- // get M(i,j) //---------------------------------------------------------- bool mij = false ; if (mdense) { //------------------------------------------------------ // Method04a: M(:,j) is dense //------------------------------------------------------ // mask is dense, lookup M(i,j) // iM_first == Mi [pM_first] // iM_first + delta == Mi [pM_first + delta] // let i = iM_first + delta // let pM = pM_first + delta // then delta = i - 
iM_first pM = pM_first + (i - iM_first) ; ASSERT (i == GBI (Mi, pM, vlen)) ; mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ; // increment pM for the wrapup phase below pM++ ; } else { //------------------------------------------------------ // Method04b: M(:,j) is sparse //------------------------------------------------------ // Use GB_SPLIT_BINARY_SEARCH so that pM can be used in // the for loop with index pM in the wrapup phase. ASSERT (M_is_sparse || M_is_hyper) ; int64_t pright = pM_end - 1 ; bool found ; GB_SPLIT_BINARY_SEARCH (i, Mi, pM, pright, found) ; if (found) { ASSERT (i == Mi [pM]) ; mij = GB_mcast (Mx, pM, msize) ; // increment pM for the wrapup phase below pM++ ; } } if (Mask_comp) mij = !mij ; //---------------------------------------------------------- // R(i,j) = C(i,j) or Z(i,j) //---------------------------------------------------------- if (iC < iZ) { // C(i,j) is present but Z(i,j) is not if (!mij) GB_COPY_C ; pC++ ; } else if (iC > iZ) { // Z(i,j) is present but C(i,j) is not if (mij) GB_COPY_Z ; pZ++ ; } else { // both C(i,j) and Z(i,j) are present int64_t i = iC ; if (mij) { GB_COPY_Z ; } else { GB_COPY_C ; } pC++ ; pZ++ ; } } //-------------------------------------------------------------- // Method04: wrapup: C or Z are exhausted, or initially empty //-------------------------------------------------------------- cjnz = pC_end - pC ; // nnz (C(:,j)) remaining zjnz = pZ_end - pZ ; // nnz (Z(:,j)) remaining mjnz = pM_end - pM ; // nnz (M(:,j)) remaining if (cjnz == 0) { //---------------------------------------------------------- // C(:,j) is empty //---------------------------------------------------------- if (!Mask_comp) { //------------------------------------------------------ // mask is not complemented //------------------------------------------------------ if (mdense) { //-------------------------------------------------- // Method04c: M(:,j) is dense //-------------------------------------------------- for ( ; pZ < pZ_end ; 
pZ++) { int64_t i = Zi [pZ] ; // mask is dense, lookup M(i,j) pM = pM_first + (i - iM_first) ; ASSERT (i == GBI (Mi, pM, vlen)) ; bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ; if (mij) GB_COPY_Z ; } } else if (zjnz > 32 * mjnz) { //-------------------------------------------------- // Method04d: Z(:,j) is much denser than M(:,j) //-------------------------------------------------- // This loop requires pM to start at the first // entry in M(:,j) that has not yet been handled. ASSERT (M_is_sparse || M_is_hyper) ; for ( ; pM < pM_end ; pM++) { if (GB_mcast (Mx, pM, msize)) { int64_t i = Mi [pM] ; int64_t pright = pZ_end - 1 ; bool found ; GB_BINARY_SEARCH (i, Zi, pZ, pright, found); if (found) GB_COPY_Z ; } } } else if (mjnz > 32 * zjnz) { //-------------------------------------------------- // Method04e: M(:,j) is much denser than Z(:,j) //-------------------------------------------------- ASSERT (M_is_sparse || M_is_hyper) ; for ( ; pZ < pZ_end ; pZ++) { int64_t i = Zi [pZ] ; bool mij = false ; int64_t pright = pM_end - 1 ; bool found ; GB_BINARY_SEARCH (i, Mi, pM, pright,found) ; if (found) mij = GB_mcast (Mx, pM, msize) ; if (mij) GB_COPY_Z ; } } else { //-------------------------------------------------- // Method04f: M(:,j) and Z(:,j) about same # entries //-------------------------------------------------- ASSERT (M_is_sparse || M_is_hyper) ; while (pM < pM_end && pZ < pZ_end) { int64_t iM = Mi [pM] ; int64_t i = Zi [pZ] ; if (iM < i) { // M(i,j) exists but not Z(i,j) pM++ ; } else if (i < iM) { // Z(i,j) exists but not M(i,j) pZ++ ; } else { // both M(i,j) and Z(i,j) exist if (GB_mcast (Mx, pM, msize)) GB_COPY_Z ; pM++ ; pZ++ ; } } } } else { //------------------------------------------------------ // complemented mask, and C(:,j) empty //------------------------------------------------------ if (mdense) { //-------------------------------------------------- // Method04g: M(:,j) is dense //-------------------------------------------------- for ( ; pZ 
< pZ_end ; pZ++) { int64_t i = Zi [pZ] ; // mask is dense, lookup M(i,j) pM = pM_first + (i - iM_first) ; ASSERT (i == GBI (Mi, pM, vlen)) ; bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ; if (!mij) GB_COPY_Z ; // mask is complemented } } else { //-------------------------------------------------- // Method04h: M(:,j) is sparse //-------------------------------------------------- ASSERT (M_is_sparse || M_is_hyper) ; for ( ; pZ < pZ_end ; pZ++) { int64_t i = Zi [pZ] ; bool mij = false ; int64_t pright = pM_end - 1 ; bool found ; GB_BINARY_SEARCH (i, Mi, pM, pright, found) ; if (found) mij = GB_mcast (Mx, pM, msize) ; if (!mij) GB_COPY_Z ; // mask is complemented } } } } else if (zjnz == 0) { //---------------------------------------------------------- // Z(:,j) is empty //---------------------------------------------------------- if (Mask_comp) { //------------------------------------------------------ // mask is complemented //------------------------------------------------------ if (mdense) { //-------------------------------------------------- // Method04i: M(:,j) is dense //-------------------------------------------------- for ( ; pC < pC_end ; pC++) { int64_t i = Ci [pC] ; // mask is dense, lookup M(i,j) pM = pM_first + (i - iM_first) ; ASSERT (i == GBI (Mi, pM, vlen)) ; bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ; if (mij) GB_COPY_C ; } } else if (cjnz > 32 * mjnz) { //-------------------------------------------------- // Method04j: C(:,j) is much denser than M(:,j) //-------------------------------------------------- ASSERT (M_is_sparse || M_is_hyper) ; for ( ; pM < pM_end ; pM++) { if (GB_mcast (Mx, pM, msize)) { int64_t i = Mi [pM] ; int64_t pright = pC_end - 1 ; bool found ; GB_BINARY_SEARCH (i, Ci, pC, pright, found); if (found) GB_COPY_C ; } } } else if (mjnz > 32 * cjnz) { //-------------------------------------------------- // Method04k: M(:,j) is much denser than C(:,j) //-------------------------------------------------- ASSERT 
(M_is_sparse || M_is_hyper) ; for ( ; pC < pC_end ; pC++) { int64_t i = Ci [pC] ; bool mij = false ; int64_t pright = pM_end - 1 ; bool found ; GB_BINARY_SEARCH (i, Mi, pM, pright, found); if (found) mij = GB_mcast (Mx, pM, msize) ; if (mij) GB_COPY_C ; } } else { //-------------------------------------------------- // Method04l: M(:,j) and C(:,j) about same # entries //-------------------------------------------------- ASSERT (M_is_sparse || M_is_hyper) ; while (pM < pM_end && pC < pC_end) { int64_t iM = Mi [pM] ; int64_t i = Ci [pC] ; if (iM < i) { // M(i,j) exists but not C(i,j) pM++ ; } else if (i < iM) { // C(i,j) exists but not M(i,j) pC++ ; } else { // both M(i,j) and C(i,j) exist if (GB_mcast (Mx, pM, msize)) GB_COPY_C ; pM++ ; pC++ ; } } } } else { //------------------------------------------------------ // non-complemented mask, and Z(:,j) empty //------------------------------------------------------ if (mdense) { //-------------------------------------------------- // Method04m: M(:,j) is dense //-------------------------------------------------- for ( ; pC < pC_end ; pC++) { int64_t i = Ci [pC] ; // mask is dense, lookup M(i,j) pM = pM_first + (i - iM_first) ; ASSERT (i == GBI (Mi, pM, vlen)) ; bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ; if (!mij) GB_COPY_C ; } } else { //-------------------------------------------------- // Method04n: M(:,j) is sparse //-------------------------------------------------- ASSERT (M_is_sparse || M_is_hyper) ; for ( ; pC < pC_end ; pC++) { int64_t i = Ci [pC] ; // M(i,j) false if not present bool mij = false ; int64_t pright = pM_end - 1 ; bool found ; GB_BINARY_SEARCH (i, Mi, pM, pright, found) ; if (found) mij = GB_mcast (Mx, pM, msize) ; if (!mij) GB_COPY_C ; } } } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pR == pR_end) ; #endif } //------------------------------------------------------------------ // final count of nnz (R(:,j)) //------------------------------------------------------------------ #if defined 
( GB_PHASE_1_OF_2 ) if (fine_task) { TaskList [taskid].pC = rjnz ; } else { Rp [k] = rjnz ; } #endif } } }
reduc_map_prob.c
#include <stdio.h>
#include <stdlib.h>

/* Number of elements in each input vector. */
#define N 5000000

/*
 * Computes the dot product of two N-element vectors of ones on an OpenMP
 * target device (offloaded when a device is available, host fallback
 * otherwise) and verifies that the reduction equals N.
 *
 * Returns 0 on success, non-zero on allocation failure or a wrong result.
 */
int main(void){
    int status = 0;

    /* sizeof *B ties the element size to the pointer's declared type;
       no cast on malloc in C. */
    double *B = malloc(sizeof *B * N);
    double *C = malloc(sizeof *C * N);
    if (B == NULL || C == NULL){
        fprintf(stderr, "Allocation of %d doubles failed\n", N);
        free(B);    /* free(NULL) is a no-op, so no guard needed */
        free(C);
        return EXIT_FAILURE;
    }

    for (int i = 0; i < N; i++){
        B[i] = 1.0;
        C[i] = 1.0;
    }

    double sum = 0;
    /* Map the inputs to the device once; the reduction variable travels
       tofrom so the final value is copied back to the host. */
#pragma omp target data map(to:B[0:N], C[0:N])
#pragma omp target teams distribute parallel for reduction(+:sum) map(tofrom:sum)
    for (int i = 0; i < N; i++)
        sum += B[i] * C[i];

    printf("SUM = %f\n", sum);

    /* N additions of 1.0 are exactly representable in double (N << 2^53),
       so an exact comparison against N is safe here. */
    if (sum != N){
        printf("Failed!\n");
        status = -1;    /* preserve the original failure exit code */
    }
    else{
        printf("SUCCESS!\n");
    }

    /* Free on every path (the original leaked B and C on failure). */
    free(B);
    free(C);
    return status;
}
paint.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % John Cristy % % July 1998 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/cache.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. % However, in many cases two colors may differ by a small amount. The % fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now % interpreted as the same color for the purposes of the floodfill. % % The format of the FloodfillPaintImage method is: % % MagickBooleanType FloodfillPaintImage(Image *image, % const ChannelType channel,const DrawInfo *draw_info, % const MagickPixelPacket target,const ssize_t x_offset, % const ssize_t y_offset,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel(s). % % o draw_info: the draw info. % % o target: the RGB value of the target color. 
%
%    o x_offset,y_offset: the starting location of the operation.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
  const ChannelType channel,const DrawInfo *draw_info,
  const MagickPixelPacket *target,const ssize_t x_offset,const ssize_t y_offset,
  const MagickBooleanType invert)
{
  /*
    Scanline flood fill: horizontal runs of matching pixels are recolored,
    and the rows above/below each run are pushed on an explicit, bounded
    segment stack instead of using recursion.
  */
#define MaxStacksize 131072UL
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}

  CacheView
    *floodplane_view,
    *image_view;

  ExceptionInfo
    *exception;

  Image
    *floodplane_image;

  MagickBooleanType
    skip;

  MagickPixelPacket
    fill,
    pixel;

  MemoryInfo
    *segment_info;

  PixelPacket
    fill_color;

  register SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x,
    x1,
    x2,
    y;

  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickSignature);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,RGBColorspace);
  if ((image->matte == MagickFalse) &&
      (draw_info->fill.opacity != OpaqueOpacity))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Set floodfill state.  The fill region is first traced on a cloned
    "floodplane" image (marked by transparent opacity) and only composited
    back onto the original image in a second pass.
  */
  floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
  segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack));
  if (segment_info == (MemoryInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info);
  /*
    Push initial segment on stack.
  */
  exception=(&image->exception);
  x=x_offset;
  y=y_offset;
  start=0;
  s=segment_stack;
  PushSegmentStack(y,x,x,1);
  PushSegmentStack(y+1,x,x,-1);
  GetMagickPixelPacket(image,&fill);
  GetMagickPixelPacket(image,&pixel);
  image_view=AcquireVirtualCacheView(image,exception);
  floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception);
  while (s > segment_stack)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels: scan left from x1 while pixels match the
      target (or don't, when invert is set).
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
    q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    p+=x1;
    q+=x1;
    for (x=x1; x >= 0; x--)
    {
      /* transparent opacity on the floodplane marks "already filled" */
      if (q->opacity == (Quantum) TransparentOpacity)
        break;
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if (IsMagickColorSimilar(&pixel,target) == invert)
        break;
      q->opacity=(Quantum) TransparentOpacity;
      p--;
      q--;
    }
    if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
      break;
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          if (x < (ssize_t) image->columns)
            {
              /* scan right from x, marking matching pixels as filled */
              p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
                exception);
              q=GetCacheViewAuthenticPixels(floodplane_view,x,y,
                image->columns-x,1,exception);
              if ((p == (const PixelPacket *) NULL) ||
                  (q == (PixelPacket *) NULL))
                break;
              indexes=GetCacheViewVirtualIndexQueue(image_view);
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (q->opacity == (Quantum) TransparentOpacity)
                  break;
                SetMagickPixelPacket(image,p,indexes+x,&pixel);
                if (IsMagickColorSimilar(&pixel,target) == invert)
                  break;
                q->opacity=(Quantum) TransparentOpacity;
                p++;
                q++;
              }
              if (SyncCacheViewAuthenticPixels(floodplane_view,exception) ==
                  MagickFalse)
                break;
            }
          /* queue the adjacent rows of this filled run for later passes */
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      if (x <= x2)
        {
          /* skip over non-matching pixels up to the segment's right edge */
          p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
            exception);
          q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1,
            exception);
          if ((p == (const PixelPacket *) NULL) ||
              (q == (PixelPacket *) NULL))
            break;
          indexes=GetCacheViewVirtualIndexQueue(image_view);
          for ( ; x <= x2; x++)
          {
            if (q->opacity == (Quantum) TransparentOpacity)
              break;
            SetMagickPixelPacket(image,p,indexes+x,&pixel);
            if (IsMagickColorSimilar(&pixel,target) != invert)
              break;
            p++;
            q++;
          }
        }
      start=x;
    } while (x <= x2);
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    /*
      Tile fill color onto floodplane: every pixel marked transparent on
      the floodplane receives the fill color on the selected channels.
    */
    p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        {
          (void) GetFillColor(draw_info,x,y,&fill_color);
          SetMagickPixelPacket(image,&fill_color,(IndexPacket *) NULL,&fill);
          if (image->colorspace == CMYKColorspace)
            ConvertRGBToCMYK(&fill);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(fill.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(fill.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(fill.blue));
          if (((channel & OpacityChannel) != 0) ||
              (draw_info->fill.opacity != OpaqueOpacity))
            SetPixelOpacity(q,ClampToQuantum(fill.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,ClampToQuantum(fill.index));
        }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  floodplane_view=DestroyCacheView(floodplane_view);
  image_view=DestroyCacheView(image_view);
  segment_info=RelinquishVirtualMemory(segment_info);
  floodplane_image=DestroyImage(floodplane_image);
  /* Success only if the composite pass visited every row. */
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     G r a d i e n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GradientImage() applies a continuously smooth color transitions along a
%  vector from one color to another.
%
%  Note, the interface of this method will change in the future to support
%  more than one transition.
%
%  The format of the GradientImage method is:
%
%      MagickBooleanType GradientImage(Image *image,const GradientType type,
%        const SpreadMethod method,const PixelPacket *start_color,
%        const PixelPacket *stop_color)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the gradient type: linear or radial.
%
%    o spread: the gradient spread method: pad, reflect, or repeat.
%
%    o start_color: the start color.
%
%    o stop_color: the stop color.
%
%  This provides a good example of making use of the DrawGradientImage
%  function and the gradient structure in draw_info.
%
*/

/*
  Return the larger of two doubles (used for the radial-gradient radius).
*/
static inline double MagickMax(const double x,const double y)
{
  return(x > y ? x : y);
}

MagickExport MagickBooleanType GradientImage(Image *image,
  const GradientType type,const SpreadMethod method,
  const PixelPacket *start_color,const PixelPacket *stop_color)
{
  DrawInfo
    *draw_info;

  GradientInfo
    *gradient;

  MagickBooleanType
    status;

  register ssize_t
    i;

  /*
    Set gradient start-stop end points.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(start_color != (const PixelPacket *) NULL);
  assert(stop_color != (const PixelPacket *) NULL);
  draw_info=AcquireDrawInfo();
  /* gradient is owned by draw_info; it is released with DestroyDrawInfo() */
  gradient=(&draw_info->gradient);
  gradient->type=type;
  gradient->bounding_box.width=image->columns;
  gradient->bounding_box.height=image->rows;
  gradient->gradient_vector.x2=(double) image->columns-1.0;
  gradient->gradient_vector.y2=(double) image->rows-1.0;
  /* a linear gradient with vertical extent runs strictly top-to-bottom */
  if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
    gradient->gradient_vector.x2=0.0;
  gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
  gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
  gradient->radius=MagickMax(gradient->center.x,gradient->center.y);
  gradient->spread=method;
  /*
    Define the gradient to fill between the stops.
  */
  gradient->number_stops=2;
  gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
    sizeof(*gradient->stops));
  if (gradient->stops == (StopInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(gradient->stops,0,gradient->number_stops*
    sizeof(*gradient->stops));
  for (i=0; i < (ssize_t) gradient->number_stops; i++)
    GetMagickPixelPacket(image,&gradient->stops[i].color);
  SetMagickPixelPacket(image,start_color,(IndexPacket *) NULL,
    &gradient->stops[0].color);
  gradient->stops[0].offset=0.0;
  SetMagickPixelPacket(image,stop_color,(IndexPacket *) NULL,
    &gradient->stops[1].color);
  gradient->stops[1].offset=1.0;
  /*
    Draw a gradient on the image.
  */
  status=DrawGradientImage(image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O i l P a i n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OilPaintImage() applies a special effect filter that simulates an oil
%  painting.  Each pixel is replaced by the most frequent color occurring
%  in a circular region defined by radius.
%
%  The format of the OilPaintImage method is:
%
%      Image *OilPaintImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the circular neighborhood.
%
%    o exception: return any errors or warnings in this structure.
% */ static size_t **DestroyHistogramThreadSet(size_t **histogram) { register ssize_t i; assert(histogram != (size_t **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (histogram[i] != (size_t *) NULL) histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]); histogram=(size_t **) RelinquishMagickMemory(histogram); return(histogram); } static size_t **AcquireHistogramThreadSet(const size_t count) { register ssize_t i; size_t **histogram, number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); histogram=(size_t **) AcquireQuantumMemory(number_threads, sizeof(*histogram)); if (histogram == (size_t **) NULL) return((size_t **) NULL); (void) ResetMagickMemory(histogram,0,number_threads*sizeof(*histogram)); for (i=0; i < (ssize_t) number_threads; i++) { histogram[i]=(size_t *) AcquireQuantumMemory(count, sizeof(**histogram)); if (histogram[i] == (size_t *) NULL) return(DestroyHistogramThreadSet(histogram)); } return(histogram); } MagickExport Image *OilPaintImage(const Image *image,const double radius, ExceptionInfo *exception) { #define NumberPaintBins 256 #define OilPaintImageTag "OilPaint/Image" CacheView *image_view, *paint_view; Image *linear_image, *paint_image; MagickBooleanType status; MagickOffsetType progress; size_t **restrict histograms, width; ssize_t y; /* Initialize painted image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); width=GetOptimalKernelWidth2D(radius,0.5); linear_image=CloneImage(image,0,0,MagickTrue,exception); paint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL)) { if (linear_image != (Image *) NULL) linear_image=DestroyImage(linear_image); if (paint_image != (Image *) NULL) linear_image=DestroyImage(paint_image); return((Image *) NULL); } if (SetImageStorageClass(paint_image,DirectClass) == MagickFalse) { InheritException(exception,&paint_image->exception); linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); return((Image *) NULL); } histograms=AcquireHistogramThreadSet(NumberPaintBins); if (histograms == (size_t **) NULL) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Oil paint image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(linear_image,exception); paint_view=AcquireAuthenticCacheView(paint_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(linear_image,paint_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict paint_indexes; register ssize_t x; register PixelPacket *restrict q; register size_t *histogram; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (width/2L),linear_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); paint_indexes=GetCacheViewAuthenticIndexQueue(paint_view); histogram=histograms[GetOpenMPThreadId()]; for (x=0; x < (ssize_t) linear_image->columns; x++) { register ssize_t i, u; size_t count; ssize_t j, k, v; /* Assign most frequent color. 
*/ i=0; j=0; count=0; (void) ResetMagickMemory(histogram,0,NumberPaintBins*sizeof(*histogram)); for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { k=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity( linear_image,p+u+i))); histogram[k]++; if (histogram[k] > count) { j=i+u; count=histogram[k]; } } i+=(ssize_t) (linear_image->columns+width); } *q=(*(p+j)); if (linear_image->colorspace == CMYKColorspace) SetPixelIndex(paint_indexes+x,GetPixelIndex(indexes+x+j)); p++; q++; } if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OilPaintImage) #endif proceed=SetImageProgress(image,OilPaintImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } paint_view=DestroyCacheView(paint_view); image_view=DestroyCacheView(image_view); histograms=DestroyHistogramThreadSet(histograms); linear_image=DestroyImage(linear_image); if (status == MagickFalse) paint_image=DestroyImage(paint_image); return(paint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p a q u e P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpaquePaintImage() changes any pixel that matches color with the color % defined by fill. % % By default color must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. Fuzz defines % how much tolerance is acceptable to consider two colors as the same. % For example, set fuzz to 10 and the color red at intensities of 100 and % 102 respectively are now interpreted as the same color. 
%
%  The format of the OpaquePaintImage method is:
%
%      MagickBooleanType OpaquePaintImage(Image *image,
%        const MagickPixelPacket *target,const MagickPixelPacket *fill,
%        const MagickBooleanType invert)
%      MagickBooleanType OpaquePaintImageChannel(Image *image,
%        const ChannelType channel,const MagickPixelPacket *target,
%        const MagickPixelPacket *fill,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel(s).
%
%    o target: the RGB value of the target color.
%
%    o fill: the replacement color.
%
%    o invert: paint any pixel that does not match the target color.
%
*/

MagickExport MagickBooleanType OpaquePaintImage(Image *image,
  const MagickPixelPacket *target,const MagickPixelPacket *fill,
  const MagickBooleanType invert)
{
  /* convenience wrapper: operate on all composite channels */
  return(OpaquePaintImageChannel(image,CompositeChannels,target,fill,invert));
}

MagickExport MagickBooleanType OpaquePaintImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *target,
  const MagickPixelPacket *fill,const MagickBooleanType invert)
{
#define OpaquePaintImageTag  "Opaque/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(target != (MagickPixelPacket *) NULL);
  assert(fill != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* a non-gray fill forces the image out of a gray colorspace */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsMagickGray(fill) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /* a translucent fill needs an alpha channel to write into */
  if ((fill->opacity != OpaqueOpacity) && (image->matte == MagickFalse))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Make image color opaque.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* fuzz-aware match; invert flips which pixels get painted */
      if (IsMagickColorSimilar(&pixel,target) != invert)
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(fill->red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(fill->green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(fill->blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(fill->opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,ClampToQuantum(fill->index));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_OpaquePaintImageChannel)
#endif
        proceed=SetImageProgress(image,OpaquePaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImage() changes the opacity value associated with any pixel
%  that matches color to the value defined by opacity.
%
%  By default color must match a particular pixel color exactly.  However,
%  in many cases two colors may differ by a small amount.  Fuzz defines
%  how much tolerance is acceptable to consider two colors as the same.
%  For example, set fuzz to 10 and the color red at intensities of 100 and
%  102 respectively are now interpreted as the same color.
%
%  The format of the TransparentPaintImage method is:
%
%      MagickBooleanType TransparentPaintImage(Image *image,
%        const MagickPixelPacket *target,const Quantum opacity,
%        const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
  const MagickPixelPacket *target,const Quantum opacity,
  const MagickBooleanType invert)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(target != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* we are about to write opacity values, so ensure an alpha channel exists */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* fuzz-aware match; invert flips which pixels get the new opacity */
      if (IsMagickColorSimilar(&pixel,target) != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImage)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e C h r o m a                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImageChroma() changes the opacity value associated with any
%  pixel that matches color to the value defined by opacity.
%
%  As there is one fuzz value for all the channels, the
%  TransparentPaintImage() API is not suitable for operations like chroma,
%  where the tolerance for similarity of the color components (RGB) can be
%  different.  Thus this method takes two target pixels (one low and one
%  high) and all the pixels of an image which lie between these two pixels
%  are made transparent.
%
%  The format of the TransparentPaintImageChroma method is:
%
%      MagickBooleanType TransparentPaintImageChroma(Image *image,
%        const MagickPixelPacket *low,const MagickPixelPacket *high,
%        const Quantum opacity,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o low: the low target color.
%
%    o high: the high target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
  const MagickPixelPacket *low,const MagickPixelPacket *high,
  const Quantum opacity,const MagickBooleanType invert)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(high != (MagickPixelPacket *) NULL);
  assert(low != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* note: unlike TransparentPaintImage(), the alpha channel is reset here */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,ResetAlphaChannel);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      match;

    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* per-channel range test: pixel lies within [low,high] on R, G and B */
      match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
        (pixel.green >= low->green) && (pixel.green <= high->green) &&
        (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue :
        MagickFalse;
      if (match != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImageChroma)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
QuadNodeCartesianEuclid.h
/*
 * QuadNodePolarEuclid.h
 *
 *  Created on: 21.05.2014
 *      Author: Moritz v. Looz (moritz.looz-corswarem@kit.edu)
 *
 *  Note: This is similar enough to QuadNode.h that one could merge these two classes.
 */

#ifndef QUADNODECARTESIANEUCLID_H_
#define QUADNODECARTESIANEUCLID_H_

#include <vector>
#include <algorithm>
#include <functional>
#include <assert.h>
#include "../../auxiliary/Log.h"
#include "../../geometric/HyperbolicSpace.h"

using std::vector;
using std::min;
using std::max;
using std::cos;

namespace NetworKit {

/**
 * One node of a quadtree (generalized to arbitrary dimension) over an
 * axis-aligned Cartesian box [minPoint, maxPoint).  Leaves store points and
 * their payloads directly; inner nodes delegate to 2^dimension children.
 */
template <class T>
class QuadNodeCartesianEuclid {
	friend class QuadTreeGTest;
private:
	Point<double> minPoint;           // inclusive lower corner of this cell
	Point<double> maxPoint;           // exclusive upper corner of this cell
	count dimension;                  // derived from minPoint at construction
	unsigned capacity;                // leaf point budget before splitting
	static const unsigned coarsenLimit = 4;
	static const long unsigned sanityNodeLimit = 10E15; //just assuming, for debug purposes, that this algorithm never runs on machines with more than 4 Petabyte RAM
	count subTreeSize;                // cached point count of inner nodes
	std::vector<T> content;           // payloads (leaf only)
	std::vector<Point<double> > positions;  // coordinates, parallel to content
	bool isLeaf;
	bool splitTheoretical;            // split at midpoint instead of median
	index ID;
	double lowerBoundR;               // NOTE(review): never read or written here — presumably left over from the polar variant

public:
	std::vector<QuadNodeCartesianEuclid> children;

	/**
	 * Construct a QuadNode for Cartesian coordinates.
	 *
	 * @param lower Lower corner of the region managed by this node
	 * @param upper Upper corner of the region managed by this node
	 * @param capacity Number of points a leaf cell can store before splitting
	 * @param splitTheoretical Whether to split at the geometric midpoint (true) or at the median of the stored points (false)
	 */
	QuadNodeCartesianEuclid(Point<double> lower = Point<double>({0.0, 0.0}), Point<double> upper = Point<double>({1.0, 1.0}), unsigned capacity = 1000, bool splitTheoretical = false) {
		this->minPoint = lower;
		this->maxPoint = upper;
		this->dimension = minPoint.getDimensions();
		assert(maxPoint.getDimensions() == dimension);
		this->capacity = capacity;
		this->splitTheoretical = splitTheoretical;
		this->ID = 0;
		isLeaf = true;
		subTreeSize = 0;
	}

	/**
	 * Turn this leaf into an inner node with 2^dimension children.
	 * Child i takes the upper half along dimension d iff bit d of i is set.
	 * Does NOT redistribute the stored points; addContent() does that.
	 */
	void split() {
		assert(isLeaf);
		assert(children.size() == 0);
		vector<double> middle(dimension);
		if (splitTheoretical) {
			//Euclidean space is distributed equally
			for (index d = 0; d < dimension; d++) {
				middle[d] = (minPoint[d] + maxPoint[d]) / 2;
			}
		} else {
			//median of points
			const count numPoints = positions.size();
			assert(numPoints > 0);//otherwise, why split?
			vector<vector<double> > sorted(dimension);
			for (index d = 0; d < dimension; d++) {
				sorted[d].resize(numPoints);
				for (index i = 0; i < numPoints; i++) {
					sorted[d][i] = positions[i][d];
				}
				std::sort(sorted[d].begin(), sorted[d].end());
				middle[d] = sorted[d][numPoints/2];//this will crash if no points are there!
				assert(middle[d] <= maxPoint[d]);
				assert(middle[d] >= minPoint[d]);
			}
		}
		count childCount = pow(2,dimension);
		for (index i = 0; i < childCount; i++) {
			vector<double> lowerValues(dimension);
			vector<double> upperValues(dimension);
			index bitCopy = i;
			for (index d = 0; d < dimension; d++) {
				if (bitCopy & 1) {
					lowerValues[d] = middle[d];
					upperValues[d] = maxPoint[d];
				} else {
					lowerValues[d] = minPoint[d];
					upperValues[d] = middle[d];
				}
				bitCopy = bitCopy >> 1;
			}
			QuadNodeCartesianEuclid child(Point<double>(lowerValues), Point<double>(upperValues), capacity, splitTheoretical);
			assert(child.isLeaf);
			children.push_back(child);
		}
		isLeaf = false;
	}

	/**
	 * Add a point at position pos with content input.  May split this node
	 * if the leaf capacity is exceeded.
	 *
	 * @param input arbitrary content, in our case an index
	 * @param pos Cartesian coordinates of the point; must lie in this region
	 */
	void addContent(T input, Point<double> pos) {
		assert(input < sanityNodeLimit);
		assert(content.size() == positions.size());
		assert(this->responsible(pos));
		if (isLeaf) {
			// NOTE(review): splits when size reaches capacity-1, i.e. one
			// element before the stated capacity — confirm intended.
			if (content.size() + 1 < capacity) {
				content.push_back(input);
				positions.push_back(pos);
			} else {
				split();
				for (index i = 0; i < content.size(); i++) {
					this->addContent(content[i], positions[i]);
				}
				assert(subTreeSize == content.size());//we have added everything twice
				subTreeSize = content.size();
				content.clear();
				positions.clear();
				this->addContent(input, pos);
			}
		} else {
			assert(children.size() > 0);
			bool foundResponsibleChild = false;
			for (index i = 0; i < children.size(); i++) {
				if (children[i].responsible(pos)) {
					foundResponsibleChild = true;
					children[i].addContent(input, pos);
					break;
				}
			}
			assert(foundResponsibleChild);
			subTreeSize++;
		}
	}

	/**
	 * Remove content at coordinate pos. May cause coarsening of the quadtree
	 *
	 * @param input Content to be removed
	 * @param pos Coordinate of content
	 *
	 * @return True if content was found and removed, false otherwise
	 */
	bool removeContent(T input, Point<double> pos) {
		if (!responsible(pos)) return false;
		if (isLeaf) {
			index i = 0;
			for (; i < content.size(); i++) {
				if (content[i] == input) break;
			}
			if (i < content.size()) {
				assert(positions[i].distance(pos) == 0);
				//remove element
				content.erase(content.begin()+i);
				positions.erase(positions.begin()+i);
				return true;
			} else {
				return false;
			}
		} else {
			bool removed = false;
			bool allLeaves = true;
			assert(children.size() > 0);
			for (index i = 0; i < children.size(); i++) {
				if (!children[i].isLeaf) allLeaves = false;
				if (children[i].removeContent(input, pos)) {
					assert(!removed);
					removed = true;
				}
			}
			if (removed) subTreeSize--;
			//coarsen?
			if (removed && allLeaves && size() < coarsenLimit) {
				//coarsen!!
				//why not assert empty containers and then insert directly?
				vector<T> allContent;
				vector<Point<double> > allPositions;
				for (index i = 0; i < children.size(); i++) {
					allContent.insert(allContent.end(), children[i].content.begin(), children[i].content.end());
					allPositions.insert(allPositions.end(), children[i].positions.begin(), children[i].positions.end());
				}
				assert(allContent.size() == allPositions.size());
				children.clear();
				content.swap(allContent);
				positions.swap(allPositions);
				isLeaf = true;
			}

			return removed;
		}
	}

	/**
	 * Check whether the region managed by this node lies outside of an Euclidean circle.
	 *
	 * @param query Center of the Euclidean query circle, given in Cartesian coordinates
	 * @param radius Radius of the Euclidean query circle
	 *
	 * @return True if the region managed by this node lies completely outside of the circle
	 */
	bool outOfReach(Point<double> query, double radius) const {
		return EuclideanDistances(query).first > radius;
	}

	/**
	 * Minimum and maximum Euclidean distance from the query point to any
	 * point of this node's box.
	 *
	 * @param query Position of the query point
	 * @return pair (minDistance, maxDistance)
	 */
	std::pair<double, double> EuclideanDistances(Point<double> query) const {
		/**
		 * If the query point is not within the quadnode, the distance minimum is on the border.
		 * Need to check whether extremum is between corners.
		 */
		double maxDistance = 0;
		double minDistance = std::numeric_limits<double>::max();

		if (responsible(query)) minDistance = 0;

		auto updateMinMax = [&minDistance, &maxDistance, query](Point<double> pos){
			double extremalValue = pos.distance(query);
			maxDistance = std::max(extremalValue, maxDistance);
			minDistance = std::min(minDistance, extremalValue);
		};

		vector<double> closestValues(dimension);
		vector<double> farthestValues(dimension);
		// per dimension: pick the nearer and farther box boundary; if the
		// query coordinate lies inside the interval, it is its own closest value
		for (index d = 0; d < dimension; d++) {
			if (std::abs(query[d] - minPoint.at(d)) < std::abs(query[d] - maxPoint.at(d))) {
				closestValues[d] = minPoint.at(d);
				farthestValues[d] = maxPoint.at(d);
			} else {
				farthestValues[d] = minPoint.at(d);
				closestValues[d] = maxPoint.at(d);
			}
			if (query[d] >= minPoint.at(d) && query[d] <= maxPoint.at(d)) {
				closestValues[d] = query[d];
			}
		}
		updateMinMax(Point<double>(closestValues));
		updateMinMax(Point<double>(farthestValues));

		assert(minDistance < query.length() + maxPoint.length());
		assert(minDistance < maxDistance);
		return std::pair<double, double>(minDistance, maxDistance);
	}

	/**
	 * Does the point at pos fall inside the region managed by this QuadNode?
	 * Lower bounds are inclusive, upper bounds exclusive.
	 *
	 * @param pos Cartesian coordinates of input point
	 *
	 * @return True if input point lies within the region of this QuadNode
	 */
	bool responsible(Point<double> pos) const {
		for (index d = 0; d < dimension; d++) {
			if (pos[d] < minPoint.at(d) || pos[d] >= maxPoint.at(d)) return false;
		}
		return true;
	}

	/**
	 * Get all Elements in this QuadNode or a descendant of it
	 *
	 * @return vector of content type T
	 */
	std::vector<T> getElements() const {
		if (isLeaf) {
			return content;
		} else {
			assert(content.size() == 0);
			assert(positions.size() == 0);
			vector<T> result;
			for (index i = 0; i < children.size(); i++) {
				std::vector<T> subresult = children[i].getElements();
				result.insert(result.end(), subresult.begin(), subresult.end());
			}
			return result;
		}
	}

	// Append the coordinates of all points in this subtree to pointContainer.
	void getCoordinates(vector<Point<double> > &pointContainer) const {
		if (isLeaf) {
			pointContainer.insert(pointContainer.end(), positions.begin(), positions.end());
		} else {
			assert(content.size() == 0);
			assert(positions.size() == 0);
			for (index i = 0; i < children.size(); i++) {
				children[i].getCoordinates(pointContainer);
			}
		}
	}

	/**
	 * Main query method, get points lying in a Euclidean circle around the center point.
	 *
	 * Elements are pushed onto a vector which is a required argument. This is done to reduce copying.
	 * (Maybe not necessary due to copy elision)
	 *
	 * Safe to call in parallel.
	 *
	 * @param center Center of the query circle
	 * @param radius Radius of the query circle
	 * @param result Reference to the vector where the results will be stored
	 */
	void getElementsInEuclideanCircle(Point<double> center, double radius, vector<T> &result) const {
		if (outOfReach(center, radius)) {
			return;
		}

		if (isLeaf) {
			const double rsq = radius*radius;
			const count cSize = content.size();

			for (int i=0; i < cSize; i++) {
				if (positions[i].squaredDistance(center) < rsq) {
					result.push_back(content[i]);
					if (content[i] >= sanityNodeLimit) DEBUG("Quadnode content ", content[i], " found, suspiciously high!");
					assert(content[i] < sanityNodeLimit);
				}
			}
		} else {
			for (index i = 0; i < children.size(); i++) {
				children[i].getElementsInEuclideanCircle(center, radius, result);
			}
		}
	}

	/**
	 * Sample neighbors probabilistically: each point is accepted with
	 * probability prob(distance to euQuery), using geometric jumps to skip
	 * over runs of rejected candidates.
	 *
	 * @return number of candidates actually tested
	 */
	count getElementsProbabilistically(Point<double> euQuery, std::function<double(double)> prob, vector<T> &result) const {
		TRACE("Getting Euclidean distances");
		auto distancePair = EuclideanDistances(euQuery);
		double probUB = prob(distancePair.first);
		double probLB = prob(distancePair.second);
		assert(probLB <= probUB);
		if (probUB > 0.5) probUB = 1;//if we are going to take every second element anyway, no use in calculating expensive jumps
		if (probUB == 0) return 0;
		//TODO: return whole if probLB == 1
		double probdenom = std::log(1-probUB);
		if (probdenom == 0) return 0;//there is a very small probability, but we cannot process it.
		TRACE("probUB: ", probUB, ", probdenom: ", probdenom);

		count expectedNeighbours = probUB*size();
		count candidatesTested = 0;
		count incomingNeighbours = result.size();
		count ownsize = size();

		if (isLeaf) {
			const count lsize = content.size();
			TRACE("Leaf of size ", lsize);
			for (int i = 0; i < lsize; i++) {
				//jump!
				if (probUB < 1) {
					double random = Aux::Random::real();
					double delta = std::log(random) / probdenom;
					assert(delta >= 0);
					i += delta;
					if (i >= lsize) break;
					TRACE("Jumped with delta ", delta, " arrived at ", i);
				}
				assert(i >= 0);

				//see where we've arrived
				candidatesTested++;
				double distance = positions[i].distance(euQuery);
				assert(distance >= distancePair.first);//TODO: These should not fail!
				assert(distance <= distancePair.second);
				double q = prob(distance);
				q = q / probUB; //since the candidate was selected by the jumping process, we have to adjust the probabilities
				assert(q <= 1);

				//accept?
				double acc = Aux::Random::real();
				if (acc < q) {
					TRACE("Accepted node ", i, " with probability ", q, ".");
					result.push_back(content[i]);
				}
			}
		} else {
			// NOTE(review): 1/1000 is integer division and evaluates to 0, so
			// the second clause is always false; presumably 1.0/1000 was
			// intended — confirm before changing.
			if (expectedNeighbours < 4 || probUB < 1/1000) {//select candidates directly instead of calling recursively
				TRACE("probUB = ", probUB, ", switching to direct candidate selection.");
				assert(probUB < 1);
				const count stsize = size();
				for (index i = 0; i < stsize; i++) {
					double delta = std::log(Aux::Random::real()) / probdenom;
					assert(delta >= 0);
					i += delta;
					TRACE("Jumped with delta ", delta, " arrived at ", i, ". Calling maybeGetKthElement.");
					if (i < size()) maybeGetKthElement(probUB, euQuery, prob, i, result);//this could be optimized. As of now, the offset is subtracted separately for each point
					else break;
					candidatesTested++;
				}
			} else {//carry on as normal
				for (index i = 0; i < children.size(); i++) {
					TRACE("Recursively calling child ", i);
					candidatesTested += children[i].getElementsProbabilistically(euQuery, prob, result);
				}
			}
		}
		count finalNeighbours = result.size();
		if (probLB == 1) assert(finalNeighbours == incomingNeighbours + ownsize);
		return candidatesTested;
	}

	/**
	 * Locate the k-th point of this subtree and accept it into
	 * circleDenizens with probability prob(distance)/upperBound.
	 */
	void maybeGetKthElement(double upperBound, Point<double> euQuery, std::function<double(double)> prob, index k, vector<T> &circleDenizens) const {
		TRACE("Maybe get element ", k, " with upper Bound ", upperBound);
		assert(k < size());
		if (isLeaf) {
			double acceptance = prob(euQuery.distance(positions[k]))/upperBound;
			TRACE("Is leaf, accept with ", acceptance);
			if (Aux::Random::real() < acceptance) circleDenizens.push_back(content[k]);
		} else {
			TRACE("Call recursively.");
			index offset = 0;
			for (index i = 0; i < children.size(); i++) {
				count childsize = children[i].size();
				if (k - offset < childsize) {
					children[i].maybeGetKthElement(upperBound, euQuery, prob, k - offset, circleDenizens);
					break;
				}
				offset += childsize;
			}
		}
	}

	/**
	 * Shrink all vectors in this subtree to fit the content.
	 * Call after quadtree construction is complete, causes better memory usage and cache efficiency
	 */
	void trim() {
		content.shrink_to_fit();
		positions.shrink_to_fit();
		if (!isLeaf) {
			for (index i = 0; i < children.size(); i++) {
				children[i].trim();
			}
		}
	}

	/**
	 * Number of points lying in the region managed by this QuadNode
	 */
	count size() const {
		return isLeaf ? content.size() : subTreeSize;
	}

	// Recompute the cached subtree sizes bottom-up.
	void recount() {
		subTreeSize = 0;
		for (index i = 0; i < children.size(); i++) {
			children[i].recount();
			subTreeSize += children[i].size();
		}
	}

	/**
	 * Height of subtree hanging from this QuadNode
	 */
	count height() const {
		count result = 1;//if leaf node, the children loop will not execute
		for (auto child : children) result = std::max(result, child.height()+1);
		return result;
	}

	/**
	 * Leaf cells in the subtree hanging from this QuadNode
	 */
	count countLeaves() const {
		if (isLeaf) return 1;
		count result = 0;
		for (index i = 0; i < children.size(); i++) {
			result += children[i].countLeaves();
		}
		return result;
	}

	index getID() const {
		return ID;
	}

	// Assign post-order IDs starting at nextID; returns the next free ID.
	index indexSubtree(index nextID) {
		index result = nextID;
		assert(children.size() == pow(2,dimension) || children.size() == 0);
		for (int i = 0; i < children.size(); i++) {
			result = children[i].indexSubtree(result);
		}
		this->ID = result;
		return result+1;
	}

	// ID of the leaf cell containing pos, or -1 if pos is outside this region.
	index getCellID(Point<double> pos) const {
		if (!responsible(pos)) return -1;
		if (isLeaf) return getID();
		else {
			for (int i = 0; i < children.size(); i++) {
				index childresult = children[i].getCellID(pos);
				if (childresult >= 0) return childresult;
			}
			assert(false); //if responsible, then one of the children should be responsible as well
			return -1;
		}
	}

	index getMaxIDInSubtree() const {
		if (isLeaf) return getID();
		else {
			index result = -1;
			for (int i = 0; i < children.size(); i++) {
				result = std::max(children[i].getMaxIDInSubtree(), result);
			}
			return std::max(result, getID());
		}
	}

	// Renumber stored contents consecutively starting at offset;
	// returns offset plus the number of points in this subtree.
	count reindex(count offset) {
		if (isLeaf)
		{
			#pragma omp task
			{
				index p = offset;
				std::generate(content.begin(), content.end(), [&p](){return p++;});
			}
			offset += size();
		} else {
			for (int i = 0; i < children.size(); i++) {
				offset = children[i].reindex(offset);
			}
		}
		return offset;
	}
};
}

#endif /* QUADNODE_H_ */
HDAA_fmt_plug.c
/* HTTP Digest access authentication patch for john * * Written by Romain Raboin. OMP and intrinsics support by magnum * * This software is Copyright (c) 2008 Romain Raboin - romain.raboin at * gmail.com, and Copyright (c) 2012 magnum and it is hereby released to * the general public under the following terms: Redistribution and * use in source and binary forms, with or without modification, are * permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_HDAA; #elif FMT_REGISTERS_H john_register_one(&fmt_HDAA); #else #include <string.h> #ifdef __MMX__ #include <mmintrin.h> #endif #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "md5.h" #include "stdint.h" #include "simd-intrinsics.h" #define ALGORITHM_NAME "MD5 " MD5_ALGORITHM_NAME #if !FAST_FORMATS_OMP #undef _OPENMP #endif #if defined(_OPENMP) #include <omp.h> #endif #include "memdbg.h" #define FORMAT_LABEL "hdaa" #define FORMAT_NAME "HTTP Digest access authentication" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 32 #define CIPHERTEXT_LENGTH 32 #define BINARY_SIZE 16 #define BINARY_ALIGN 4 #define SALT_SIZE sizeof(reqinfo_t) #define SALT_ALIGN 4 #if defined(_OPENMP) static unsigned int omp_t = 1; #ifdef SIMD_COEF_32 #ifndef OMP_SCALE #define OMP_SCALE 256 #endif #else #ifndef OMP_SCALE #define OMP_SCALE 64 #endif #endif #endif #ifdef SIMD_COEF_32 #define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5) #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&60)*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 ) #define GETOUTPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&0x1c)*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32 ) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #define SEPARATOR '$' #define MAGIC "$response$" #define MAGIC_LEN (sizeof(MAGIC)-1) #define SIZE_TAB 12 // This is 8 x 64 bytes, so 
in MMX/SSE2 we support up to 9 limbs of MD5 #define HTMP 512 typedef struct { size_t h1tmplen; size_t h3tmplen; char h1tmp[HTMP]; char h3tmp[HTMP]; } reqinfo_t; /* digest authentication scheme : h1 = md5(user:realm:password) h2 = md5(method:digestURI) response = h3 = md5(h1:nonce:nonceCount:ClientNonce:qop:h2) */ /* request information */ enum e_req { R_RESPONSE, R_USER, R_REALM, R_METHOD, R_URI, R_NONCE, R_NONCECOUNT, R_CLIENTNONCE, R_QOP }; /* response:user:realm:method:uri:nonce:nonceCount:ClientNonce:qop */ static struct fmt_tests tests[] = { {"$response$679066476e67b5c7c4e88f04be567f8b$user$myrealm$GET$/$8c12bd8f728afe56d45a0ce846b70e5a$00000001$4b61913cec32e2c9$auth", "nocode"}, {"$response$faa6cb7d676e5b7c17fcbf966436aa0c$moi$myrealm$GET$/$af32592775d27b1cd06356b3a0db9ddf$00000001$8e1d49754a25aea7$auth", "kikou"}, {"$response$56940f87f1f53ade8b7d3c5a102c2bf3$usrx$teN__chars$GET$/4TLHS1TMN9cfsbqSUAdTG3CRq7qtXMptnYfn7mIIi3HRKOMhOks56e$2c0366dcbc$00000001$0153$auth", "passWOrd"}, {"$response$8663faf2337dbcb2c52882807592ec2c$user$myrealm$GET$/$8c12bd8f728afe56d45a0ce846b70e5a$", "pass"}, {"$response$8663faf2337dbcb2c52882807592ec2c$user$myrealm$GET$/$8c12bd8f728afe56d45a0ce846b70e5a", "pass"}, {NULL} }; /* used by set_key */ static char (*saved_plain)[PLAINTEXT_LENGTH + 1]; #ifdef SIMD_COEF_32 #define LIMBS 9 static unsigned char *saved_key[LIMBS]; static unsigned int *interm_key; static unsigned int *crypt_key; #else static int (*saved_len); static unsigned char (*crypt_key)[BINARY_SIZE]; #endif /* Store information about the request ()*/ static reqinfo_t *rinfo = NULL; static void init(struct fmt_main *self) { #ifdef SIMD_COEF_32 int i; #endif #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif #ifdef SIMD_COEF_32 for (i = 0; i < LIMBS; i++) saved_key[i] = mem_calloc_align(self->params.max_keys_per_crypt, 64, MEM_ALIGN_SIMD); interm_key = 
mem_calloc_align(self->params.max_keys_per_crypt, 16, MEM_ALIGN_SIMD); crypt_key = mem_calloc_align(self->params.max_keys_per_crypt, 16, MEM_ALIGN_SIMD); #else saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key)); #endif saved_plain = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_plain)); } static void done(void) { #ifdef SIMD_COEF_32 int i; #endif MEM_FREE(saved_plain); MEM_FREE(crypt_key); #ifdef SIMD_COEF_32 MEM_FREE(interm_key); for (i = 0; i < LIMBS; i++) MEM_FREE(saved_key[i]); #else MEM_FREE(saved_len); #endif } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; if (strncmp(ciphertext, MAGIC, MAGIC_LEN) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += MAGIC_LEN; if ((p = strtokm(ctcopy, "$")) == NULL) /* hash */ goto err; if (!ishexlc(p) || strlen(p) != 32) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* user */ goto err; if ((p = strtokm(NULL, "$")) == NULL) /* realm */ goto err; if ((p = strtokm(NULL, "$")) == NULL) /* method */ goto err; if ((p = strtokm(NULL, "$")) == NULL) /* uri */ goto err; if ((p = strtokm(NULL, "$")) == NULL) /* nonce */ goto err; if ((p = strtokm(NULL, "$")) == NULL) /* End of legacy HDAA or noncecount */ goto end_hdaa_legacy; if ((p = strtokm(NULL, "$")) == NULL) /* clientnonce */ goto err; if (!ishexlc(p) ) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* qop */ goto err; if ((p = strtokm(NULL, "$")) != NULL) goto err; end_hdaa_legacy: MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } // Normalize shorter hashes, to allow with or without trailing '$' character. 
static char *split(char *ciphertext, int index, struct fmt_main *self) { char *cp; if (strncmp(ciphertext, MAGIC, MAGIC_LEN)) return ciphertext; cp = ciphertext + MAGIC_LEN; cp = strchr(cp, '$'); if (!cp) return ciphertext; cp = strchr(cp+1, '$'); if (!cp) return ciphertext; cp = strchr(cp+1, '$'); if (!cp) return ciphertext; cp = strchr(cp+1, '$'); if (!cp) return ciphertext; cp = strchr(cp+1, '$'); if (!cp) return ciphertext; // now if we have $binary_hash$ then we remove the last '$' char if (strlen(cp) == 1 + BINARY_SIZE*2 + 1) { static char out[256]; strnzcpy(out, ciphertext, sizeof(out)); out[strlen(out)-1] = 0; return out; } return ciphertext; } static void set_salt(void *salt) { rinfo = salt; } static void set_key(char *key, int index) { strcpy(saved_plain[index], key); #ifndef SIMD_COEF_32 saved_len[index] = -1; #endif } static char *get_key(int index) { return saved_plain[index]; } static int cmp_all(void *binary, int count) { #ifdef SIMD_COEF_32 unsigned int x,y=0; #ifdef _OPENMP for(; y < SIMD_PARA_MD5 * omp_t; y++) #else for(; y < SIMD_PARA_MD5; y++) #endif for(x = 0; x < SIMD_COEF_32; x++) { if( ((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[y*SIMD_COEF_32*4+x] ) return 1; } return 0; #else int index; for (index = 0; index < count; index++) if (!(memcmp(binary, crypt_key[index], BINARY_SIZE))) return 1; return 0; #endif } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_32 unsigned int i,x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; for(i=0;i<(BINARY_SIZE/4);i++) if ( ((ARCH_WORD_32*)binary)[i] != ((ARCH_WORD_32*)crypt_key)[y*SIMD_COEF_32*4+i*SIMD_COEF_32+x] ) return 0; return 1; #else return !(memcmp(binary, crypt_key[index], BINARY_SIZE)); #endif } static int cmp_exact(char *source, int index) { return 1; } /* convert hash from binary to ascii */ #ifdef SIMD_COEF_32 // This code should be rewritten in intrinsics, reading from // MMX or SSE2 output buffers and writing to MMX/SSE2 input buffers. 
static inline void sse_bin2ascii(unsigned char *conv, unsigned char *src) { unsigned int index; for (index = 0; index < NBKEYS; index++) { unsigned int i, j = 0; for (i = 0; i < BINARY_SIZE; i += 2) { unsigned int t; t = (src[GETOUTPOS((i + 1), index)] & 0x0f); t <<= 12; t |= (src[GETOUTPOS((i + 1), index)] & 0xf0); t <<= 4; t |= (src[GETOUTPOS(i, index)] & 0x0f); t <<= 8; t |= ((src[GETOUTPOS(i, index)] & 0xf0) >> 4); t += 0x06060606; t += ((((t >> 4) & 0x01010101) * 0x27) + 0x2a2a2a2a); *(unsigned int*)&conv[GETPOS(j, index)] = t; j+=4; } } } #endif /* SIMD_COEF_32 */ #ifdef __MMX__ static inline void bin2ascii(__m64 *conv, __m64 *src) { unsigned int i = 0; while (i != 4) { __m64 l; __m64 r; __m64 t; __m64 u; __m64 v; /* 32 bits to 64 bits */ t = _mm_set1_pi32(0x0f0f0f0f); /* Bit-wise AND the 64-bit values in M1 and M2. */ u = _mm_and_si64(_mm_srli_si64(src[(i / 2)], 4), t); v = _mm_and_si64(src[(i / 2)], t); /* interleaving */ l = _mm_unpacklo_pi8(u, v); r = _mm_unpackhi_pi8(u, v); t = _mm_set1_pi32(0x06060606); l = _mm_add_pi32(l, t); r = _mm_add_pi32(r, t); t = _mm_set1_pi32(0x01010101); /* u = (l << 4) & t */ u = _mm_and_si64(_mm_srli_si64(l, 4), t); /* v = (r << 4) & t */ v = _mm_and_si64(_mm_srli_si64(r, 4), t); t = _mm_set1_pi32(0x00270027); /* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce the low 16 bits of the results. 
*/ u = _mm_mullo_pi16(u, t); v = _mm_mullo_pi16(v, t); t = _mm_set1_pi32(0x2a2a2a2a); u = _mm_add_pi32(u, t); v = _mm_add_pi32(v, t); conv[(i++)] = _mm_add_pi32(l, u); conv[(i++)] = _mm_add_pi32(r, v); } __asm__ __volatile__("emms"); } #else static inline void bin2ascii(uint32_t *conv, uint32_t *source) { unsigned char *src = (unsigned char*)source; unsigned int i; unsigned int j = 0; uint32_t t = 0; for (i = 0; i < BINARY_SIZE; i += 2) { #if (ARCH_LITTLE_ENDIAN == 0) t = (src[i] & 0xf0); t *= 0x10; t += (src[i] & 0x0f); t *= 0x1000; t += (src[(i + 1)] & 0xf0); t *= 0x10; t += (src[(i + 1)] & 0x0f); #else t = (src[(i + 1)] & 0x0f); t *= 0x1000; t += (src[(i + 1)] & 0xf0); t *= 0x10; t += (src[i] & 0x0f); t *= 0x100; t += ((src[i] & 0xf0) >> 4); #endif t += 0x06060606; t += ((((t >> 4) & 0x01010101) * 0x27) + 0x2a2a2a2a); conv[(j++)] = t; } } #endif /* MMX */ #if SIMD_COEF_32 static inline void crypt_done(unsigned const int *source, unsigned int *dest, int index) { unsigned int i; unsigned const int *s = &source[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*4*SIMD_COEF_32]; unsigned int *d = &dest[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*4*SIMD_COEF_32]; for (i = 0; i < BINARY_SIZE / 4; i++) { *d = *s; s += SIMD_COEF_32; d += SIMD_COEF_32; } } #endif static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; #if SIMD_COEF_32 #if defined(_OPENMP) #define ti (thread*NBKEYS+index) int thread; #pragma omp parallel for for (thread = 0; thread < (count+NBKEYS-1)/NBKEYS; thread++) #else #define thread 0 #define ti index #endif { static unsigned int crypt_len[NBKEYS]; unsigned int index, i, shortest, longest; for (index = 0; index < NBKEYS; index++) { int len; char temp; const char *key; key = rinfo->h1tmp; for (len = 0; len < rinfo->h1tmplen; len += 4, key += 4) *(ARCH_WORD_32*)&saved_key[len>>6][GETPOS(len, ti)] = *(ARCH_WORD_32*)key; len = rinfo->h1tmplen; key = (char*)&saved_plain[ti]; while((temp = *key++)) { 
saved_key[len>>6][GETPOS(len, ti)] = temp; len++; } saved_key[len>>6][GETPOS(len, ti)] = 0x80; // Clean rest of this buffer i = len; while (++i & 3) saved_key[i>>6][GETPOS(i, ti)] = 0; for (; i < (((len+8)>>6)+1)*64; i += 4) *(ARCH_WORD_32*)&saved_key[i>>6][GETPOS(i, ti)] = 0; ((unsigned int *)saved_key[(len+8)>>6])[14*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + (ti/SIMD_COEF_32)*16*SIMD_COEF_32] = len << 3; } SIMDmd5body(&saved_key[0][thread*64*NBKEYS], &crypt_key[thread*4*NBKEYS], NULL, SSEi_MIXED_IN); sse_bin2ascii((unsigned char*)&saved_key[0][thread*64*NBKEYS], (unsigned char*)&crypt_key[thread*4*NBKEYS]); longest = 0; shortest = HTMP; for (index = 0; index < NBKEYS; index++) { const char *key; int i, len; len = CIPHERTEXT_LENGTH - 1; key = rinfo->h3tmp + CIPHERTEXT_LENGTH; // Copy a char at a time until aligned at destination while (++len & 3) saved_key[len>>6][GETPOS(len, ti)] = *key++; // ...then a word at a time. This is a good boost, we are copying over 100 bytes. for (;len < rinfo->h3tmplen; len += 4, key += 4) *(ARCH_WORD_32*)&saved_key[len>>6][GETPOS(len, ti)] = *(ARCH_WORD_32*)key; len = rinfo->h3tmplen; saved_key[len>>6][GETPOS(len, ti)] = 0x80; // Clean rest of this buffer i = len; while (++i & 3) saved_key[i>>6][GETPOS(i, ti)] = 0; //for (; i < (((len+8)>>6)+1)*64; i += 4) for (; i <= crypt_len[index]; i += 4) *(ARCH_WORD_32*)&saved_key[i>>6][GETPOS(i, ti)] = 0; ((unsigned int *)saved_key[(len+8)>>6])[14*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + (ti/SIMD_COEF_32)*16*SIMD_COEF_32] = len << 3; crypt_len[index] = len; if (len > longest) longest = len; if (len < shortest) shortest = len; } // First limb SIMDmd5body(&saved_key[0][thread*64*NBKEYS], &interm_key[thread*4*NBKEYS], NULL, SSEi_MIXED_IN); // Copy any output that is done now if (shortest < 56) { if (longest < 56) memcpy(&crypt_key[thread*4*NBKEYS], &interm_key[thread*4*NBKEYS], 16*NBKEYS); else for (index = 0; index < NBKEYS; index++) if (crypt_len[index] < 56) crypt_done(interm_key, crypt_key, ti); } 
// Do the rest of the limbs for (i = 1; i < (((longest + 8) >> 6) + 1); i++) { SIMDmd5body(&saved_key[i][thread*64*NBKEYS], &interm_key[thread*4*NBKEYS], &interm_key[thread*4*NBKEYS], SSEi_RELOAD|SSEi_MIXED_IN); // Copy any output that is done now if (shortest < i*64+56) { if (shortest > (i-1)*64+55 && longest < i*64+56) memcpy(&crypt_key[thread*4*NBKEYS], &interm_key[thread*4*NBKEYS], 16*NBKEYS); else for (index = 0; index < NBKEYS; index++) if (((crypt_len[index] + 8) >> 6) == i) crypt_done(interm_key, crypt_key, ti); } } } #undef thread #undef ti #else int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { MD5_CTX ctx; int len; #ifdef _OPENMP char h3tmp[HTMP]; char h1tmp[HTMP]; #else char *h3tmp; char *h1tmp; #endif size_t tmp; #ifdef __MMX__ __m64 h1[BINARY_SIZE / sizeof(__m64)]; __m64 conv[CIPHERTEXT_LENGTH / sizeof(__m64) + 1]; #else uint32_t h1[BINARY_SIZE / sizeof(uint32_t)]; uint32_t conv[(CIPHERTEXT_LENGTH / sizeof(uint32_t)) + 1]; #endif tmp = rinfo->h1tmplen; if ((len = saved_len[index]) < 0) len = saved_len[index] = strlen(saved_plain[index]); #ifdef _OPENMP memcpy(h1tmp, rinfo->h1tmp, tmp); memcpy(h3tmp + CIPHERTEXT_LENGTH, rinfo->h3tmp + CIPHERTEXT_LENGTH, rinfo->h3tmplen - CIPHERTEXT_LENGTH); #else h3tmp = rinfo->h3tmp; h1tmp = rinfo->h1tmp; #endif memcpy(&h1tmp[tmp], saved_plain[index], len); MD5_Init(&ctx); MD5_Update(&ctx, h1tmp, len + tmp); MD5_Final((unsigned char*)h1, &ctx); bin2ascii(conv, h1); memcpy(h3tmp, conv, CIPHERTEXT_LENGTH); MD5_Init(&ctx); MD5_Update(&ctx, h3tmp, rinfo->h3tmplen); MD5_Final(crypt_key[index], &ctx); } #endif return count; } static char *mystrndup(const char *s, size_t n) { size_t tmp; size_t size; char *ret; for (tmp = 0; s[tmp] != 0 && tmp <= n; tmp++); size = n; if (tmp < size) size = tmp; if ((ret = mem_alloc(sizeof(char) * size + 1)) == NULL) return NULL; memmove(ret, s, size); ret[size] = 0; return ret; } static size_t reqlen(char *str) { size_t len; for (len = 0; 
str[len] != 0 && str[len] != SEPARATOR; len++); return len; } static void *get_salt(char *ciphertext) { int nb; int i; char *request[SIZE_TAB]; char *str; static reqinfo_t *r; #ifdef __MMX__ __m64 h2[BINARY_SIZE / sizeof(__m64)]; __m64 conv[CIPHERTEXT_LENGTH / sizeof(__m64) + 1]; #else unsigned int h2[BINARY_SIZE / sizeof(unsigned int)]; uint32_t conv[(CIPHERTEXT_LENGTH / sizeof(uint32_t)) + 1]; #endif MD5_CTX ctx; /* parse the password string */ if (!r) r = mem_alloc_tiny(sizeof(*r), MEM_ALIGN_WORD); memset(r, 0, sizeof(*r)); for (nb = 0, i = 1; ciphertext[i] != 0; i++) { if (ciphertext[i] == SEPARATOR) { i++; request[nb] = mystrndup(&ciphertext[i], reqlen(&ciphertext[i])); nb++; if (!ciphertext[i]) break; } } while (nb < SIZE_TAB) { request[nb++] = NULL; } /* calculate h2 (h2 = md5(method:digestURI))*/ str = mem_alloc(strlen(request[R_METHOD]) + strlen(request[R_URI]) + 2); sprintf(str, "%s:%s", request[R_METHOD], request[R_URI]); MD5_Init(&ctx); MD5_Update(&ctx, str, strlen(str)); MD5_Final((unsigned char*)h2, &ctx); memset(conv, 0, CIPHERTEXT_LENGTH + 1); bin2ascii(conv, h2); MEM_FREE(str); /* create a part of h1 (h1tmp = request:realm:)*/ snprintf(r->h1tmp, HTMP - PLAINTEXT_LENGTH, "%s:%s:", request[R_USER], request[R_REALM]); /* create a part of h3 (h3tmp = nonce:noncecount:clientnonce:qop:h2)*/ if (request[R_CLIENTNONCE] == NULL) snprintf(&r->h3tmp[CIPHERTEXT_LENGTH], HTMP - CIPHERTEXT_LENGTH, ":%s:%s", request[R_NONCE], (char*)conv); else snprintf(&r->h3tmp[CIPHERTEXT_LENGTH], HTMP - CIPHERTEXT_LENGTH, ":%s:%s:%s:%s:%s", request[R_NONCE], request[R_NONCECOUNT], request[R_CLIENTNONCE], request[R_QOP], (char*)conv); r->h1tmplen = strlen(r->h1tmp); r->h3tmplen = strlen(&r->h3tmp[CIPHERTEXT_LENGTH]) + CIPHERTEXT_LENGTH; for (nb=0; nb < SIZE_TAB; ++nb) { MEM_FREE(request[nb]); } return r; } /* convert response to binary form */ static void *get_binary(char *ciphertext) { static unsigned int realcipher[BINARY_SIZE / sizeof(int)]; int i; ciphertext += 10; for (i = 
0; i < BINARY_SIZE; i++) { ((unsigned char*)realcipher)[i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] * 16 + atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])]; } return (void*) realcipher; } #ifdef SIMD_COEF_32 #define HASH_OFFSET (index&(SIMD_COEF_32-1))+((unsigned int)index/SIMD_COEF_32)*SIMD_COEF_32*4 static int get_hash_0(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_0; } static int get_hash_1(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_1; } static int get_hash_2(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_2; } static int get_hash_3(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_3; } static int get_hash_4(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_4; } static int get_hash_5(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_5; } static int get_hash_6(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_6; } #else static int get_hash_0(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_0; } static int get_hash_1(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_1; } static int get_hash_2(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_2; } static int get_hash_3(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_3; } static int get_hash_4(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_4; } static int get_hash_5(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_5; } static int get_hash_6(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_6; } #endif struct fmt_main fmt_HDAA = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT, { NULL }, { MAGIC }, tests }, { init, done, fmt_default_reset, 
fmt_default_prepare, valid, split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
fci_rdm.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <string.h> //#include <omp.h> #include "config.h" #include "vhf/fblas.h" #include "fci.h" #define MIN(X,Y) ((X)<(Y)?(X):(Y)) #define CSUMTHR 1e-28 #define BUFBASE 96 #define SQRT2 1.4142135623730950488 #define BRAKETSYM 1 #define PARTICLESYM 2 /* * i is the index of the annihilation operator, a is the index of * creation operator. t1[I,i*norb+a] because it represents that * starting from the intermediate I, removing i and creating a leads to * determinant of str1 */ double FCIrdm2_a_t1ci(double *ci0, double *t1, int bcount, int stra_id, int strb_id, int norb, int nstrb, int nlinka, _LinkT *clink_indexa) { ci0 += strb_id; const int nnorb = norb * norb; int i, j, k, a, sign; size_t str1; const _LinkT *tab = clink_indexa + stra_id * nlinka; double *pt1, *pci; double csum = 0; for (j = 0; j < nlinka; j++) { a = EXTRACT_CRE (tab[j]); i = EXTRACT_DES (tab[j]); str1 = EXTRACT_ADDR(tab[j]); sign = EXTRACT_SIGN(tab[j]); pci = ci0 + str1*nstrb; pt1 = t1 + i*norb+a; if (sign == 0) { break; } else if (sign > 0) { for (k = 0; k < bcount; k++) { pt1[k*nnorb] += pci[k]; csum += pci[k] * pci[k]; } } else { for (k = 0; k < bcount; k++) { pt1[k*nnorb] -= pci[k]; csum += pci[k] * pci[k]; } } } return csum; } double FCIrdm2_b_t1ci(double *ci0, double *t1, int bcount, int stra_id, int strb_id, int norb, int nstrb, int nlinkb, _LinkT 
*clink_indexb) { const int nnorb = norb * norb; int i, j, a, str0, str1, sign; const _LinkT *tab = clink_indexb + strb_id * nlinkb; double *pci = ci0 + stra_id*(size_t)nstrb; double csum = 0; for (str0 = 0; str0 < bcount; str0++) { for (j = 0; j < nlinkb; j++) { a = EXTRACT_CRE (tab[j]); i = EXTRACT_DES (tab[j]); str1 = EXTRACT_ADDR(tab[j]); sign = EXTRACT_SIGN(tab[j]); if (sign == 0) { break; } else { t1[i*norb+a] += sign * pci[str1]; csum += pci[str1] * pci[str1]; } } t1 += nnorb; tab += nlinkb; } return csum; } double FCIrdm2_0b_t1ci(double *ci0, double *t1, int bcount, int stra_id, int strb_id, int norb, int nstrb, int nlinkb, _LinkT *clink_indexb) { const int nnorb = norb * norb; int i, j, a, str0, str1, sign; const _LinkT *tab = clink_indexb + strb_id * nlinkb; double *pci = ci0 + stra_id*(size_t)nstrb; double csum = 0; for (str0 = 0; str0 < bcount; str0++) { memset(t1, 0, sizeof(double) * nnorb); for (j = 0; j < nlinkb; j++) { a = EXTRACT_CRE (tab[j]); i = EXTRACT_DES (tab[j]); str1 = EXTRACT_ADDR(tab[j]); sign = EXTRACT_SIGN(tab[j]); t1[i*norb+a] += sign * pci[str1]; csum += pci[str1] * pci[str1]; } t1 += nnorb; tab += nlinkb; } return csum; } /* spin free E^i_j | ci0 > */ double FCI_t1ci_sf(double *ci0, double *t1, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb) { double csum; csum = FCIrdm2_0b_t1ci(ci0, t1, bcount, stra_id, strb_id, norb, nb, nlinkb, clink_indexb) + FCIrdm2_a_t1ci (ci0, t1, bcount, stra_id, strb_id, norb, nb, nlinka, clink_indexa); return csum; } static void tril_particle_symm(double *rdm2, double *tbra, double *tket, int bcount, int norb, double alpha, double beta) { const char TRANS_N = 'N'; const char TRANS_T = 'T'; int nnorb = norb * norb; int i, j, k, m, n; int blk = MIN(((int)(48/norb))*norb, nnorb); double *buf = malloc(sizeof(double) * nnorb*bcount); double *p1; for (n = 0, k = 0; k < bcount; k++) { p1 = tbra + k * nnorb; for (i = 0; i < norb; i++) 
{ for (j = 0; j < norb; j++, n++) { buf[n] = p1[j*norb+i]; } } } // dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount, // &alpha, tket, &nnorb, buf, &nnorb, &beta, rdm2, &nnorb); for (m = 0; m < nnorb-blk; m+=blk) { n = nnorb - m; dgemm_(&TRANS_N, &TRANS_T, &blk, &n, &bcount, &alpha, tket+m, &nnorb, buf+m, &nnorb, &beta, rdm2+m*nnorb+m, &nnorb); } n = nnorb - m; dgemm_(&TRANS_N, &TRANS_T, &n, &n, &bcount, &alpha, tket+m, &nnorb, buf+m, &nnorb, &beta, rdm2+m*nnorb+m, &nnorb); free(buf); } static void _transpose_jikl(double *dm2, int norb) { int nnorb = norb * norb; int i, j; double *p0, *p1; double *tmp = malloc(sizeof(double)*nnorb*nnorb); memcpy(tmp, dm2, sizeof(double)*nnorb*nnorb); for (i = 0; i < norb; i++) { for (j = 0; j < norb; j++) { p0 = tmp + (j*norb+i) * nnorb; p1 = dm2 + (i*norb+j) * nnorb; memcpy(p1, p0, sizeof(double)*nnorb); } } free(tmp); } /* * Note! The returned rdm2 from FCI*kern* function corresponds to * [(p^+ q on <bra|) r^+ s] = [p q^+ r^+ s] * in FCIrdm12kern_sf, FCIrdm12kern_spin0, FCIrdm12kern_a, ... * t1 is calculated as |K> = i^+ j|0>. by doing dot(t1.T,t1) to get "rdm2", * The ket part (k^+ l|0>) will generate the correct order for the last * two indices kl of rdm2(i,j,k,l), But the bra part (i^+ j|0>)^dagger * will generate an order of (i,j), which is identical to call a bra of * (<0|i j^+). The so-obtained rdm2(i,j,k,l) corresponds to the * operator sequence i j^+ k^+ l. * * symm = 1: symmetrizes the 1pdm, and 2pdm. 
This is true only if bra == ket, * and the operators on bra are equivalent to those on ket, like * FCIrdm12kern_a, FCIrdm12kern_b, FCIrdm12kern_sf, FCIrdm12kern_spin0 * sym = 2: consider the particle permutation symmetry: * E^j_l E^i_k = E^i_k E^j_l - \delta_{il}E^j_k + \dleta_{jk}E^i_l */ void FCIrdm12_drv(void (*dm12kernel)(), double *rdm1, double *rdm2, double *bra, double *ket, int norb, int na, int nb, int nlinka, int nlinkb, int *link_indexa, int *link_indexb, int symm) { const int nnorb = norb * norb; int strk, i, j, k, l, ib, blen; double *pdm1, *pdm2; memset(rdm1, 0, sizeof(double) * nnorb); memset(rdm2, 0, sizeof(double) * nnorb*nnorb); _LinkT *clinka = malloc(sizeof(_LinkT) * nlinka * na); _LinkT *clinkb = malloc(sizeof(_LinkT) * nlinkb * nb); FCIcompress_link(clinka, link_indexa, norb, na, nlinka); FCIcompress_link(clinkb, link_indexb, norb, nb, nlinkb); #pragma omp parallel default(none) \ shared(dm12kernel, bra, ket, norb, na, nb, nlinka, \ nlinkb, clinka, clinkb, rdm1, rdm2, symm), \ private(strk, i, ib, blen, pdm1, pdm2) { pdm1 = calloc(nnorb+2, sizeof(double)); pdm2 = calloc(nnorb*nnorb+2, sizeof(double)); #pragma omp for schedule(dynamic, 40) for (strk = 0; strk < na; strk++) { for (ib = 0; ib < nb; ib += BUFBASE) { blen = MIN(BUFBASE, nb-ib); (*dm12kernel)(pdm1, pdm2, bra, ket, blen, strk, ib, norb, na, nb, nlinka, nlinkb, clinka, clinkb, symm); } } #pragma omp critical { for (i = 0; i < nnorb; i++) { rdm1[i] += pdm1[i]; } for (i = 0; i < nnorb*nnorb; i++) { rdm2[i] += pdm2[i]; } } free(pdm1); free(pdm2); } free(clinka); free(clinkb); switch (symm) { case BRAKETSYM: for (i = 0; i < norb; i++) { for (j = 0; j < i; j++) { rdm1[j*norb+i] = rdm1[i*norb+j]; } } for (i = 0; i < nnorb; i++) { for (j = 0; j < i; j++) { rdm2[j*nnorb+i] = rdm2[i*nnorb+j]; } } _transpose_jikl(rdm2, norb); break; case PARTICLESYM: // right 2pdm order is required here, which transposes the cre/des on bra for (i = 0; i < norb; i++) { for (j = 0; j < i; j++) { pdm1 = rdm2 + 
/* NOTE(review): tail of a function whose head lies before this chunk
 * (it mirrors (i,j)/(j,i) blocks of rdm2 and applies an operator-
 * reordering correction); code kept verbatim. */
(i*nnorb+j)*norb;
pdm2 = rdm2 + (j*nnorb+i)*norb;
for (k = 0; k < norb; k++) {
for (l = 0; l < norb; l++) {
        pdm2[l*nnorb+k] = pdm1[k*nnorb+l];
} }
/* E^j_l E^i_k = E^i_k E^j_l + \delta_{il} E^j_k - \delta_{jk} E^i_l */
for (k = 0; k < norb; k++) {
        pdm2[i*nnorb+k] += rdm1[j*norb+k];
        pdm2[k*nnorb+j] -= rdm1[i*norb+k];
}
} }
break;
default:
        _transpose_jikl(rdm2, norb);
} }

/*
 * Spin-free 1- and 2-particle density matrix kernel.
 * Accumulates, for a batch of `bcount` beta strings starting at
 * (stra_id, strb_id):
 *   rdm1 += t1 . ket   (dgemv)
 *   rdm2 += t1 . t1^T  (dsyrk/dgemm, or triangular update for
 *                       particle-permutation symmetry)
 * where t1 = <E^i_j|ket> is built by FCI_t1ci_sf into `buf`
 * (nnorb x bcount, column-major).  Work is skipped when the summed
 * magnitude `csum` is below CSUMTHR.  `symm` selects BRAKETSYM
 * (bra == ket, symmetric rank-k update), PARTICLESYM, or the general
 * dgemm path.  `bra` is unused here (bra == ket is assumed upstream).
 */
void FCIrdm12kern_sf(double *rdm1, double *rdm2, double *bra, double *ket,
                     int bcount, int stra_id, int strb_id,
                     int norb, int na, int nb, int nlinka, int nlinkb,
                     _LinkT *clink_indexa, _LinkT *clink_indexb, int symm)
{
        const int INC1 = 1;
        const char UP = 'U';
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nnorb = norb * norb;
        double csum;
        double *buf = malloc(sizeof(double) * nnorb * bcount);

        csum = FCI_t1ci_sf(ket, buf, bcount, stra_id, strb_id,
                           norb, na, nb, nlinka, nlinkb,
                           clink_indexa, clink_indexb);
        if (csum > CSUMTHR) {
                /* rdm1 += buf . ket[stra_id*nb+strb_id : +bcount] */
                dgemv_(&TRANS_N, &nnorb, &bcount, &D1, buf, &nnorb,
                       ket+stra_id*nb+strb_id, &INC1, &D1, rdm1, &INC1);
                switch (symm) {
                case BRAKETSYM:
                        /* upper-triangular rank-bcount update; the caller
                         * is expected to symmetrize rdm2 afterwards */
                        dsyrk_(&UP, &TRANS_N, &nnorb, &bcount,
                               &D1, buf, &nnorb, &D1, rdm2, &nnorb);
                        break;
                case PARTICLESYM:
                        tril_particle_symm(rdm2, buf, buf, bcount, norb, 1, 1);
                        break;
                default:
                        dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
                               &D1, buf, &nnorb, buf, &nnorb,
                               &D1, rdm2, &nnorb);
                }
        }
        free(buf);
}

/*
 * _spin0 assumes the strict symmetry on alpha and beta electrons
 * (na == nb and the CI vector symmetric under string exchange), so only
 * the lower triangle stra_id >= strb_id is processed and results are
 * doubled (D2) / scaled by SQRT2 on the diagonal stripe
 * [fill0*nnorb, fill1*nnorb) to account for the omitted triangle.
 */
void FCIrdm12kern_spin0(double *rdm1, double *rdm2, double *bra, double *ket,
                        int bcount, int stra_id, int strb_id,
                        int norb, int na, int nb, int nlinka, int nlinkb,
                        _LinkT *clink_indexa, _LinkT *clink_indexb, int symm)
{
        if (stra_id < strb_id) {
                return;
        }
        const int INC1 = 1;
        const char UP = 'U';
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const double D2 = 2;
        const int nnorb = norb * norb;
        int fill0, fill1, i;
        double csum = 0;
        double *buf = calloc(nnorb * na, sizeof(double));

        if (strb_id+bcount <= stra_id) {
                /* batch entirely below the diagonal: both contributions
                 * cover the full batch */
                fill0 = bcount;
                fill1 = bcount;
                csum = FCIrdm2_b_t1ci(ket, buf, fill0, stra_id, strb_id,
                                      norb, na, nlinka, clink_indexa)
                     + FCIrdm2_a_t1ci(ket, buf, fill1, stra_id, strb_id,
                                      norb, na, nlinka, clink_indexa);
        } else if (stra_id >= strb_id) {
                /* batch crosses the diagonal: truncate at stra_id
                 * (fill1 includes the diagonal element itself) */
                fill0 = stra_id - strb_id;
                fill1 = stra_id - strb_id + 1;
                csum = FCIrdm2_b_t1ci(ket, buf, fill0, stra_id, strb_id,
                                      norb, na, nlinka, clink_indexa)
                     + FCIrdm2_a_t1ci(ket, buf, fill1, stra_id, strb_id,
                                      norb, na, nlinka, clink_indexa);
        }
        if (csum > CSUMTHR) {
                /* na is used as the beta-string stride here; valid because
                 * na == nb under the spin0 assumption */
                dgemv_(&TRANS_N, &nnorb, &fill1, &D2, buf, &nnorb,
                       ket+stra_id*na+strb_id, &INC1, &D1, rdm1, &INC1);
                /* diagonal stripe gets weight sqrt(2) so that the doubled
                 * rank update yields the correct factor on the diagonal */
                for (i = fill0*nnorb; i < fill1*nnorb; i++) {
                        buf[i] *= SQRT2;
                }
                switch (symm) {
                case BRAKETSYM:
                        dsyrk_(&UP, &TRANS_N, &nnorb, &fill1,
                               &D2, buf, &nnorb, &D1, rdm2, &nnorb);
                        break;
                case PARTICLESYM:
                        tril_particle_symm(rdm2, buf, buf, fill1, norb, D2, D1);
                        break;
                default:
                        dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &fill1,
                               &D2, buf, &nnorb, buf, &nnorb,
                               &D1, rdm2, &nnorb);
                }
        }
        free(buf);
}

/*
 * ***********************************************
 * transition density matrix, spin free
 *
 * Same contraction pattern as FCIrdm12kern_sf, but with distinct bra and
 * ket vectors: tdm1 += t1(ket) . bra, tdm2 += t1(ket) . t1(bra)^T.
 * Either t1 falling below CSUMTHR short-circuits all updates.
 */
void FCItdm12kern_sf(double *tdm1, double *tdm2, double *bra, double *ket,
                     int bcount, int stra_id, int strb_id,
                     int norb, int na, int nb, int nlinka, int nlinkb,
                     _LinkT *clink_indexa, _LinkT *clink_indexb, int symm)
{
        const int INC1 = 1;
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nnorb = norb * norb;
        double csum;
        double *buf0 = malloc(sizeof(double) * nnorb*bcount);
        double *buf1 = malloc(sizeof(double) * nnorb*bcount);

        csum = FCI_t1ci_sf(bra, buf1, bcount, stra_id, strb_id,
                           norb, na, nb, nlinka, nlinkb,
                           clink_indexa, clink_indexb);
        if (csum < CSUMTHR) { goto _normal_end; }
        csum = FCI_t1ci_sf(ket, buf0, bcount, stra_id, strb_id,
                           norb, na, nb, nlinka, nlinkb,
                           clink_indexa, clink_indexb);
        if (csum < CSUMTHR) { goto _normal_end; }
        dgemv_(&TRANS_N, &nnorb, &bcount, &D1, buf0, &nnorb,
               bra+stra_id*nb+strb_id, &INC1, &D1, tdm1, &INC1);
        switch (symm) {
        case PARTICLESYM:
                tril_particle_symm(tdm2, buf1, buf0, bcount, norb, D1, D1);
                break;
        default:
                dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
                       &D1, buf0, &nnorb, buf1, &nnorb,
                       &D1, tdm2, &nnorb);
        }
_normal_end:
        free(buf0);
        free(buf1);
}

/*
 * ***********************************************
 * 2pdm kernel for alpha^i alpha_j | ci0 >
 * ***********************************************
 * Alpha-spin-only analogue of FCIrdm12kern_sf: t1 is built by
 * FCIrdm2_a_t1ci over alpha excitations only.
 * NOTE(review): this passes `nb` where FCIrdm12kern_spin0 passes `na` to
 * FCIrdm2_a_t1ci — presumably that argument is the beta-string count
 * (row length), which makes both call sites consistent; verify against
 * FCIrdm2_a_t1ci's definition (not in this chunk).
 */
void FCIrdm12kern_a(double *rdm1, double *rdm2, double *bra, double *ket,
                    int bcount, int stra_id, int strb_id,
                    int norb, int na, int nb, int nlinka, int nlinkb,
                    _LinkT *clink_indexa, _LinkT *clink_indexb, int symm)
{
        const int INC1 = 1;
        const char UP = 'U';
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nnorb = norb * norb;
        double csum;
        double *buf = calloc(nnorb*bcount, sizeof(double));

        csum = FCIrdm2_a_t1ci(ket, buf, bcount, stra_id, strb_id,
                              norb, nb, nlinka, clink_indexa);
        if (csum > CSUMTHR) {
                dgemv_(&TRANS_N, &nnorb, &bcount, &D1, buf, &nnorb,
                       ket+stra_id*nb+strb_id, &INC1, &D1, rdm1, &INC1);
                switch (symm) {
                case BRAKETSYM:
                        dsyrk_(&UP, &TRANS_N, &nnorb, &bcount,
                               &D1, buf, &nnorb, &D1, rdm2, &nnorb);
                        break;
                case PARTICLESYM:
                        tril_particle_symm(rdm2, buf, buf, bcount, norb, 1, 1);
                        break;
                default:
                        dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
                               &D1, buf, &nnorb, buf, &nnorb,
                               &D1, rdm2, &nnorb);
                }
        }
        free(buf);
}

/*
 * 2pdm kernel for beta^i beta_j | ci0 >
 * Beta-spin-only analogue; t1 built by FCIrdm2_b_t1ci with the beta
 * link table.
 */
void FCIrdm12kern_b(double *rdm1, double *rdm2, double *bra, double *ket,
                    int bcount, int stra_id, int strb_id,
                    int norb, int na, int nb, int nlinka, int nlinkb,
                    _LinkT *clink_indexa, _LinkT *clink_indexb, int symm)
{
        const int INC1 = 1;
        const char UP = 'U';
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nnorb = norb * norb;
        double csum;
        double *buf = calloc(nnorb*bcount, sizeof(double));

        csum = FCIrdm2_b_t1ci(ket, buf, bcount, stra_id, strb_id,
                              norb, nb, nlinkb, clink_indexb);
        if (csum > CSUMTHR) {
                dgemv_(&TRANS_N, &nnorb, &bcount, &D1, buf, &nnorb,
                       ket+stra_id*nb+strb_id, &INC1, &D1, rdm1, &INC1);
                switch (symm) {
                case BRAKETSYM:
                        dsyrk_(&UP, &TRANS_N, &nnorb, &bcount,
                               &D1, buf, &nnorb, &D1, rdm2, &nnorb);
                        break;
                case PARTICLESYM:
                        tril_particle_symm(rdm2, buf, buf, bcount, norb, 1, 1);
                        break;
                default:
                        dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
                               &D1, buf, &nnorb, buf, &nnorb,
                               &D1, rdm2, &nnorb);
                }
        }
        free(buf);
}

/* Alpha-excitation transition-density kernel: distinct bra/ket, no
 * bra-ket symmetry path available. */
void FCItdm12kern_a(double *tdm1, double *tdm2, double *bra, double *ket,
                    int bcount, int stra_id, int strb_id,
                    int norb, int na, int nb, int nlinka, int nlinkb,
                    _LinkT *clink_indexa, _LinkT *clink_indexb, int symm)
{
        const int INC1 = 1;
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nnorb = norb * norb;
        double csum;
        double *buf0 = calloc(nnorb*bcount, sizeof(double));
        double *buf1 = calloc(nnorb*bcount, sizeof(double));

        csum = FCIrdm2_a_t1ci(bra, buf1, bcount, stra_id, strb_id,
                              norb, nb, nlinka, clink_indexa);
        if (csum < CSUMTHR) { goto _normal_end; }
        csum = FCIrdm2_a_t1ci(ket, buf0, bcount, stra_id, strb_id,
                              norb, nb, nlinka, clink_indexa);
        if (csum < CSUMTHR) { goto _normal_end; }
        dgemv_(&TRANS_N, &nnorb, &bcount, &D1, buf0, &nnorb,
               bra+stra_id*nb+strb_id, &INC1, &D1, tdm1, &INC1);
        switch (symm) {
        case PARTICLESYM:
                tril_particle_symm(tdm2, buf1, buf0, bcount, norb, D1, D1);
                break;
        default:
                dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
                       &D1, buf0, &nnorb, buf1, &nnorb,
                       &D1, tdm2, &nnorb);
        }
_normal_end:
        free(buf0);
        free(buf1);
}

/* Beta-excitation transition-density kernel. */
void FCItdm12kern_b(double *tdm1, double *tdm2, double *bra, double *ket,
                    int bcount, int stra_id, int strb_id,
                    int norb, int na, int nb, int nlinka, int nlinkb,
                    _LinkT *clink_indexa, _LinkT *clink_indexb, int symm)
{
        const int INC1 = 1;
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nnorb = norb * norb;
        double csum;
        double *buf0 = calloc(nnorb*bcount, sizeof(double));
        double *buf1 = calloc(nnorb*bcount, sizeof(double));

        csum = FCIrdm2_b_t1ci(bra, buf1, bcount, stra_id, strb_id,
                              norb, nb, nlinkb, clink_indexb);
        if (csum < CSUMTHR) { goto _normal_end; }
        csum = FCIrdm2_b_t1ci(ket, buf0, bcount, stra_id, strb_id,
                              norb, nb, nlinkb, clink_indexb);
        if (csum < CSUMTHR) { goto _normal_end; }
        dgemv_(&TRANS_N, &nnorb, &bcount, &D1, buf0, &nnorb,
               bra+stra_id*nb+strb_id, &INC1, &D1, tdm1, &INC1);
        switch (symm) {
        case PARTICLESYM:
                tril_particle_symm(tdm2, buf1, buf0, bcount, norb, D1, D1);
                break;
        default:
                dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
                       &D1, buf0, &nnorb, buf1, &nnorb,
                       &D1, tdm2, &nnorb);
        }
_normal_end:
        free(buf0);
        free(buf1);
}

/* Mixed alpha(bra)/beta(ket) transition 2pdm; only the general dgemm
 * path exists and tdm1 is not touched here. */
void FCItdm12kern_ab(double *tdm1, double *tdm2, double *bra, double *ket,
                     int bcount, int stra_id, int strb_id,
                     int norb, int na, int nb, int nlinka, int nlinkb,
                     _LinkT *clink_indexa, _LinkT *clink_indexb, int symm)
{
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nnorb = norb * norb;
        double csum;
        double *bufb = calloc(nnorb*bcount, sizeof(double));
        double *bufa = calloc(nnorb*bcount, sizeof(double));

        csum = FCIrdm2_a_t1ci(bra, bufa, bcount, stra_id, strb_id,
                              norb, nb, nlinka, clink_indexa);
        if (csum < CSUMTHR) { goto _normal_end; }
        csum = FCIrdm2_b_t1ci(ket, bufb, bcount, stra_id, strb_id,
                              norb, nb, nlinkb, clink_indexb);
        if (csum < CSUMTHR) { goto _normal_end; }
        /* no particle symmetry between alpha-alpha-beta-beta 2pdm */
        dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
               &D1, bufb, &nnorb, bufa, &nnorb,
               &D1, tdm2, &nnorb);
_normal_end:
        free(bufb);
        free(bufa);
}

/*
 * ***********************************************
 * 1-pdm
 * ***********************************************
 * Alpha transition 1-pdm: rdm1[a,i] = <bra| a^+_a a_i |ket>, summing
 * over all alpha excitations str0 -> str1 with the link-table sign.
 * A sign of 0 terminates the (sign-sorted) link list early.
 */
void FCItrans_rdm1a(double *rdm1, double *bra, double *ket,
                    int norb, int na, int nb, int nlinka, int nlinkb,
                    int *link_indexa, int *link_indexb)
{
        int i, a, j, k, str0, str1, sign;
        double *pket, *pbra;
        _LinkT *tab;
        _LinkT *clink = malloc(sizeof(_LinkT) * nlinka * na);
        FCIcompress_link(clink, link_indexa, norb, na, nlinka);

        memset(rdm1, 0, sizeof(double) * norb*norb);
        for (str0 = 0; str0 < na; str0++) {
                tab = clink + str0 * nlinka;
                pket = ket + str0 * nb;
                for (j = 0; j < nlinka; j++) {
                        a    = EXTRACT_CRE (tab[j]);
                        i    = EXTRACT_DES (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        pbra = bra + str1 * nb;
                        if (sign == 0) {
                                break;
                        } else if (sign > 0) {
                                for (k = 0; k < nb; k++) {
                                        rdm1[a*norb+i] += pbra[k]*pket[k];
                                }
                        } else {
                                for (k = 0; k < nb; k++) {
                                        rdm1[a*norb+i] -= pbra[k]*pket[k];
                                }
                        }
                }
        }
        free(clink);
}

/* Beta transition 1-pdm: excitations act on the beta index k within
 * each alpha string; sign is applied multiplicatively. */
void FCItrans_rdm1b(double *rdm1, double *bra, double *ket,
                    int norb, int na, int nb, int nlinka, int nlinkb,
                    int *link_indexa, int *link_indexb)
{
        int i, a, j, k, str0, str1, sign;
        double *pket, *pbra;
        double tmp;
        _LinkT *tab;
        _LinkT *clink = malloc(sizeof(_LinkT) * nlinkb * nb);
        FCIcompress_link(clink, link_indexb, norb, nb, nlinkb);

        memset(rdm1, 0, sizeof(double) * norb*norb);
        for (str0 = 0; str0 < na; str0++) {
                pbra = bra + str0 * nb;
                pket = ket + str0 * nb;
                for (k = 0; k < nb; k++) {
                        tab = clink + k * nlinkb;
                        tmp = pket[k];
                        for (j = 0; j < nlinkb; j++) {
                                a    = EXTRACT_CRE (tab[j]);
                                i    = EXTRACT_DES (tab[j]);
                                str1 = EXTRACT_ADDR(tab[j]);
                                sign = EXTRACT_SIGN(tab[j]);
                                if (sign == 0) {
                                        break;
                                } else {
                                        rdm1[a*norb+i] += sign*pbra[str1]*tmp;
                                }
                        }
                }
        }
        free(clink);
}

/*
 * make_rdm1 assumes the density matrix is hermitian: only the a >= i
 * triangle is accumulated from |ciket> (cibra is unused), then mirrored.
 */
void FCImake_rdm1a(double *rdm1, double *cibra, double *ciket,
                   int norb, int na, int nb, int nlinka, int nlinkb,
                   int *link_indexa, int *link_indexb)
{
        int i, a, j, k, str0, str1, sign;
        double *pci0, *pci1;
        double *ci0 = ciket;
        _LinkT *tab;
        _LinkT *clink = malloc(sizeof(_LinkT) * nlinka * na);
        FCIcompress_link(clink, link_indexa, norb, na, nlinka);

        memset(rdm1, 0, sizeof(double) * norb*norb);
        for (str0 = 0; str0 < na; str0++) {
                tab = clink + str0 * nlinka;
                pci0 = ci0 + str0 * nb;
                for (j = 0; j < nlinka; j++) {
                        a    = EXTRACT_CRE (tab[j]);
                        i    = EXTRACT_DES (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        pci1 = ci0 + str1 * nb;
                        if (a >= i) {
                                if (sign == 0) {
                                        break;
                                } else if (sign > 0) {
                                        for (k = 0; k < nb; k++) {
                                                rdm1[a*norb+i] += pci0[k]*pci1[k];
                                        }
                                } else {
                                        for (k = 0; k < nb; k++) {
                                                rdm1[a*norb+i] -= pci0[k]*pci1[k];
                                        }
                                }
                        }
                }
        }
        /* mirror the lower triangle into the upper one */
        for (j = 0; j < norb; j++) {
                for (k = 0; k < j; k++) {
                        rdm1[k*norb+j] = rdm1[j*norb+k];
                }
        }
        free(clink);
}

/* Beta analogue of FCImake_rdm1a (hermitian assumption, cibra unused). */
void FCImake_rdm1b(double *rdm1, double *cibra, double *ciket,
                   int norb, int na, int nb, int nlinka, int nlinkb,
                   int *link_indexa, int *link_indexb)
{
        int i, a, j, k, str0, str1, sign;
        double *pci0;
        double *ci0 = ciket;
        double tmp;
        _LinkT *tab;
        _LinkT *clink = malloc(sizeof(_LinkT) * nlinkb * nb);
        FCIcompress_link(clink, link_indexb, norb, nb, nlinkb);

        memset(rdm1, 0, sizeof(double) * norb*norb);
        for (str0 = 0; str0 < na; str0++) {
                pci0 = ci0 + str0 * nb;
                for (k = 0; k < nb; k++) {
                        tab = clink + k * nlinkb;
                        tmp = pci0[k];
                        for (j = 0; j < nlinkb; j++) {
                                a    = EXTRACT_CRE (tab[j]);
                                i    = EXTRACT_DES (tab[j]);
                                str1 = EXTRACT_ADDR(tab[j]);
                                sign = EXTRACT_SIGN(tab[j]);
                                if (a >= i) {
                                        if (sign == 0) {
                                                break;
                                        } else if (sign > 0) {
                                                rdm1[a*norb+i] += pci0[str1]*tmp;
                                        } else {
                                                rdm1[a*norb+i] -= pci0[str1]*tmp;
                                        }
                                }
                        }
                }
        }
        /* mirror the lower triangle into the upper one */
        for (j = 0; j < norb; j++) {
                for (k = 0; k < j; k++) {
                        rdm1[k*norb+j] = rdm1[j*norb+k];
                }
        }
        free(clink);
}
yescrypt-opt.c
/*- * Copyright 2009 Colin Percival * Copyright 2013,2014 Alexander Peslyak * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * This file was originally written by Colin Percival as part of the Tarsnap * online backup system. 
 */

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#include "sha256_Y.h"
#include "sysendian.h"

#include "yescrypt-platform.c"

/* Decode a 32-bit little-endian value from a possibly unaligned buffer. */
static inline uint32_t
le32dec(const void *pp)
{
	const uint8_t *p = (uint8_t const *)pp;

	return ((uint32_t)(p[0]) + ((uint32_t)(p[1]) << 8) +
	    ((uint32_t)(p[2]) << 16) + ((uint32_t)(p[3]) << 24));
}

/* Encode a 32-bit value little-endian into a possibly unaligned buffer. */
static inline void
le32enc(void *pp, uint32_t x)
{
	uint8_t * p = (uint8_t *)pp;

	p[0] = x & 0xff;
	p[1] = (x >> 8) & 0xff;
	p[2] = (x >> 16) & 0xff;
	p[3] = (x >> 24) & 0xff;
}

/* Copy `count` 64-bit words.  count must be a nonzero multiple of 4:
 * the loop is unrolled by four and only then tests the counter. */
static inline void
blkcpy(uint64_t * dest, const uint64_t * src, size_t count)
{
	do {
		*dest++ = *src++; *dest++ = *src++;
		*dest++ = *src++; *dest++ = *src++;
	} while (count -= 4);
}

/* XOR `count` 64-bit words into dest; same count contract as blkcpy(). */
static inline void
blkxor(uint64_t * dest, const uint64_t * src, size_t count)
{
	do {
		*dest++ ^= *src++; *dest++ ^= *src++;
		*dest++ ^= *src++; *dest++ ^= *src++;
	} while (count -= 4);
}

/* One 64-byte Salsa20 block, viewable as 16 32-bit or 8 64-bit words. */
typedef union {
	uint32_t w[16];
	uint64_t d[8];
} salsa20_blk_t;

/* Pack a canonical Salsa20 block into the SIMD-shuffled 64-bit layout. */
static inline void
salsa20_simd_shuffle(const salsa20_blk_t * Bin, salsa20_blk_t * Bout)
{
#define COMBINE(out, in1, in2) \
	Bout->d[out] = Bin->w[in1 * 2] | ((uint64_t)Bin->w[in2 * 2 + 1] << 32);
	COMBINE(0, 0, 2)
	COMBINE(1, 5, 7)
	COMBINE(2, 2, 4)
	COMBINE(3, 7, 1)
	COMBINE(4, 4, 6)
	COMBINE(5, 1, 3)
	COMBINE(6, 6, 0)
	COMBINE(7, 3, 5)
#undef COMBINE
}

/* Inverse of salsa20_simd_shuffle(). */
static inline void
salsa20_simd_unshuffle(const salsa20_blk_t * Bin, salsa20_blk_t * Bout)
{
#define COMBINE(out, in1, in2) \
	Bout->w[out * 2] = Bin->d[in1]; \
	Bout->w[out * 2 + 1] = Bin->d[in2] >> 32;
	COMBINE(0, 0, 6)
	COMBINE(1, 5, 3)
	COMBINE(2, 2, 0)
	COMBINE(3, 7, 5)
	COMBINE(4, 4, 2)
	COMBINE(5, 1, 7)
	COMBINE(6, 6, 4)
	COMBINE(7, 3, 1)
#undef COMBINE
}

/**
 * salsa20_8(B):
 * Apply the salsa20/8 core to the provided block.  B is in the shuffled
 * 64-bit layout; it is unshuffled, transformed, and the feed-forward sum
 * is added back in shuffled form.
 */
static void
salsa20_8(uint64_t B[8])
{
	size_t i;
	salsa20_blk_t X;
#define x X.w

	salsa20_simd_unshuffle((const salsa20_blk_t *)B, &X);

	for (i = 0; i < 8; i += 2) {
#define R(a,b) (((a) << (b)) | ((a) >> (32 - (b))))
		/* Operate on columns */
		x[ 4] ^= R(x[ 0]+x[12], 7);  x[ 8] ^= R(x[ 4]+x[ 0], 9);
		x[12] ^= R(x[ 8]+x[ 4],13);  x[ 0] ^= R(x[12]+x[ 8],18);

		x[ 9] ^= R(x[ 5]+x[ 1], 7);  x[13] ^= R(x[ 9]+x[ 5], 9);
		x[ 1] ^= R(x[13]+x[ 9],13);  x[ 5] ^= R(x[ 1]+x[13],18);

		x[14] ^= R(x[10]+x[ 6], 7);  x[ 2] ^= R(x[14]+x[10], 9);
		x[ 6] ^= R(x[ 2]+x[14],13);  x[10] ^= R(x[ 6]+x[ 2],18);

		x[ 3] ^= R(x[15]+x[11], 7);  x[ 7] ^= R(x[ 3]+x[15], 9);
		x[11] ^= R(x[ 7]+x[ 3],13);  x[15] ^= R(x[11]+x[ 7],18);

		/* Operate on rows */
		x[ 1] ^= R(x[ 0]+x[ 3], 7);  x[ 2] ^= R(x[ 1]+x[ 0], 9);
		x[ 3] ^= R(x[ 2]+x[ 1],13);  x[ 0] ^= R(x[ 3]+x[ 2],18);

		x[ 6] ^= R(x[ 5]+x[ 4], 7);  x[ 7] ^= R(x[ 6]+x[ 5], 9);
		x[ 4] ^= R(x[ 7]+x[ 6],13);  x[ 5] ^= R(x[ 4]+x[ 7],18);

		x[11] ^= R(x[10]+x[ 9], 7);  x[ 8] ^= R(x[11]+x[10], 9);
		x[ 9] ^= R(x[ 8]+x[11],13);  x[10] ^= R(x[ 9]+x[ 8],18);

		x[12] ^= R(x[15]+x[14], 7);  x[13] ^= R(x[12]+x[15], 9);
		x[14] ^= R(x[13]+x[12],13);  x[15] ^= R(x[14]+x[13],18);
#undef R
	}
#undef x

	/* Feed-forward: B += shuffled(X) */
	{
		salsa20_blk_t Y;
		salsa20_simd_shuffle(&X, &Y);
		for (i = 0; i < 16; i += 4) {
			((salsa20_blk_t *)B)->w[i] += Y.w[i];
			((salsa20_blk_t *)B)->w[i + 1] += Y.w[i + 1];
			((salsa20_blk_t *)B)->w[i + 2] += Y.w[i + 2];
			((salsa20_blk_t *)B)->w[i + 3] += Y.w[i + 3];
		}
	}
}

/**
 * blockmix_salsa8(Bin, Bout, X, r):
 * Compute Bout = BlockMix_{salsa20/8, r}(Bin).  The input Bin must be 128r
 * bytes in length; the output Bout must also be the same size.  The
 * temporary space X must be 64 bytes.
 */
static void
blockmix_salsa8(const uint64_t * Bin, uint64_t * Bout, uint64_t * X, size_t r)
{
	size_t i;

	/* 1: X <-- B_{2r - 1} */
	blkcpy(X, &Bin[(2 * r - 1) * 8], 8);

	/* 2: for i = 0 to 2r - 1 do */
	for (i = 0; i < 2 * r; i += 2) {
		/* 3: X <-- H(X \xor B_i) */
		blkxor(X, &Bin[i * 8], 8);
		salsa20_8(X);

		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		blkcpy(&Bout[i * 4], X, 8);

		/* 3: X <-- H(X \xor B_i) */
		blkxor(X, &Bin[i * 8 + 8], 8);
		salsa20_8(X);

		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		blkcpy(&Bout[i * 4 + r * 8], X, 8);
	}
}

/* These are tunable */
#define S_BITS 8
#define S_SIMD 2
#define S_P 4
#define S_ROUNDS 6

/* Number of S-boxes.  Not tunable, hard-coded in a few places. */
#define S_N 2

/* Derived values.  Not tunable on their own. */
#define S_SIZE1 (1 << S_BITS)
#define S_MASK ((S_SIZE1 - 1) * S_SIMD * 8)
#define S_MASK2 (((uint64_t)S_MASK << 32) | S_MASK)
#define S_SIZE_ALL (S_N * S_SIZE1 * S_SIMD)
#define S_P_SIZE (S_P * S_SIMD)
#define S_MIN_R ((S_P * S_SIMD + 15) / 16)

/**
 * pwxform(B):
 * Transform the provided block using the provided S-boxes.
 */
static void
block_pwxform(uint64_t * B, const uint64_t * S)
{
	uint64_t (*X)[S_SIMD] = (uint64_t (*)[S_SIMD])B;
	const uint8_t *S0 = (const uint8_t *)S;
	const uint8_t *S1 = (const uint8_t *)(S + S_SIZE1 * S_SIMD);
	size_t i, j;
#if S_SIMD > 2
	size_t k;
#endif

	for (j = 0; j < S_P; j++) {
		uint64_t *Xj = X[j];
		uint64_t x0 = Xj[0];
#if S_SIMD > 1
		uint64_t x1 = Xj[1];
#endif

		for (i = 0; i < S_ROUNDS; i++) {
			/* low/high halves of x0 index the two S-boxes */
			uint64_t x = x0 & S_MASK2;
			const uint64_t *p0, *p1;

			p0 = (const uint64_t *)(S0 + (uint32_t)x);
			p1 = (const uint64_t *)(S1 + (x >> 32));

			x0 = (uint64_t)(x0 >> 32) * (uint32_t)x0;
			x0 += p0[0];
			x0 ^= p1[0];

#if S_SIMD > 1
			x1 = (uint64_t)(x1 >> 32) * (uint32_t)x1;
			x1 += p0[1];
			x1 ^= p1[1];
#endif

#if S_SIMD > 2
			for (k = 2; k < S_SIMD; k++) {
				x = Xj[k];

				x = (uint64_t)(x >> 32) * (uint32_t)x;
				x += p0[k];
				x ^= p1[k];

				Xj[k] = x;
			}
#endif
		}

		Xj[0] = x0;
#if S_SIMD > 1
		Xj[1] = x1;
#endif
	}
}

/**
 * blockmix_pwxform(Bin, Bout, S, r):
 * Compute Bout = BlockMix_pwxform{salsa20/8, S, r}(Bin).  The input Bin must
 * be 128r bytes in length; the output Bout must also be the same size.
 *
 * S lacks const qualifier to match blockmix_salsa8()'s prototype, which we
 * need to refer to both functions via the same function pointers.
 */
static void
blockmix_pwxform(const uint64_t * Bin, uint64_t * Bout, uint64_t * S, size_t r)
{
	size_t r1, r2, i;

	/* Convert 128-byte blocks to (S_P_SIZE * 64-bit) blocks */
	r1 = r * 128 / (S_P_SIZE * 8);

	/* X <-- B_{r1 - 1} */
	blkcpy(Bout, &Bin[(r1 - 1) * S_P_SIZE], S_P_SIZE);

	/* X <-- X \xor B_i */
	blkxor(Bout, Bin, S_P_SIZE);

	/* X <-- H'(X) */
	/* B'_i <-- X */
	block_pwxform(Bout, S);

	/* for i = 0 to r1 - 1 do */
	for (i = 1; i < r1; i++) {
		/* X <-- X \xor B_i */
		blkcpy(&Bout[i * S_P_SIZE], &Bout[(i - 1) * S_P_SIZE],
		    S_P_SIZE);
		blkxor(&Bout[i * S_P_SIZE], &Bin[i * S_P_SIZE], S_P_SIZE);

		/* X <-- H'(X) */
		/* B'_i <-- X */
		block_pwxform(&Bout[i * S_P_SIZE], S);
	}

	/* Handle partial blocks */
	if (i * S_P_SIZE < r * 16)
		blkcpy(&Bout[i * S_P_SIZE], &Bin[i * S_P_SIZE],
		    r * 16 - i * S_P_SIZE);

	i = (r1 - 1) * S_P_SIZE / 8;
	/* Convert 128-byte blocks to 64-byte blocks */
	r2 = r * 2;

	/* B'_i <-- H(B'_i) */
	salsa20_8(&Bout[i * 8]);
	i++;

	for (; i < r2; i++) {
		/* B'_i <-- H(B'_i \xor B'_{i-1}) */
		blkxor(&Bout[i * 8], &Bout[(i - 1) * 8], 8);
		salsa20_8(&Bout[i * 8]);
	}
}

/**
 * integerify(B, r):
 * Return the result of parsing B_{2r-1} as a little-endian integer.
 */
static inline uint64_t
integerify(const uint64_t * B, size_t r)
{
/*
 * Our 64-bit words are in host byte order, and word 6 holds the second 32-bit
 * word of B_{2r-1} due to SIMD shuffling.  The 64-bit value we return is also
 * in host byte order, as it should be.
 */
	const uint64_t * X = &B[(2 * r - 1) * 8];
	uint32_t lo = X[0];
	uint32_t hi = X[6] >> 32;
	return ((uint64_t)hi << 32) + lo;
}

/**
 * smix1(B, r, N, flags, V, NROM, shared, XY, S):
 * Compute first loop of B = SMix_r(B, N).  The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r + 64 bytes in length.  The value N must be even and
 * no smaller than 2.
 */
static void
smix1(uint64_t * B, size_t r, uint64_t N, yescrypt_flags_t flags,
    uint64_t * V, uint64_t NROM, const yescrypt_shared_t * shared,
    uint64_t * XY, uint64_t * S)
{
	/* pwxform mixing when S-boxes are supplied, classic salsa8 otherwise */
	void (*blockmix)(const uint64_t *, uint64_t *, uint64_t *, size_t) =
	    (S ? blockmix_pwxform : blockmix_salsa8);
	const uint64_t * VROM = shared->shared1.aligned;
	uint32_t VROM_mask = shared->mask1;
	size_t s = 16 * r;
	uint64_t * X = V;
	uint64_t * Y = &XY[s];
	uint64_t * Z = S ? S : &XY[2 * s];
	uint64_t n, i, j;
	size_t k;

	/* 1: X <-- B */
	/* 3: V_i <-- X */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&B[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&X[i * 8];
		for (k = 0; k < 16; k++)
			tmp->w[k] = le32dec(&src->w[k]);
		salsa20_simd_shuffle(tmp, dst);
	}

	/* 4: X <-- H(X) */
	/* 3: V_i <-- X */
	blockmix(X, Y, Z, r);
	blkcpy(&V[s], Y, s);

	X = XY;

	if (NROM && (VROM_mask & 1)) {
		/* NOTE(review): this inner test repeats the (VROM_mask & 1)
		 * condition already established by the enclosing if; it is
		 * always true here.  Kept verbatim. */
		if ((1 & VROM_mask) == 1) {
			/* j <-- Integerify(X) mod NROM */
			j = integerify(Y, r) & (NROM - 1);

			/* X <-- H(X \xor VROM_j) */
			blkxor(Y, &VROM[j * s], s);
		}

		blockmix(Y, X, Z, r);

		/* 2: for i = 0 to N - 1 do */
		for (n = 1, i = 2; i < N; i += 2) {
			/* 3: V_i <-- X */
			blkcpy(&V[i * s], X, s);

			if ((i & (i - 1)) == 0)
				n <<= 1;

			/* j <-- Wrap(Integerify(X), i) */
			j = integerify(X, r) & (n - 1);
			j += i - n;

			/* X <-- X \xor V_j */
			blkxor(X, &V[j * s], s);

			/* 4: X <-- H(X) */
			blockmix(X, Y, Z, r);

			/* 3: V_i <-- X */
			blkcpy(&V[(i + 1) * s], Y, s);

			j = integerify(Y, r);
			if (((i + 1) & VROM_mask) == 1) {
				/* j <-- Integerify(X) mod NROM */
				j &= NROM - 1;

				/* X <-- H(X \xor VROM_j) */
				blkxor(Y, &VROM[j * s], s);
			} else {
				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i + 1 - n;

				/* X <-- H(X \xor V_j) */
				blkxor(Y, &V[j * s], s);
			}

			blockmix(Y, X, Z, r);
		}
	} else {
		yescrypt_flags_t rw = flags & YESCRYPT_RW;

		/* 4: X <-- H(X) */
		blockmix(Y, X, Z, r);

		/* 2: for i = 0 to N - 1 do */
		for (n = 1, i = 2; i < N; i += 2) {
			/* 3: V_i <-- X */
			blkcpy(&V[i * s], X, s);

			if (rw) {
				if ((i & (i - 1)) == 0)
					n <<= 1;

				/* j <-- Wrap(Integerify(X), i) */
				j = integerify(X, r) & (n - 1);
				j += i - n;

				/* X <-- X \xor V_j */
				blkxor(X, &V[j * s], s);
			}

			/* 4: X <-- H(X) */
			blockmix(X, Y, Z, r);

			/* 3: V_i <-- X */
			blkcpy(&V[(i + 1) * s], Y, s);

			if (rw) {
				/* j <-- Wrap(Integerify(X), i) */
				j = integerify(Y, r) & (n - 1);
				j += (i + 1) - n;

				/* X <-- X \xor V_j */
				blkxor(Y, &V[j * s], s);
			}

			/* 4: X <-- H(X) */
			blockmix(Y, X, Z, r);
		}
	}

	/* B' <-- X */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&X[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&B[i * 8];
		for (k = 0; k < 16; k++)
			le32enc(&tmp->w[k], src->w[k]);
		salsa20_simd_unshuffle(tmp, dst);
	}
}

/**
 * smix2(B, r, N, Nloop, flags, V, NROM, shared, XY, S):
 * Compute second loop of B = SMix_r(B, N).  The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r + 64 bytes in length.  The value N must be a
 * power of 2 greater than 1.  The value Nloop must be even.
 */
static void
smix2(uint64_t * B, size_t r, uint64_t N, uint64_t Nloop,
    yescrypt_flags_t flags, uint64_t * V, uint64_t NROM,
    const yescrypt_shared_t * shared, uint64_t * XY, uint64_t * S)
{
	void (*blockmix)(const uint64_t *, uint64_t *, uint64_t *, size_t) =
	    (S ? blockmix_pwxform : blockmix_salsa8);
	const uint64_t * VROM = shared->shared1.aligned;
	uint32_t VROM_mask = shared->mask1 | 1;
	size_t s = 16 * r;
	yescrypt_flags_t rw = flags & YESCRYPT_RW;
	uint64_t * X = XY;
	uint64_t * Y = &XY[s];
	uint64_t * Z = S ? S : &XY[2 * s];
	uint64_t i, j;
	size_t k;

	if (Nloop == 0)
		return;

	/* X <-- B' */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&B[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&X[i * 8];
		for (k = 0; k < 16; k++)
			tmp->w[k] = le32dec(&src->w[k]);
		salsa20_simd_shuffle(tmp, dst);
	}

	if (NROM) {
		/* 6: for i = 0 to N - 1 do */
		for (i = 0; i < Nloop; i += 2) {
			/* 7: j <-- Integerify(X) mod N */
			j = integerify(X, r) & (N - 1);

			/* 8: X <-- H(X \xor V_j) */
			blkxor(X, &V[j * s], s);
			/* V_j <-- Xprev \xor V_j */
			if (rw)
				blkcpy(&V[j * s], X, s);
			blockmix(X, Y, Z, r);

			j = integerify(Y, r);
			if (((i + 1) & VROM_mask) == 1) {
				/* j <-- Integerify(X) mod NROM */
				j &= NROM - 1;

				/* X <-- H(X \xor VROM_j) */
				blkxor(Y, &VROM[j * s], s);
			} else {
				/* 7: j <-- Integerify(X) mod N */
				j &= N - 1;

				/* 8: X <-- H(X \xor V_j) */
				blkxor(Y, &V[j * s], s);
				/* V_j <-- Xprev \xor V_j */
				if (rw)
					blkcpy(&V[j * s], Y, s);
			}

			blockmix(Y, X, Z, r);
		}
	} else {
		/* 6: for i = 0 to N - 1 do */
		i = Nloop / 2;
		do {
			/* 7: j <-- Integerify(X) mod N */
			j = integerify(X, r) & (N - 1);

			/* 8: X <-- H(X \xor V_j) */
			blkxor(X, &V[j * s], s);
			/* V_j <-- Xprev \xor V_j */
			if (rw)
				blkcpy(&V[j * s], X, s);
			blockmix(X, Y, Z, r);

			/* 7: j <-- Integerify(X) mod N */
			j = integerify(Y, r) & (N - 1);

			/* 8: X <-- H(X \xor V_j) */
			blkxor(Y, &V[j * s], s);
			/* V_j <-- Xprev \xor V_j */
			if (rw)
				blkcpy(&V[j * s], Y, s);
			blockmix(Y, X, Z, r);
		} while (--i);
	}

	/* 10: B' <-- X */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&X[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&B[i * 8];
		for (k = 0; k < 16; k++)
			le32enc(&tmp->w[k], src->w[k]);
		salsa20_simd_unshuffle(tmp, dst);
	}
}

/**
 * p2floor(x):
 * Largest power of 2 not greater than argument.
 */
static uint64_t
p2floor(uint64_t x)
{
	uint64_t y;
	/* repeatedly clear the lowest set bit until one bit remains */
	while ((y = x & (x - 1)))
		x = y;
	return x;
}

/**
 * smix(B, r, N, p, t, flags, V, NROM, shared, XY, S):
 * Compute B = SMix_r(B, N).  The input B must be 128rp bytes in length; the
 * temporary storage V must be 128rN bytes in length; the temporary storage
 * XY must be 256r+64 or (256r+64)*p bytes in length (the larger size is
 * required with OpenMP-enabled builds).  The value N must be a power of 2
 * greater than 1.
 */
static void
smix(uint64_t * B, size_t r, uint64_t N, uint32_t p, uint32_t t,
    yescrypt_flags_t flags,
    uint64_t * V, uint64_t NROM, const yescrypt_shared_t * shared,
    uint64_t * XY, uint64_t * S)
{
	size_t s = 16 * r;
	uint64_t Nchunk = N / p, Nloop_all, Nloop_rw;
	uint32_t i;

	/* Derive the smix2 iteration count from the time parameter t */
	Nloop_all = Nchunk;
	if (flags & YESCRYPT_RW) {
		if (t <= 1) {
			if (t)
				Nloop_all *= 2; /* 2/3 */
			Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */
		} else {
			Nloop_all *= t - 1;
		}
	} else if (t) {
		if (t == 1)
			Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */
		Nloop_all *= t;
	}

	Nloop_rw = 0;
	if (flags & __YESCRYPT_INIT_SHARED)
		Nloop_rw = Nloop_all;
	else if (flags & YESCRYPT_RW)
		Nloop_rw = Nloop_all / p;

	Nchunk &= ~(uint64_t)1; /* round down to even */
	Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */
	Nloop_rw &= ~(uint64_t)1; /* round down to even */

#ifdef _OPENMP
#pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, shared, XY, S, s, Nchunk, Nloop_all, Nloop_rw)
	{
#pragma omp for
#endif
	/* Per-thread phase: each of the p lanes fills its own V chunk */
	for (i = 0; i < p; i++) {
		uint64_t Vchunk = i * Nchunk;
		uint64_t * Bp = &B[i * s];
		uint64_t * Vp = &V[Vchunk * s];
#ifdef _OPENMP
		uint64_t * XYp = &XY[i * (2 * s + 8)];
#else
		uint64_t * XYp = XY;
#endif
		/* last lane absorbs the rounding remainder of N/p */
		uint64_t Np = (i < p - 1) ? Nchunk : (N - Vchunk);
		uint64_t * Sp = S ? &S[i * S_SIZE_ALL] : S;
		if (Sp)
			/* initialize this lane's S-boxes (pwxform disabled
			 * while doing so, to avoid self-reference) */
			smix1(Bp, 1, S_SIZE_ALL / 16,
			    flags & ~YESCRYPT_PWXFORM,
			    Sp, NROM, shared, XYp, NULL);
		if (!(flags & __YESCRYPT_INIT_SHARED_2))
			smix1(Bp, r, Np, flags, Vp, NROM, shared, XYp, Sp);
		smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp, NROM, shared,
		    XYp, Sp);
	}

	/* Shared phase: remaining smix2 iterations over the whole V,
	 * with read-write disabled */
	if (Nloop_all > Nloop_rw) {
#ifdef _OPENMP
#pragma omp for
#endif
		for (i = 0; i < p; i++) {
			uint64_t * Bp = &B[i * s];
#ifdef _OPENMP
			uint64_t * XYp = &XY[i * (2 * s + 8)];
#else
			uint64_t * XYp = XY;
#endif
			uint64_t * Sp = S ? &S[i * S_SIZE_ALL] : S;
			smix2(Bp, r, N, Nloop_all - Nloop_rw,
			    flags & ~YESCRYPT_RW, V, NROM, shared, XYp, Sp);
		}
	}
#ifdef _OPENMP
	}
#endif
}

/**
 * yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen,
 *     N, r, p, t, flags, buf, buflen):
 * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
 * p, buflen), or a revision of scrypt as requested by flags and shared, and
 * write the result into buf.  The parameters r, p, and buflen must satisfy
 * r * p < 2^30 and buflen <= (2^32 - 1) * 32.  The parameter N must be a power
 * of 2 greater than 1.
 *
 * t controls computation time while not affecting peak memory usage.  shared
 * and flags may request special modes as described in yescrypt.h.  local is
 * the thread-local data structure, allowing to preserve and reuse a memory
 * allocation across calls, thereby reducing its overhead.
 *
 * Return 0 on success; or -1 on error (with errno set to EINVAL, EFBIG,
 * or ENOMEM).
 */
int
yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
    const uint8_t * passwd, size_t passwdlen,
    const uint8_t * salt, size_t saltlen,
    uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags,
    uint8_t * buf, size_t buflen)
{
	yescrypt_region_t tmp;
	uint64_t NROM;
	size_t B_size, V_size, XY_size, need;
	uint64_t * B, * V, * XY, * S;
	uint64_t sha256[4];

/*
 * YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose,
 * so don't let it have side-effects.  Without this adjustment, it'd
 * enable the SHA-256 password pre-hashing and output post-hashing,
 * because any deviation from classic scrypt implies those.
 */
	if (p == 1)
		flags &= ~YESCRYPT_PARALLEL_SMIX;

	/* Sanity-check parameters */
	if (flags & ~YESCRYPT_KNOWN_FLAGS) {
		errno = EINVAL;
		return -1;
	}
#if SIZE_MAX > UINT32_MAX
	if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
		errno = EFBIG;
		return -1;
	}
#endif
	if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
		errno = EFBIG;
		return -1;
	}
	if (((N & (N - 1)) != 0) || (N <= 1) || (r < 1) || (p < 1)) {
		errno = EINVAL;
		return -1;
	}
	if ((flags & YESCRYPT_PARALLEL_SMIX) && (N / p <= 1)) {
		errno = EINVAL;
		return -1;
	}
#if S_MIN_R > 1
	if ((flags & YESCRYPT_PWXFORM) && (r < S_MIN_R)) {
		errno = EINVAL;
		return -1;
	}
#endif
	/* overflow checks for the allocation-size arithmetic below */
	if ((p > SIZE_MAX / ((size_t)256 * r + 64)) ||
#if SIZE_MAX / 256 <= UINT32_MAX
	    (r > SIZE_MAX / 256) ||
#endif
	    (N > SIZE_MAX / 128 / r)) {
		errno = ENOMEM;
		return -1;
	}
	if (N > UINT64_MAX / ((uint64_t)t + 1)) {
		errno = EFBIG;
		return -1;
	}
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX) &&
	    (N > SIZE_MAX / 128 / (r * p))) {
		errno = ENOMEM;
		return -1;
	}
#endif
	if ((flags & YESCRYPT_PWXFORM) &&
#ifndef _OPENMP
	    (flags & YESCRYPT_PARALLEL_SMIX) &&
#endif
	    p > SIZE_MAX / (S_SIZE_ALL * sizeof(*S))) {
		errno = ENOMEM;
		return -1;
	}

	NROM = 0;
	if (shared->shared1.aligned) {
		NROM = shared->shared1.aligned_size / ((size_t)128 * r);
		if (((NROM & (NROM - 1)) != 0) || (NROM <= 1) ||
		    !(flags & YESCRYPT_RW)) {
			errno = EINVAL;
			return -1;
		}
	}

	/* Allocate memory */
	V = NULL;
	V_size = (size_t)128 * r * N;
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX))
		V_size *= p;
#endif
	need = V_size;
	if (flags & __YESCRYPT_INIT_SHARED) {
		/* ROM initialization: V lives in the caller's region */
		if (local->aligned_size < need) {
			if (local->base || local->aligned ||
			    local->base_size || local->aligned_size) {
				errno = EINVAL;
				return -1;
			}
			if (!alloc_region(local, need))
				return -1;
		}
		V = (uint64_t *)local->aligned;
		need = 0;
	}
	B_size = (size_t)128 * r * p;
	need += B_size;
	if (need < B_size) {
		errno = ENOMEM;
		return -1;
	}
	XY_size = (size_t)256 * r + 64;
#ifdef _OPENMP
	XY_size *= p;
#endif
	need += XY_size;
	if (need < XY_size) {
		errno = ENOMEM;
		return -1;
	}
	if (flags & YESCRYPT_PWXFORM) {
		size_t S_size = S_SIZE_ALL * sizeof(*S);
#ifdef _OPENMP
		S_size *= p;
#else
		if (flags & YESCRYPT_PARALLEL_SMIX)
			S_size *= p;
#endif
		need += S_size;
		if (need < S_size) {
			errno = ENOMEM;
			return -1;
		}
	}
	if (flags & __YESCRYPT_INIT_SHARED) {
		if (!alloc_region(&tmp, need))
			return -1;
		B = (uint64_t *)tmp.aligned;
		XY = (uint64_t *)((uint8_t *)B + B_size);
	} else {
		init_region(&tmp);
		if (local->aligned_size < need) {
			if (free_region(local))
				return -1;
			if (!alloc_region(local, need))
				return -1;
		}
		/* carve B, V, XY (and optionally S) out of one region */
		B = (uint64_t *)local->aligned;
		V = (uint64_t *)((uint8_t *)B + B_size);
		XY = (uint64_t *)((uint8_t *)V + V_size);
	}
	S = NULL;
	if (flags & YESCRYPT_PWXFORM)
		S = (uint64_t *)((uint8_t *)XY + XY_size);

	/* Any non-classic mode pre-hashes the password with SHA-256 */
	if (t || flags) {
		SHA256_CTX_Y ctx;
		SHA256_Init_Y(&ctx);
		SHA256_Update_Y(&ctx, passwd, passwdlen);
		SHA256_Final_Y((uint8_t *)sha256, &ctx);
		passwd = (uint8_t *)sha256;
		passwdlen = sizeof(sha256);
	}

	/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
	PBKDF2_SHA256_Y(passwd, passwdlen, salt, saltlen, 1,
	    (uint8_t *)B, B_size);

	if (t || flags)
		blkcpy(sha256, B, sizeof(sha256) / sizeof(sha256[0]));

	if (p == 1 || (flags & YESCRYPT_PARALLEL_SMIX)) {
		smix(B, r, N, p, t, flags, V, NROM, shared, XY, S);
	} else {
		uint32_t i;

		/* 2: for i = 0 to p - 1 do */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, shared, XY, S)
#endif
		for (i = 0; i < p; i++) {
			/* 3: B_i <-- MF(B_i, N) */
#ifdef _OPENMP
			smix(&B[(size_t)16 * r * i], r, N, 1, t, flags,
			    &V[(size_t)16 * r * i * N],
			    NROM, shared,
			    &XY[((size_t)32 * r + 8) * i],
			    S ? &S[S_SIZE_ALL * i] : S);
#else
			smix(&B[(size_t)16 * r * i], r, N, 1, t, flags, V,
			    NROM, shared, XY, S);
#endif
		}
	}

	/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
	PBKDF2_SHA256_Y(passwd, passwdlen, (uint8_t *)B, B_size, 1,
	    buf, buflen);

/*
 * Except when computing classic scrypt, allow all computation so far
 * to be performed on the client.  The final steps below match those of
 * SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so
 * far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of
 * SCRAM's use of SHA-1) would be usable with yescrypt hashes.
 */
	if ((t || flags) && buflen == sizeof(sha256)) {
		/* Compute ClientKey.  NOTE(review): the HMAC label is
		 * selected by r — a coin-specific variant dispatch; the
		 * "WaviBanana" string for r == 32 is a deliberate fork
		 * personalization, not standard yescrypt. */
		{
			HMAC_SHA256_CTX_Y ctx;
			HMAC_SHA256_Init_Y(&ctx, buf, buflen);
			if (r == 32) { // yescryptR32
				HMAC_SHA256_Update_Y(&ctx, "WaviBanana", 10);
			} else if (r == 16) { // yescryptR16
				HMAC_SHA256_Update_Y(&ctx, "Client Key", 10);
			} else if (r == 8) { // yescryptR8
				HMAC_SHA256_Update_Y(&ctx, "Client Key", 10);
			} else { // yescrypt
				HMAC_SHA256_Update_Y(&ctx, salt, saltlen);
			}
			HMAC_SHA256_Final_Y((uint8_t *)sha256, &ctx);
		}
		/* Compute StoredKey */
		{
			SHA256_CTX_Y ctx;
			SHA256_Init_Y(&ctx);
			SHA256_Update_Y(&ctx, (uint8_t *)sha256,
			    sizeof(sha256));
			SHA256_Final_Y(buf, &ctx);
		}
	}

	if (free_region(&tmp))
		return -1;

	/* Success! */
	return 0;
}
rose_reduction.c
/*
 * Test for automatic recognition of reduction variables
 */
#include "omp.h"

int a[100];
int sum;

/* Exercises +, -, and * reductions in a single OpenMP parallel-for.
 * Side effects: fills a[0..99] with a[i] = i (then a[1] = 1 again),
 * and leaves sum == 0 + 1 + ... + 99 == 4950. */
void foo()
{
  int i;
  int sum2;
  /* Reduction variables must hold a defined value before the region:
   * OpenMP combines the original value of each list item with the
   * per-thread partial results, so reading them uninitialized is
   * undefined behavior.  Initialize each to its operator's identity. */
  int xx = 0;   /* identity for +  */
  int yy = 0;   /* identity for -  */
  int zz = 1;   /* identity for *  */
  sum = 0;
#pragma omp parallel for private (i) reduction (+:sum,xx) reduction (-:yy) reduction (*:zz)
  for (i = 0; i <= 99; i += 1) {
    a[i] = i;
    sum = a[i] + sum;
    xx++;
    yy--;
    zz *= a[i];
  }
  /* Consume the reduction results so they are observably live. */
  sum2 = sum + xx + yy + zz;
  (void) sum2;
  a[1] = 1;
}
settings.h
/*
Copyright (c) 2020, VSB - Technical University of Ostrava and Graz University of
Technology
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice, this
  list of conditions and the following disclaimer in the documentation and/or
  other materials provided with the distribution.

* Neither the names of VSB - Technical University of Ostrava and Graz University
  of Technology nor the names of its contributors may be used to endorse or
  promote products derived from this software without specific prior written
  permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL VSB - TECHNICAL UNIVERSITY OF OSTRAVA AND GRAZ
UNIVERSITY OF TECHNOLOGY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
*/

/** @file settings.h
 * @brief Besthea settings: global type aliases, alignment/constant macros, and
 *        MPI datatype traits used throughout the library.
 */

#ifndef INCLUDE_BESTHEA_SETTINGS_H_
#define INCLUDE_BESTHEA_SETTINGS_H_

#include "boost/align.hpp"
#include "mpi.h"

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <type_traits>
#include <vector>

#ifndef DATA_ALIGN
#define DATA_ALIGN 64  //!< Cache-line size in bytes (allocator alignment).
#endif

#ifndef M_PI
#define M_PI 3.14159265358979323846  //!< pi (fallback if <cmath> does not provide it)
#endif

// pragma to switch between cluster- and timestep-wise nearfield computation
// NOTE: this is only relevant in case of non-distributed pFMM
#define NEARFIELD_CLUSTERWISE  //!< Pragma to control nearfield computation

namespace besthea {
  using scalar = double;  //!< Floating point type.
  // using index = std::size_t;  //!< Indexing type (alternative, kept for reference).
  using index = long;  //!< Indexing type.
  using index_signed = std::make_signed< index >::type;  //!< Signed indexing type.
  using index_unsigned
    = std::make_unsigned< index >::type;  //!< Unsigned indexing type.
  using short_index = int16_t;  //!< Signed short integer.
  using short_index_unsigned
    = std::make_unsigned< short_index >::type;  //!< Unsigned short integer.
  template< class T >
  using allocator_type = boost::alignment::aligned_allocator< T,
    DATA_ALIGN >;  //!< Aligned allocator (DATA_ALIGN-byte boundaries).
}  // namespace besthea

// Short global aliases used throughout the code base.
using sc = besthea::scalar;  //!< Floating point type.
using lo = besthea::index;  //!< Indexing type.
using los = besthea::index_signed;  //!< Signed indexing type.
using lou = besthea::index_unsigned;  //!< Unsigned indexing type.
using slos = besthea::short_index;  //!< Short signed indexing type.
using slou = besthea::short_index_unsigned;  //!< Short unsigned indexing type.

// structures to deduce MPI datatypes

/**
 * Maps a C++ scalar type to the matching MPI datatype; only the
 * specializations below are usable (the primary template is empty).
 */
template< class scalar_type >
struct get_scalar_type {};

/**
 * Specialization mapping double to MPI_DOUBLE.
 */
template<>
struct get_scalar_type< double > {
  /**
   * Returns the MPI datatype corresponding to double.
   */
  static MPI_Datatype MPI_SC( ) {
    return MPI_DOUBLE;
  }
};

/**
 * Specialization mapping float to MPI_FLOAT.
 */
template<>
struct get_scalar_type< float > {
  /**
   * Returns the MPI datatype corresponding to float.
   */
  static MPI_Datatype MPI_SC( ) {
    return MPI_FLOAT;
  }
};

/**
 * Maps a C++ integer type to the matching MPI datatype; only the
 * specializations below are usable (the primary template is empty).
 */
template< class index_type >
struct get_index_type {};

/**
 * Specialization mapping int to MPI_INT.
 */
template<>
struct get_index_type< int > {
  /**
   * Returns the MPI datatype corresponding to int.
   */
  static MPI_Datatype MPI_LO( ) {
    return MPI_INT;
  }
};

/**
 * Specialization mapping long to MPI_LONG.
 */
template<>
struct get_index_type< long > {
  /**
   * Returns the MPI datatype corresponding to long.
   */
  static MPI_Datatype MPI_LO( ) {
    return MPI_LONG;
  }
};

/**
 * Specialization mapping unsigned long to MPI_UNSIGNED_LONG.
 */
template<>
struct get_index_type< unsigned long > {
  /**
   * Returns the MPI datatype corresponding to unsigned long.
   */
  static MPI_Datatype MPI_LO( ) {
    return MPI_UNSIGNED_LONG;
  }
};

// create custom OpenMP reductions
// replaced initializer(omp_priv(omp_orig))
// lo_vec_plus: element-wise sum of std::vector<lo>; each thread's private
// copy starts as a zero vector of the same size as the original.
#pragma omp declare reduction( lo_vec_plus : std::vector<lo> : \
  std::transform(omp_in.begin(), omp_in.end(), omp_out.begin(), \
    omp_out.begin(), std::plus<lo>()) ) \
  initializer(omp_priv = decltype(omp_orig)(omp_orig.size()))

#endif /* INCLUDE_BESTHEA_SETTINGS_H_ */
ast-dump-openmp-for.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp for for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp for for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp for collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp for collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp for collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl 0x{{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl 0x{{.*}} <{{.*}}ast-dump-openmp-for.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl 0x{{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt 0x{{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPForDirective 0x{{.*}} <line:4:1, col:16> // CHECK-NEXT: | `-CapturedStmt 0x{{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl 0x{{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt 0x{{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt 0x{{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl 0x{{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator 0x{{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr 0x{{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr 0x{{.*}} <col:19> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr 0x{{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:23> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: | | | 
|-UnaryOperator 0x{{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:26> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt 0x{{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl 0x{{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl 0x{{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr 0x{{.*}} <col:3> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl 0x{{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl 0x{{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl 0x{{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt 0x{{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPForDirective 0x{{.*}} <line:10:1, col:16> // CHECK-NEXT: | `-CapturedStmt 0x{{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl 0x{{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt 0x{{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt 0x{{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl 0x{{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator 0x{{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr 0x{{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr 0x{{.*}} <col:19> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr 0x{{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:23> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator 0x{{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} 
<col:26> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt 0x{{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt 0x{{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl 0x{{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral 0x{{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator 0x{{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr 0x{{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr 0x{{.*}} <col:21> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr 0x{{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:25> 'int' lvalue ParmVar 0x{{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator 0x{{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:28> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt 0x{{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl 0x{{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl 0x{{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl 0x{{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral 0x{{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr 0x{{.*}} <line:11:3> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr 0x{{.*}} <line:12:25> 'int' lvalue ParmVar 0x{{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl 0x{{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl 0x{{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl 0x{{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt 0x{{.*}} <col:31, line:21:1> // 
CHECK-NEXT: | `-OMPForDirective 0x{{.*}} <line:17:1, col:28> // CHECK-NEXT: | |-OMPCollapseClause 0x{{.*}} <col:17, col:27> // CHECK-NEXT: | | `-ConstantExpr 0x{{.*}} <col:26> 'int' // CHECK-NEXT: | | `-IntegerLiteral 0x{{.*}} <col:26> 'int' 1 // CHECK-NEXT: | `-CapturedStmt 0x{{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl 0x{{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt 0x{{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt 0x{{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl 0x{{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator 0x{{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr 0x{{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr 0x{{.*}} <col:19> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr 0x{{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:23> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator 0x{{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:26> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt 0x{{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt 0x{{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl 0x{{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral 0x{{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator 0x{{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr 0x{{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr 0x{{.*}} <col:21> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr 0x{{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 
0x{{.*}} <col:25> 'int' lvalue ParmVar 0x{{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator 0x{{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:28> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt 0x{{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl 0x{{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl 0x{{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl 0x{{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral 0x{{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr 0x{{.*}} <line:18:3> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr 0x{{.*}} <line:19:25> 'int' lvalue ParmVar 0x{{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl 0x{{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl 0x{{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl 0x{{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt 0x{{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPForDirective 0x{{.*}} <line:24:1, col:28> // CHECK-NEXT: | |-OMPCollapseClause 0x{{.*}} <col:17, col:27> // CHECK-NEXT: | | `-ConstantExpr 0x{{.*}} <col:26> 'int' // CHECK-NEXT: | | `-IntegerLiteral 0x{{.*}} <col:26> 'int' 2 // CHECK-NEXT: | `-CapturedStmt 0x{{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl 0x{{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt 0x{{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt 0x{{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl 0x{{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator 0x{{.*}} <col:19, col:23> 'int' 
'<' // CHECK-NEXT: | | | | |-ImplicitCastExpr 0x{{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr 0x{{.*}} <col:19> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr 0x{{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:23> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator 0x{{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:26> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt 0x{{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt 0x{{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl 0x{{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral 0x{{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator 0x{{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr 0x{{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr 0x{{.*}} <col:21> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr 0x{{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:25> 'int' lvalue ParmVar 0x{{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator 0x{{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:28> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt 0x{{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl 0x{{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl 0x{{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl 0x{{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral 0x{{.*}} <col:18> 'int' 0 // CHECK-NEXT: | 
|-DeclRefExpr 0x{{.*}} <line:25:3> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr 0x{{.*}} <line:26:5> 'int' lvalue ParmVar 0x{{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl 0x{{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl 0x{{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl 0x{{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl 0x{{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt 0x{{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPForDirective 0x{{.*}} <line:31:1, col:28> // CHECK-NEXT: |-OMPCollapseClause 0x{{.*}} <col:17, col:27> // CHECK-NEXT: | `-ConstantExpr 0x{{.*}} <col:26> 'int' // CHECK-NEXT: | `-IntegerLiteral 0x{{.*}} <col:26> 'int' 2 // CHECK-NEXT: `-CapturedStmt 0x{{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl 0x{{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | |-ForStmt 0x{{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt 0x{{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl 0x{{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator 0x{{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr 0x{{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:19> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr 0x{{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr 0x{{.*}} <col:23> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator 0x{{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr 0x{{.*}} <col:26> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt 0x{{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt 0x{{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl 0x{{.*}} <col:10, col:18> col:14 
used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral 0x{{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator 0x{{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr 0x{{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:21> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr 0x{{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr 0x{{.*}} <col:25> 'int' lvalue ParmVar 0x{{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator 0x{{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr 0x{{.*}} <col:28> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt 0x{{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | |-DeclStmt 0x{{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl 0x{{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral 0x{{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator 0x{{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr 0x{{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr 0x{{.*}} <col:23> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr 0x{{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr 0x{{.*}} <col:27> 'int' lvalue ParmVar 0x{{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator 0x{{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr 0x{{.*}} <col:30> 'int' lvalue Var 0x{{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt 0x{{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl 0x{{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl 0x{{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral 0x{{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl 0x{{.*}} 
<line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral 0x{{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl 0x{{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral 0x{{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr 0x{{.*}} <line:32:3> 'int' lvalue ParmVar 0x{{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr 0x{{.*}} <line:33:5> 'int' lvalue ParmVar 0x{{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr 0x{{.*}} <line:34:27> 'int' lvalue ParmVar 0x{{.*}} 'z' 'int'
GB_binop__band_uint32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// NOTE(review): this file is machine-generated (BAND operator, uint32 type).
// Any improvement belongs in the code generator / templates, not here; only
// comments were added in this pass.  Each function body is produced by a
// #include'd template that expands the GB_* macros defined below.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):       GB_AaddB__band_uint32
// A.*B function (eWiseMult):     GB_AemultB__band_uint32
// A*D function (colscale):       GB_AxD__band_uint32
// D*A function (rowscale):       GB_DxB__band_uint32
// C+=B function (dense accum):   GB_Cdense_accumB__band_uint32
// C+=b function (dense accum):   GB_Cdense_accumb__band_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__band_uint32
// C=scalar+B                     GB_bind1st__band_uint32
// C=scalar+B'                    GB_bind1st_tran__band_uint32
// C=A+scalar                     GB_bind2nd__band_uint32
// C=A'+scalar                    GB_bind2nd_tran__band_uint32

// C type:   uint32_t
// A type:   uint32_t
// B,b type: uint32_t

// BinaryOp: cij = (aij) & (bij)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: bitwise AND
#define GB_BINOP(z, x, y, i, j) \
    z = (x) & (y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BAND || GxB_NO_UINT32 || GxB_NO_BAND_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Not instantiated for BAND, hence the #if 0 and the "(none)" placeholder.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__band_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__band_uint32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__band_uint32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return; harmless generator artifact.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__band_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__band_uint32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Frees the ek_slice workspaces allocated inside the add/emult templates.
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;      \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;      \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;      \
}

GrB_Info GB_AaddB__band_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace pointers filled in (and freed via GB_FREE_ALL) by the template
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__band_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace pointers filled in (and freed via GB_FREE_ALL) by the template
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__band_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Bb (if any)
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = Bx [p] ;
        Cx [p] = (x) & (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__band_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab (if any)
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = Ax [p] ;
        Cx [p] = (aij) & (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint32_t aij = Ax [pA] ;        \
    Cx [pC] = (x) & (aij) ;         \
}

GrB_Info GB_bind1st_tran__band_uint32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the rest of the file (preprocessor-only; no
    // runtime code follows the return above)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint32_t aij = Ax [pA] ;        \
    Cx [pC] = (aij) & (y) ;         \
}

GrB_Info GB_bind2nd_tran__band_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
// feature_group.h
/*!
 * Copyright (c) 2017 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_FEATURE_GROUP_H_
#define LIGHTGBM_FEATURE_GROUP_H_

#include <LightGBM/bin.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/random.h>

#include <cstdio>
#include <memory>
#include <vector>

namespace LightGBM {

class Dataset;
class DatasetLoader;

/*! \brief Using to store data and providing some operations on one feature group*/
class FeatureGroup {
 public:
  friend Dataset;
  friend DatasetLoader;
  /*!
   * \brief Constructor
   * \param num_feature number of features of this group
   * \param is_multi_val True if this group stores one Bin per feature (multi-value group)
   * \param bin_mappers Bin mappers for features; ownership of each mapper is taken (released from the vector)
   * \param num_data Total number of data
   */
  FeatureGroup(int num_feature, bool is_multi_val, std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
               data_size_t num_data)
      : num_feature_(num_feature), is_multi_val_(is_multi_val), is_sparse_(false) {
    CHECK(static_cast<int>(bin_mappers->size()) == num_feature);
    // use bin at zero to store most_freq_bin
    num_total_bin_ = 1;
    bin_offsets_.emplace_back(num_total_bin_);
    for (int i = 0; i < num_feature_; ++i) {
      bin_mappers_.emplace_back(bin_mappers->at(i).release());
      auto num_bin = bin_mappers_[i]->num_bin();
      // when the most frequent bin is 0 it shares the reserved zero slot,
      // so one fewer bin needs to be stored for this feature
      if (bin_mappers_[i]->GetMostFreqBin() == 0) {
        num_bin -= 1;
      }
      num_total_bin_ += num_bin;
      bin_offsets_.emplace_back(num_total_bin_);
    }
    if (is_multi_val_) {
      multi_bin_data_.clear();
      for (int i = 0; i < num_feature_; ++i) {
        int addi = bin_mappers_[i]->GetMostFreqBin() == 0 ? 0 : 1;
        // per-feature bin: choose sparse or dense storage by the feature's sparse rate
        if (bin_mappers_[i]->sparse_rate() >= kSparseThreshold) {
          multi_bin_data_.emplace_back(Bin::CreateSparseBin(
              num_data, bin_mappers_[i]->num_bin() + addi));
        } else {
          multi_bin_data_.emplace_back(
              Bin::CreateDenseBin(num_data, bin_mappers_[i]->num_bin() + addi));
        }
      }
    } else {
      bin_data_.reset(Bin::CreateDenseBin(num_data, num_total_bin_));
    }
  }

  /*!
   * \brief Constructor for a single-feature group
   * \param bin_mappers Bin mappers (must contain exactly one); ownership is taken
   * \param num_data Total number of data
   */
  FeatureGroup(std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
               data_size_t num_data) : num_feature_(1), is_multi_val_(false) {
    CHECK(static_cast<int>(bin_mappers->size()) == 1);
    // use bin at zero to store default_bin
    num_total_bin_ = 1;
    bin_offsets_.emplace_back(num_total_bin_);
    for (int i = 0; i < num_feature_; ++i) {
      bin_mappers_.emplace_back(bin_mappers->at(i).release());
      auto num_bin = bin_mappers_[i]->num_bin();
      if (bin_mappers_[i]->GetMostFreqBin() == 0) {
        num_bin -= 1;
      }
      num_total_bin_ += num_bin;
      bin_offsets_.emplace_back(num_total_bin_);
    }
    if (bin_mappers_[0]->sparse_rate() >= kSparseThreshold) {
      is_sparse_ = true;
      bin_data_.reset(Bin::CreateSparseBin(num_data, num_total_bin_));
    } else {
      is_sparse_ = false;
      bin_data_.reset(Bin::CreateDenseBin(num_data, num_total_bin_));
    }
  }

  /*!
   * \brief Constructor from memory
   * \param memory Pointer of memory
   * \param num_all_data Number of global data
   * \param local_used_indices Local used indices, empty means using all data
   */
  FeatureGroup(const void* memory, data_size_t num_all_data,
               const std::vector<data_size_t>& local_used_indices) {
    const char* memory_ptr = reinterpret_cast<const char*>(memory);
    // layout: is_multi_val_ | is_sparse_ | num_feature_ | bin mappers | bin data
    // (must match SaveBinaryToFile below)
    is_multi_val_ = *(reinterpret_cast<const bool*>(memory_ptr));
    memory_ptr += sizeof(is_multi_val_);
    is_sparse_ = *(reinterpret_cast<const bool*>(memory_ptr));
    memory_ptr += sizeof(is_sparse_);
    num_feature_ = *(reinterpret_cast<const int*>(memory_ptr));
    memory_ptr += sizeof(num_feature_);
    // get bin mapper
    bin_mappers_.clear();
    bin_offsets_.clear();
    // start from 1, due to need to store zero bin in this slot
    num_total_bin_ = 1;
    bin_offsets_.emplace_back(num_total_bin_);
    for (int i = 0; i < num_feature_; ++i) {
      bin_mappers_.emplace_back(new BinMapper(memory_ptr));
      auto num_bin = bin_mappers_[i]->num_bin();
      if (bin_mappers_[i]->GetMostFreqBin() == 0) {
        num_bin -= 1;
      }
      num_total_bin_ += num_bin;
      bin_offsets_.emplace_back(num_total_bin_);
      memory_ptr += bin_mappers_[i]->SizesInByte();
    }
    data_size_t num_data = num_all_data;
    if (!local_used_indices.empty()) {
      num_data = static_cast<data_size_t>(local_used_indices.size());
    }
    if (is_multi_val_) {
      for (int i = 0; i < num_feature_; ++i) {
        int addi = bin_mappers_[i]->GetMostFreqBin() == 0 ? 0 : 1;
        if (bin_mappers_[i]->sparse_rate() >= kSparseThreshold) {
          multi_bin_data_.emplace_back(Bin::CreateSparseBin(
              num_data, bin_mappers_[i]->num_bin() + addi));
        } else {
          multi_bin_data_.emplace_back(
              Bin::CreateDenseBin(num_data, bin_mappers_[i]->num_bin() + addi));
        }
        multi_bin_data_.back()->LoadFromMemory(memory_ptr, local_used_indices);
        memory_ptr += multi_bin_data_.back()->SizesInByte();
      }
    } else {
      if (is_sparse_) {
        bin_data_.reset(Bin::CreateSparseBin(num_data, num_total_bin_));
      } else {
        bin_data_.reset(Bin::CreateDenseBin(num_data, num_total_bin_));
      }
      // get bin data
      bin_data_->LoadFromMemory(memory_ptr, local_used_indices);
    }
  }

  /*! \brief Destructor */
  ~FeatureGroup() {
  }

  /*!
   * \brief Push one record, will auto convert to bin and push to bin data
   * \param tid Thread id
   * \param sub_feature_idx Index of the feature within this group
   * \param line_idx Index of record
   * \param value feature value of record
   */
  inline void PushData(int tid, int sub_feature_idx, data_size_t line_idx, double value) {
    uint32_t bin = bin_mappers_[sub_feature_idx]->ValueToBin(value);
    // the most frequent bin is implicit -- nothing is stored for it
    if (bin == bin_mappers_[sub_feature_idx]->GetMostFreqBin()) {
      return;
    }
    if (bin_mappers_[sub_feature_idx]->GetMostFreqBin() == 0) {
      bin -= 1;
    }
    if (is_multi_val_) {
      multi_bin_data_[sub_feature_idx]->Push(tid, line_idx, bin + 1);
    } else {
      bin += bin_offsets_[sub_feature_idx];
      bin_data_->Push(tid, line_idx, bin);
    }
  }

  // Copy the rows listed in used_indices from another (full) feature group.
  inline void CopySubset(const FeatureGroup* full_feature, const data_size_t* used_indices, data_size_t num_used_indices) {
    if (!is_multi_val_) {
      bin_data_->CopySubset(full_feature->bin_data_.get(), used_indices, num_used_indices);
    } else {
      for (int i = 0; i < num_feature_; ++i) {
        multi_bin_data_[i]->CopySubset(full_feature->multi_bin_data_[i].get(), used_indices, num_used_indices);
      }
    }
  }

  // Iterator over the bins of one feature within this group.
  inline BinIterator* SubFeatureIterator(int sub_feature) {
    uint32_t most_freq_bin = bin_mappers_[sub_feature]->GetMostFreqBin();
    if (!is_multi_val_) {
      uint32_t min_bin = bin_offsets_[sub_feature];
      uint32_t max_bin = bin_offsets_[sub_feature + 1] - 1;
      return bin_data_->GetIterator(min_bin, max_bin, most_freq_bin);
    } else {
      int addi = bin_mappers_[sub_feature]->GetMostFreqBin() == 0 ? 0 : 1;
      uint32_t min_bin = 1;
      uint32_t max_bin = bin_mappers_[sub_feature]->num_bin() - 1 + addi;
      return multi_bin_data_[sub_feature]->GetIterator(min_bin, max_bin, most_freq_bin);
    }
  }

  // Finalize bin construction after all PushData calls.
  inline void FinishLoad() {
    if (is_multi_val_) {
      OMP_INIT_EX();
#pragma omp parallel for schedule(guided)
      for (int i = 0; i < num_feature_; ++i) {
        OMP_LOOP_EX_BEGIN();
        multi_bin_data_[i]->FinishLoad();
        OMP_LOOP_EX_END();
      }
      OMP_THROW_EX();
    } else {
      bin_data_->FinishLoad();
    }
  }

  /*!
   * \brief Returns a BinIterator that can access the entire feature group's raw data.
   *        The RawGet() function of the iterator should be called for best efficiency.
   * \return A pointer to the BinIterator object (nullptr for multi-value groups)
   */
  inline BinIterator* FeatureGroupIterator() {
    if (is_multi_val_) {
      return nullptr;
    }
    uint32_t min_bin = bin_offsets_[0];
    uint32_t max_bin = bin_offsets_.back() - 1;
    uint32_t most_freq_bin = 0;
    return bin_data_->GetIterator(min_bin, max_bin, most_freq_bin);
  }

  // Partition data_indices into lte_indices / gt_indices according to a
  // split on one feature of this group; returns the number of rows on the
  // lte side.  Dispatches on numerical vs categorical bin type.
  inline data_size_t Split(int sub_feature, const uint32_t* threshold,
                           int num_threshold, bool default_left,
                           data_size_t* data_indices, data_size_t num_data,
                           data_size_t* lte_indices,
                           data_size_t* gt_indices) const {
    uint32_t default_bin = bin_mappers_[sub_feature]->GetDefaultBin();
    uint32_t most_freq_bin = bin_mappers_[sub_feature]->GetMostFreqBin();
    if (!is_multi_val_) {
      uint32_t min_bin = bin_offsets_[sub_feature];
      uint32_t max_bin = bin_offsets_[sub_feature + 1] - 1;
      if (bin_mappers_[sub_feature]->bin_type() == BinType::NumericalBin) {
        auto missing_type = bin_mappers_[sub_feature]->missing_type();
        return bin_data_->Split(min_bin, max_bin, default_bin, most_freq_bin,
                                missing_type, default_left, *threshold,
                                data_indices, num_data, lte_indices,
                                gt_indices);
      } else {
        return bin_data_->SplitCategorical(min_bin, max_bin, most_freq_bin,
                                           threshold, num_threshold,
                                           data_indices, num_data, lte_indices,
                                           gt_indices);
      }
    } else {
      int addi = bin_mappers_[sub_feature]->GetMostFreqBin() == 0 ? 0 : 1;
      uint32_t min_bin = 1;
      uint32_t max_bin = bin_mappers_[sub_feature]->num_bin() - 1 + addi;
      if (bin_mappers_[sub_feature]->bin_type() == BinType::NumericalBin) {
        auto missing_type = bin_mappers_[sub_feature]->missing_type();
        return multi_bin_data_[sub_feature]->Split(
            min_bin, max_bin, default_bin, most_freq_bin, missing_type,
            default_left, *threshold, data_indices, num_data, lte_indices,
            gt_indices);
      } else {
        return multi_bin_data_[sub_feature]->SplitCategorical(
            min_bin, max_bin, most_freq_bin, threshold, num_threshold,
            data_indices, num_data, lte_indices, gt_indices);
      }
    }
  }

  /*!
   * \brief From bin to feature value
   * \param sub_feature_idx Index of the feature within this group
   * \param bin Bin index
   * \return FeatureGroup value of this bin
   */
  inline double BinToValue(int sub_feature_idx, uint32_t bin) const {
    return bin_mappers_[sub_feature_idx]->BinToValue(bin);
  }

  /*!
   * \brief Save binary data to file
   * \param writer File writer to use (layout must match the from-memory constructor)
   */
  void SaveBinaryToFile(const VirtualFileWriter* writer) const {
    writer->Write(&is_multi_val_, sizeof(is_multi_val_));
    writer->Write(&is_sparse_, sizeof(is_sparse_));
    writer->Write(&num_feature_, sizeof(num_feature_));
    for (int i = 0; i < num_feature_; ++i) {
      bin_mappers_[i]->SaveBinaryToFile(writer);
    }
    if (is_multi_val_) {
      for (int i = 0; i < num_feature_; ++i) {
        multi_bin_data_[i]->SaveBinaryToFile(writer);
      }
    } else {
      bin_data_->SaveBinaryToFile(writer);
    }
  }

  /*!
   * \brief Get sizes in byte of this object
   */
  size_t SizesInByte() const {
    size_t ret = sizeof(is_multi_val_) + sizeof(is_sparse_) + sizeof(num_feature_);
    for (int i = 0; i < num_feature_; ++i) {
      ret += bin_mappers_[i]->SizesInByte();
    }
    if (!is_multi_val_) {
      ret += bin_data_->SizesInByte();
    } else {
      for (int i = 0; i < num_feature_; ++i) {
        ret += multi_bin_data_[i]->SizesInByte();
      }
    }
    return ret;
  }

  /*! \brief Disable copy */
  FeatureGroup& operator=(const FeatureGroup&) = delete;

  /*! \brief Deep copy */
  FeatureGroup(const FeatureGroup& other) {
    num_feature_ = other.num_feature_;
    is_multi_val_ = other.is_multi_val_;
    is_sparse_ = other.is_sparse_;
    num_total_bin_ = other.num_total_bin_;
    bin_offsets_ = other.bin_offsets_;

    bin_mappers_.reserve(other.bin_mappers_.size());
    for (auto& bin_mapper : other.bin_mappers_) {
      bin_mappers_.emplace_back(new BinMapper(*bin_mapper));
    }

    if (!is_multi_val_) {
      bin_data_.reset(other.bin_data_->Clone());
    } else {
      multi_bin_data_.clear();
      for (int i = 0; i < num_feature_; ++i) {
        multi_bin_data_.emplace_back(other.multi_bin_data_[i]->Clone());
      }
    }
  }

 private:
  /*! \brief Number of features */
  int num_feature_;
  /*! \brief Bin mapper for sub features */
  std::vector<std::unique_ptr<BinMapper>> bin_mappers_;
  /*! \brief Bin offsets for sub features */
  std::vector<uint32_t> bin_offsets_;
  /*! \brief Bin data of this feature */
  std::unique_ptr<Bin> bin_data_;
  /*! \brief Per-feature bin data, used when is_multi_val_ is true */
  std::vector<std::unique_ptr<Bin>> multi_bin_data_;
  /*! \brief True if this group stores one Bin per feature */
  bool is_multi_val_;
  /*! \brief True if this feature is sparse */
  bool is_sparse_;
  /*! \brief Total number of bins across all features (slot 0 reserved) */
  int num_total_bin_;
};

}  // namespace LightGBM

#endif  // LIGHTGBM_FEATURE_GROUP_H_
/* ResNet-18_CPU_imagenet.c */
/*
 Pretrained ResNet-18 Convolutional Neural Network in C language and OpenMP API

 GitHUB Page: https://github.com/jcanore/vgg16
 Author: Jack/jocare

 Compilation: gcc -O3 ResNet-18_CPU_cifar.c -lm -fopenmp -o ResNet-18_CPU_cifar
 Usage: ResNet-18_CPU_cifar <weights_path> <file_with_list_of_images> <output file> <output convolution features (optional)>
 Example: ResNet-18_CPU_cifar ../../weights/weights.txt" ../../img/image_list.txt results_imagenet_conv.txt 1
*/

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <omp.h>

#include "sparse.h"

// NOTE(review): struct timeval normally comes from <sys/time.h>, and memset
// (used below) from <string.h>; presumably "sparse.h" pulls these in -- TODO
// confirm, otherwise add the includes explicitly.
// Elapsed wall-clock time in seconds between two gettimeofday() samples.
double get_seconds(struct timeval tStart, struct timeval tEnd) {
  return ((tEnd.tv_sec - tStart.tv_sec) * 1000000 + tEnd.tv_usec - tStart.tv_usec) / 1.e6;
}

#define SIZE 224        // input image height/width (ImageNet)
#define CONV_SIZE 3     // default convolution kernel size
#define CONV_LEVELS 20  // number of convolution layers in this ResNet-18
//#define _CRT_SECURE_NO_WARNINGS 1

// precompile variables
// assure default values if nothing provided
#ifndef SPARSE_CONVOLUTIONS
#define SPARSE_CONVOLUTIONS 0 // default dense convolutions
#endif // SPARSE_CONVOLUTIONS

#ifndef FIRST_CONV_SPARSE
#define FIRST_CONV_SPARSE 0 // this is almost never 1
#endif // FIRST_CONV_SPARSE

#ifndef SPARSE_FULLY_CONNECTED
#define SPARSE_FULLY_CONNECTED 0 // this is not implemented yet
#endif // SPARSE_FULLY_CONNECTED

#ifndef FISHER_PRUNING
#define FISHER_PRUNING 0 // set for fisher pruning, all previous variables changed to dense
#endif // FISHER_PRUNING

#ifndef NUMBER_OF_THREADS
#define NUMBER_OF_THREADS 1 // number of threads to run on
//#define NUMBER_OF_THREADS omp_get_num_procs() - 1
#endif // NUMBER_OF_THREADS

/****************************************************************************************************************************/

// spatial size of the feature map entering each of the 20 conv layers
int im_sizes[20] = { 224, 112, 56, 56, 56, 28, 28, 28, 28, 28, 14, 14, 14, 14, 14, 7, 7, 7, 7, 7};
//int im_sizes[20] = { 224, 224, 224, 224, 224, 112, 112, 112, 112, 56, 56, 56, 56, 56, 28, 28, 28, 28, 28 };

// Weights and image block START
float ***image;

#if FISHER_PRUNING
// NOTE(review): if SPARSE_CONVOLUTIONS was already defined to a non-zero
// value (e.g. via -D on the command line), this redefinition is a
// constraint violation; an #undef before the #define would be cleaner.
#define SPARSE_CONVOLUTIONS 0 // force dense convolutions

/*
// ORIGINAL FISHER EXPERIMENTS
int cshape[20][4] = {
	{ 64, 3, CONV_SIZE, CONV_SIZE },
	{ 13, 64, CONV_SIZE, CONV_SIZE },
	{ 64, 13, CONV_SIZE, CONV_SIZE },
	{ 11, 64, CONV_SIZE, CONV_SIZE },
	{ 64, 11, CONV_SIZE, CONV_SIZE },
	{ 31, 64, CONV_SIZE, CONV_SIZE },
	{ 128, 31, CONV_SIZE, CONV_SIZE },
	{ 31, 64, 1, 1 },
	{ 128, 128, CONV_SIZE, CONV_SIZE },
	{ 13, 128, CONV_SIZE, CONV_SIZE },
	{ 40, 13, CONV_SIZE, CONV_SIZE },
	{ 256, 40, CONV_SIZE, CONV_SIZE },
	{ 40, 13, 1, 1 },
	{ 256, 256, CONV_SIZE, CONV_SIZE },
	{ 19, 256, CONV_SIZE, CONV_SIZE },
	{ 19, 19, CONV_SIZE, CONV_SIZE },
	{ 512, 19, CONV_SIZE, CONV_SIZE },
	{ 19, 19, 1, 1 },
	{ 512, 512, CONV_SIZE, CONV_SIZE },
	{ 12, 512, CONV_SIZE, CONV_SIZE }
};

// batch normalization layer shapes
int bshape[20] = { 64, 13, 64, 11, 64, 31, 128, 128, 13, 128, 40, 256, 256, 19, 256, 19, 512, 512, 12, 512 };

// dense layer
int dshape[1][2]= { { 512, 10} };
*/

// FIXED 90% ACCURACY EXPERIMENTS
// conv layer shapes: { out_channels, in_channels, kernel_h, kernel_w }
int cshape[20][4] = {
	{ 64, 3, 3, 3 },
	{ 9, 64, CONV_SIZE, CONV_SIZE },
	{ 64, 9, CONV_SIZE, CONV_SIZE },
	{ 10, 64, CONV_SIZE, CONV_SIZE },
	{ 64, 10, CONV_SIZE, CONV_SIZE },
	{ 23, 64, CONV_SIZE, CONV_SIZE },
	{ 128, 23, CONV_SIZE, CONV_SIZE },
	{ 128, 64, 1, 1 },
	{ 7, 128, CONV_SIZE, CONV_SIZE },
	{ 128, 7, CONV_SIZE, CONV_SIZE },
	{ 30, 128, CONV_SIZE, CONV_SIZE },
	{ 256, 30, CONV_SIZE, CONV_SIZE },
	{ 256, 128, 1, 1 },
	{ 15, 256, CONV_SIZE, CONV_SIZE },
	{ 256, 15, CONV_SIZE, CONV_SIZE },
	{ 15, 256, CONV_SIZE, CONV_SIZE },
	{ 512, 15, CONV_SIZE, CONV_SIZE },
	{ 512, 256, 1, 1 },
	{ 10, 512, CONV_SIZE, CONV_SIZE },
	{ 512, 10, CONV_SIZE, CONV_SIZE }
};

// batch normalization layer shapes
int bshape[20] = { 64, 9, 64, 10, 64, 23, 128, 128, 7, 128, 30, 256, 256, 15, 256, 15, 512, 512, 10, 512 };

// dense layer
int dshape[1][2]= { { 512, 10} };

#else // FISHER_PRUNING

// standard (unpruned) ResNet-18 layer shapes: { out_ch, in_ch, kh, kw }
int cshape[20][4] = {
	{ 64, 3, CONV_SIZE, CONV_SIZE },
	{ 64, 64, CONV_SIZE, CONV_SIZE },
	{ 64, 64, CONV_SIZE, CONV_SIZE },
	{ 64, 64, CONV_SIZE, CONV_SIZE },
	{ 64, 64, CONV_SIZE, CONV_SIZE },
	{ 128, 64, CONV_SIZE, CONV_SIZE },
	{ 128, 128, CONV_SIZE, CONV_SIZE },
	{ 128, 64, 1, 1 },
	{ 128, 128, CONV_SIZE, CONV_SIZE },
	{ 128, 128, CONV_SIZE, CONV_SIZE },
	{ 256, 128, CONV_SIZE, CONV_SIZE },
	{ 256, 256, CONV_SIZE, CONV_SIZE },
	{ 256, 128, 1, 1 },
	{ 256, 256, CONV_SIZE, CONV_SIZE },
	{ 256, 256, CONV_SIZE, CONV_SIZE },
	{ 512, 256, CONV_SIZE, CONV_SIZE },
	{ 512, 512, CONV_SIZE, CONV_SIZE },
	{ 512, 256, 1, 1 },
	{ 512, 512, CONV_SIZE, CONV_SIZE },
	{ 512, 512, CONV_SIZE, CONV_SIZE }
};

// batch normalization layer shapes
int bshape[CONV_LEVELS] = { 64, 64, 64, 64, 64, 64, 128, 128, 128, 128, 128, 256, 256, 256, 256, 256, 512, 512, 512, 512 };

// dense layer
int dshape[1][2]= { { 512, 1000} };

#endif // FISHER_PRUNING

float *****wc; // weights convolution
float **bc;    // biases convolution
float ***wd;   // weights dense
float **bd;    // biases dense

#if SPARSE_CONVOLUTIONS
// sparse conv
csr_t ****wc_sparse;
#endif // SPARSE_CONVOLUTIONS

// per-layer batchnorm scale/shift, folded so that out = in * weight + bias
float batchnorm_weights[CONV_LEVELS][512];
float batchnorm_biases[CONV_LEVELS][512];

// Blocks for intermediate convolutions
int mem_block_shape[3] = {512, SIZE, SIZE}; // not optimal defining 512 statically
float ***mem_block1;
float ***mem_block2;
float ***shortcut_mem;

// Blocks for dense flatten layers
int mem_block_dense_shape = { 512 * 1 * 1 }; // size of layer before the fully connected
float *mem_block1_dense;
float *mem_block2_dense;

// Weights and image block END

/****************************************************************************************************************************/

// Zero out a full (512 x SIZE x SIZE) intermediate activation block.
void reset_mem_block(float ***mem) {
	int i, j, k;
	for (i = 0; i < mem_block_shape[0]; i++) {
		for (j = 0; j < mem_block_shape[1]; j++) {
			for (k = 0; k < mem_block_shape[2]; k++) {
				mem[i][j][k] = 0.f;
			}
		}
	}
}

/****************************************************************************************************************************/

// Zero out a flattened dense-layer buffer.
void reset_mem_block_dense(float *mem) {
	int i;
	for (i = 0; i < mem_block_dense_shape; i++) {
		mem[i] = 0.f;
	}
}

/****************************************************************************************************************************/

// Allocate all global buffers: input image, conv/dense weights, intermediate
// activation blocks.  Sized from cshape/dshape; freed by free_memory().
void init_memory() {
	int i, j, k, l;

	// Init image memory
	image = malloc(3 * sizeof(float**));
	for (i = 0; i < 3; i++) {
		image[i] = malloc(SIZE * sizeof(float*));
		for (j = 0; j < SIZE; j++) {
			image[i][j] = malloc(SIZE * sizeof(float));
		}
	}

#if SPARSE_CONVOLUTIONS
	wc_sparse = (csr_t****) malloc(CONV_LEVELS * sizeof(csr_t***));
	for (l = 0; l < CONV_LEVELS; l++) {
		wc_sparse[l] = (csr_t***) malloc(cshape[l][0] * sizeof(csr_t**));
		for (i = 0; i < cshape[l][0]; i++) {
			wc_sparse[l][i] = (csr_t**) malloc(cshape[l][1] * sizeof(csr_t*));
		}
	}
	// wc memory allocated below will be freed in read_weights if SPARSE_CONVOLUTIONS
#endif // SPARSE_CONVOLUTIONS

	// Init convolution weights
	wc = malloc(CONV_LEVELS * sizeof(float****));
	bc = malloc(CONV_LEVELS * sizeof(float*));
	for (l = 0; l < CONV_LEVELS; l++) {
		wc[l] = malloc(cshape[l][0] * sizeof(float***));
		for (i = 0; i < cshape[l][0]; i++) {
			wc[l][i] = malloc(cshape[l][1] * sizeof(float**));
			for (j = 0; j < cshape[l][1]; j++) {
				wc[l][i][j] = malloc(cshape[l][2] * sizeof(float*));
				for (k = 0; k < cshape[l][2]; k++) {
					wc[l][i][j][k] = malloc(cshape[l][3] * sizeof(float));
				}
			}
		}
		bc[l] = malloc(cshape[l][0] * sizeof(float));
	}

	// Init dense weights
	// NOTE(review): wd/bd are allocated for 2 pointers but only index 0 is
	// ever populated (loop below runs l < 1); the extra slot is unused.
	wd = malloc(2 * sizeof(float**));
	bd = malloc(2 * sizeof(float*));
	for (l = 0; l < 1; l++) {
		wd[l] = malloc(dshape[l][0] * sizeof(float*));
		for (i = 0; i < dshape[l][0]; i++) {
			wd[l][i] = malloc(dshape[l][1] * sizeof(float));
		}
		bd[l] = malloc(dshape[l][1] * sizeof(float));
	}

	// Init mem_blocks
	// this size could be dynamic
	mem_block1 = malloc(mem_block_shape[0] * sizeof(float**));
	mem_block2 = malloc(mem_block_shape[0] * sizeof(float**));
	shortcut_mem = malloc(mem_block_shape[0] * sizeof(float**));
	for (i = 0; i < mem_block_shape[0]; i++) {
		mem_block1[i] = malloc(mem_block_shape[1] * sizeof(float*));
		mem_block2[i] = malloc(mem_block_shape[1] * sizeof(float*));
		shortcut_mem[i] = malloc(mem_block_shape[1] * sizeof(float*));
		for (j = 0; j < mem_block_shape[1]; j++) {
			mem_block1[i][j] = malloc(mem_block_shape[2] * sizeof(float));
			mem_block2[i][j] = malloc(mem_block_shape[2] * sizeof(float));
			shortcut_mem[i][j] = malloc(mem_block_shape[2] * sizeof(float));
		}
	}
	// reset_mem_block(mem_block1);
	// reset_mem_block(mem_block2);

	// Init mem blocks dense
	mem_block1_dense = calloc(mem_block_dense_shape, sizeof(float));
	mem_block2_dense = calloc(mem_block_dense_shape, sizeof(float));

	// Init batchnorm blocks
	//batchnorm_weights = malloc(2 * sizeof(float*));
	//batchnorm_biases = malloc(2 * sizeof(float*));
	//for (int z = 0; z < 20; z++) {
	//batchnorm_weights[z] = malloc(512 * sizeof(float));
	//batchnorm_biases[z] = malloc(512 * sizeof(float));
	//}
}

/****************************************************************************************************************************/

// Release everything allocated by init_memory().
// NOTE(review): the dense-weight free loop is commented out below, so wd/bd
// contents are intentionally (?) leaked at shutdown -- TODO confirm.
void free_memory() {
	int i, j, k, l;

	// Free image memory
	for (i = 0; i < 3; i++) {
		for (j = 0; j < SIZE; j++) {
			free(image[i][j]);
		}
		free(image[i]);
	}
	free(image);

	// Free convolution weights
	for (l = 0; l < CONV_LEVELS; l++) {
#if SPARSE_CONVOLUTIONS
		for (i = 0; i < cshape[l][0]; i++) {
			for (j = 0; j < cshape[l][1]; j++) {
				free(wc_sparse[l][i][j]);
			}
			free(wc_sparse[l][i]);
		}
		free(wc_sparse[l]);
#else
		for (i = 0; i < cshape[l][0]; i++) {
			for (j = 0; j < cshape[l][1]; j++) {
				for (k = 0; k < cshape[l][2]; k++) {
					free(wc[l][i][j][k]);
				}
				free(wc[l][i][j]);
			}
			free(wc[l][i]);
		}
		free(wc[l]);
#endif
		free(bc[l]);
	}
	// free(wc);
	// free(bc);

#if SPARSE_CONVOLUTIONS
	free(wc_sparse);
#else
	free(wc);
#endif // SPARSE_CONVOLUTIONS
	free(bc);

	// Free dense weights
	/*
	for (l = 0; l < 2; l++) {
		for (i = 0; i < dshape[l][0]; i++) {
			free(wd[l][i]);
		}
		free(wd[l]);
		free(bd[l]);
	}
	free(wd);
	free(bd);
	*/

	// Free memblocks
	for (i = 0; i < mem_block_shape[0]; i++) {
		for (j = 0; j < mem_block_shape[1]; j++) {
			free(mem_block1[i][j]);
			free(mem_block2[i][j]);
			free(shortcut_mem[i][j]);
		}
		free(mem_block1[i]);
		free(mem_block2[i]);
		free(shortcut_mem[i]);
	}
	free(mem_block1);
	free(mem_block2);
	free(shortcut_mem);

	free(mem_block1_dense);
	free(mem_block2_dense);
}

/****************************************************************************************************************************/

// Load all network parameters from a whitespace-separated text file.
// lvls limits how many conv levels are needed (-1 = read everything).
// NOTE(review): fscanf return values are unchecked, and the early return
// below leaks the FILE* and skips the sparse conversion -- acceptable for a
// benchmark harness, but worth confirming.
void read_weights(char *in_file, int lvls) {
	/* weights are written out as:
	- 20 x convolutional weights NO bias
	- 20 x batchnorm weights with bias
	- 1 x fc weights with bias
	*/
	float dval;
	int i, j, k, l, z;
	FILE *iin;
	int total_lvls_read = 0;

	// printf("\nin_file es: %s\n\n", in_file);

	iin = fopen64(in_file, "r");
	if (iin == NULL) {
		printf("Weights file %s absent\n", in_file);
		exit(1);
	}

	// Reading convolution weights (store them flipped from begining)
	// no biases
	for (z = 0; z < CONV_LEVELS; z++) {
		printf("Read conv block %d weights\n", z);
		for (i = 0; i < cshape[z][0]; i++) {
			for (j = 0; j < cshape[z][1]; j++) {
				for (k = 0; k < cshape[z][2]; k++) {
					for (l = 0; l < cshape[z][3]; l++) {
						fscanf(iin, "%f", &dval);
						wc[z][i][j][k][l] = dval;
					}
				}
			}
		}
		total_lvls_read += 1;
	}

	/*
	// run this to check conv weights are correct
	z = 19;
	// print back to verify
	for (i = 0; i < cshape[z][0]; i++) {
		for (j = 0; j < cshape[z][1]; j++) {
			for (k = 0; k < cshape[z][2]; k++) {
				for (l = 0; l < cshape[z][3]; l++) {
					printf("conv 5: %f \n", wc[z][i][j][k][l]);
				}
			}
		}
	}
	return;
	*/

	for (z = 0; z < CONV_LEVELS; z++) {
		// batchnorm weights and biases
		printf("Read batchnorm block %d weights\n", z);
		for (i = 0; i < bshape[z]; i++) {
			fscanf(iin, "%f", &dval);
			//printf("weight %i : %f \n", i, dval);
			batchnorm_weights[z][i] = dval;
		}
		for (i = 0; i < bshape[z]; i++) {
			fscanf(iin, "%f", &dval);
			//printf("bias %i : %f \n", i, dval);
			batchnorm_biases[z][i] = dval;
		}
	}

	if (total_lvls_read >= lvls && lvls != -1)
		return;

	// Reading dense weights
	int num_dense_layers = 1;
	for (z = 0; z < num_dense_layers; z++) {
		printf("Read dense block %d weights\n", z);
		for (i = 0; i < dshape[z][0]; i++) {
			for (j = 0; j < dshape[z][1]; j++) {
				fscanf(iin, "%f", &dval);
				//printf("weight: %i : %f \n", i, dval);
				wd[z][i][j] = dval;
			}
		}
		for (i = 0; i < dshape[z][1]; i++) {
			fscanf(iin, "%f", &dval);
			//printf("bias %i : %f \n", i, dval);
			bd[z][i] = dval;
		}
	}

	fclose(iin);

	/////////////**************** SPARSE ************/////////////////////////////
#if SPARSE_CONVOLUTIONS
	// convert to sparse format
	for (l = 0; l < CONV_LEVELS; l++)
		for (i = 0; i < cshape[l][0]; i++)
			for (j = 0; j < cshape[l][1]; j++) {
				//printf("going for %d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, cshape[l][1]);
				csr_t* a = dense2csr2(cshape[l][2], cshape[l][3], wc[l][i][j]);
				//print_csr(a);
				wc_sparse[l][i][j] = a;
				//printf("done..%d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, cshape[l][1]);
			}

	// Free convolution weights
#if FIRST_CONV_SPARSE == 0
	l = 0;
	// allocate new memory for first conv and copy from wc
	float *****wc_first_conv = (float*****) malloc(1 * sizeof(float****));
	wc_first_conv[l] = (float****) malloc(cshape[l][0] * sizeof(float***));
	int k1, k2;
	for (i = 0; i < cshape[l][0]; i++) {
		wc_first_conv[l][i] = (float***) malloc(cshape[l][1] * sizeof(float**));
		for (j = 0; j < cshape[l][1]; j++) {
			wc_first_conv[l][i][j] = (float**) malloc(cshape[l][2] * sizeof(float*));
			for (k1 = 0; k1 < cshape[l][2]; k1++) {
				wc_first_conv[l][i][j][k1] = (float*) malloc(cshape[l][3] * sizeof(float));
				for (k2 = 0; k2 < cshape[l][3]; k2++)
					wc_first_conv[l][i][j][k1][k2] = wc[l][i][j][k1][k2];
			}
		}
	}
#endif // FIRST_CONV_SPARSE == 0

	// free up all dense conv layer representation
	for (l = 0; l < CONV_LEVELS; l++) {
		for (i = 0; i < cshape[l][0]; i++) {
			for (j = 0; j < cshape[l][1]; j++) {
				for (k = 0; k < cshape[l][2]; k++) {
					free(wc[l][i][j][k]);
				}
				free(wc[l][i][j]);
			}
			free(wc[l][i]);
		}
		free(wc[l]);
	}
	free(wc);

#if FIRST_CONV_SPARSE == 0
	// replace old wc pointer with the data for only first conv layer created above
	wc = wc_first_conv;
#endif // FIRST_CONV_SPARSE == 0

#endif // SPARSE_CONVOLUTIONS
}

/****************************************************************************************************************************/

// Read one SIZE x SIZE x 3 image (row-major, channel-interleaved in the
// file) into the global image[channel][row][col] buffer.
void read_image(char *in_file) {
	int i, j, l;
	FILE *iin;
	float dval;

	iin = fopen(in_file, "r");
	if (iin == NULL) {
		printf("Image file %s absent\n", in_file);
		exit(1);
	}

	/* Reading image */
	for (i = 0; i < SIZE; i++) {
		for (j = 0; j < SIZE; j++) {
			for (l = 0; l < 3; l++) {
				fscanf(iin, "%f", &dval);
				image[l][i][j] = dval;
				// printf("i[%d][%d][%d]:%f\n", i, j, l, dval);
			}
		}
	}
}

/****************************************************************************************************************************/

// Accumulate one 3x3 convolution (single in/out channel pair) into out,
// with implicit 1-pixel zero padding.  NOTE(review): with stride > 1 the
// result is written at the *input* coordinates (out[i][j] for strided i, j),
// i.e. the output stays size x size with gaps -- presumably the downsampling
// layers account for this; TODO confirm against the caller.
void convolution_3_x_3(float **matrix, float **kernel, float **out, int size, int stride) {
	int i, j;
	float sum;
	// float zeropad[SIZE + 2][SIZE + 2] = { 0.0 };
	float zeropad[size+2][size+2];
	memset(zeropad, 0, ((size+2)*(size+2)*sizeof(float))); // jack

	for (i = 0; i < size; i++) {
		for (j = 0; j < size; j++) {
			zeropad[i + 1][j + 1] = matrix[i][j];
		}
	}

	// float** zeropad = (float**) malloc((size + 2) * sizeof(float*)); //[size+2][size+2];
	// for (i = 0; i < (size + 2); ++i)
	// zeropad[i] = (float*) malloc ((size + 2) * sizeof(float));
	// //memset(zeropad, 0, ((size+2)*(size+2)*sizeof(float)));
	// // padding with zeros
	// for (i = 0; i < size + 2; ++i) {
	// zeropad[i][0] = 0;
	// zeropad[i][size + 1] = 0;
	// }
	// for (i = 1; i < size + 1; ++i) {
	// zeropad[0][i] = 0;
	// zeropad[size + 1][i] = 0;
	// }
	// // copying input value
	// for (i = 0; i < size; ++i) {
	// for (j = 0; j < size; ++j) {
	// zeropad[i + 1][j + 1] = matrix[i][j];
	// }
	// }

	// fully unrolled 3x3 dot product; centre tap is zeropad[i+1][j+1] == matrix[i][j]
	for (i = 0; i < size; i=i+stride) {
		for (j = 0; j < size; j=j+stride) {
			sum = zeropad[i    ][j    ] * kernel[0][0] +
			      zeropad[i    ][j + 1] * kernel[0][1] +
			      zeropad[i    ][j + 2] * kernel[0][2] +
			      zeropad[i + 1][j    ] * kernel[1][0] +
			      zeropad[i + 1][j + 1] * kernel[1][1] +
			      zeropad[i + 1][j + 2] * kernel[1][2] +
			      zeropad[i + 2][j    ] * kernel[2][0] +
			      zeropad[i + 2][j + 1] * kernel[2][1] +
			      zeropad[i + 2][j + 2] * kernel[2][2];
			out[i][j] += sum;
		}
	}

	// for (i = 0; i < (size + 2); ++i)
	// free(zeropad[i]);
	// free(zeropad);
}

/****************************************************************************************************************************/

// Sparse (CSR kernel) version of convolution_3_x_3: iterates only the
// nonzero kernel taps; zeropad[i + k][j + col] spans the same 3x3 window.
void convolution_3_x_3_sparse(float **matrix, csr_t* kernel, float **out, int size, int stride) {
	// printf("sparse\n");
	int i, j;
	// float zeropad[SIZE + 2][SIZE + 2] = { 0.0 };
	float zeropad[size+2][size+2];
	memset(zeropad, 0, ((size+2)*(size+2)*sizeof(float))); // jack

	for (i = 0; i < size; i++) {
		for (j = 0; j < size; j++) {
			zeropad[i + 1][j + 1] = matrix[i][j];
		}
	}

	// float** zeropad = (float**) malloc((size + 2) * sizeof(float*)); //[size+2][size+2];
	// for (i = 0; i < (size + 2); ++i)
	// zeropad[i] = (float*) malloc ((size + 2) * sizeof(float));
	// //memset(zeropad, 0, ((size+2)*(size+2)*sizeof(float)));
	// // padding with zeros
	// for (i = 0; i < size + 2; ++i) {
	// zeropad[i][0] = 0;
	// zeropad[i][size + 1] = 0;
	// }
	// for (i = 1; i < size + 1; ++i) {
	// zeropad[0][i] = 0;
	// zeropad[size + 1][i] = 0;
	// }
	// // copying input value
	// for (i = 0; i < size; ++i) {
	// for (j = 0; j < size; ++j) {
	// zeropad[i + 1][j + 1] = matrix[i][j];
	// }
	// }
	// // convolution
	// for (i = 0; i < size; ++i) {
	// for (j = 0; j < size; ++j) {
	// out[i][j] += s_csr_conv(kernel, zeropad, i, j);
	// }
	// }
	// for (i = 0; i < (size + 2); ++i)
	// free(zeropad[i]);
	// free(zeropad);

	int k,l;
	float sum;

	// convolution
	for (i = 0; i < size; i+=stride) {
		for (j = 0; j < size; j+=stride) {
			//out[i][j] += s_csr_conv(kernel, zeropad, i, j);
			sum = 0.f;
			for (k = 0; k < kernel->nrows; ++k) {
				// for every nonzero element in this row
				for (l = kernel->rowptr[k]; l < kernel->rowptr[k + 1]; ++l) {
					// Scale the corresponding row of B with the nonzero value of A
					float value = kernel->values[l];
					int col = kernel->colind[l];
					sum += value * zeropad[i + k][j + col];
				}
			}
			out[i][j] += sum;
		}
	}
}
/****************************************************************************************************************************/

/*
 * 1x1 convolution (ResNet shortcut projection): out[i][j] += matrix[i][j] * kernel[0][0].
 *
 * BUG FIX (review): the previous version zero-padded the input by one pixel
 * (zeropad[i+1][j+1] = matrix[i][j]) but then sampled zeropad[i][j], i.e.
 * matrix[i-1][j-1] -- shifting the whole output by one pixel.  A 1x1 kernel
 * needs no padding; position (i, j) is sampled directly (compare the 3x3
 * version, whose centre tap is zeropad[i+1][j+1] == matrix[i][j]).
 */
void convolution_1_x_1(float **matrix, float **kernel, float **out, int size) {
	int i, j;
	const float w = kernel[0][0];  // the single kernel coefficient

	for (i = 0; i < size; i++) {
		for (j = 0; j < size; j++) {
			out[i][j] += matrix[i][j] * w;
		}
	}
}

/****************************************************************************************************************************/

/*
 * Sparse (CSR) variant of the 1x1 convolution above.
 *
 * BUG FIX (review): same one-pixel shift as the dense version -- the old code
 * padded the input and then read zeropad[i + k][j + col], which for the 1x1
 * kernels used here (k == col == 0) sampled matrix[i-1][j-1].  The kernel
 * offsets are now applied to the unpadded input; for a 1x1 CSR kernel
 * (nrows == 1, single column 0, possibly pruned to zero entries) this reads
 * matrix[i][j] exactly.
 */
void convolution_1_x_1_sparse(float **matrix, csr_t *kernel, float **out, int size) {
	int i, j, k, l;
	float sum;

	for (i = 0; i < size; i++) {
		for (j = 0; j < size; j++) {
			sum = 0.f;
			for (k = 0; k < kernel->nrows; ++k) {
				// for every nonzero element in this row
				for (l = kernel->rowptr[k]; l < kernel->rowptr[k + 1]; ++l) {
					sum += kernel->values[l] * matrix[i + k][j + kernel->colind[l]];
				}
			}
			out[i][j] += sum;
		}
	}
}

/****************************************************************************************************************************/

// In-place ReLU over a size x size feature map.
// (The conv layers of this network have no bias term, hence "no bias" here.)
void add_relu(float **out, int size) {
	int i, j;
	for (i = 0; i < size; i++) {
		for (j = 0; j < size; j++) {
			if (out[i][j] < 0)
				out[i][j] = 0.f;
		}
	}
}

/****************************************************************************************************************************/

// Add a per-element bias vector to a flattened buffer and optionally apply
// ReLU (relu == 1).  Used for the fully connected layer.
void add_bias_and_relu_flatten(float *out, float *bs, int size, int relu) {
	int i;
	for (i = 0; i < size; i++) {
		out[i] += bs[i];
		// printf("%f\n", out[i]);
		if (relu == 1) {
			if (out[i] < 0)
				out[i] = 0.f;
		}
	}
}

/****************************************************************************************************************************/

// Flatten a (sh0 x sh1 x sh2) activation block into a 1-D buffer,
// channel-major (matches the layout expected by the dense layer).
void flatten(float ***in, float *out, int sh0, int sh1, int sh2) {
	int i, j, k, total = 0;
	for (i = 0; i < sh0; i++) {
		for (j = 0; j < sh1; j++) {
			for (k = 0; k < sh2; k++) {
				out[total] = in[i][j][k];
				total++;
			}
		}
	}
}

/****************************************************************************************************************************/

// Fully connected layer: out = in . weights, with weights[in][out] layout.
// Bias/ReLU are applied separately by add_bias_and_relu_flatten().
void dense(float *in, float **weights, float *out, int sh_in, int sh_out) {
	int i, j;

	//#pragma omp parallel for private(i,j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
	for (i = 0; i < sh_out; i++) {
		float sum = 0.0;
		for (j = 0; j < sh_in; j++) {
			sum += in[j] * weights[j][i];
		}
		out[i] = sum;
	}
}

/****************************************************************************************************************************/

// Inference-time batch normalization with pre-folded parameters:
// out = in * weight[channel] + bias[channel].
void batchnorm(float ***in, float ***out, float *weights, float *bias, int num_channels, int im_size) {
	int channel, i, j;

	#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
	for (channel = 0; channel < num_channels; channel++) {
		for (i = 0; i < im_size; i++) {
			for (j = 0; j < im_size; j++) {
				out[channel][i][j] = in[channel][i][j] * weights[channel] + bias[channel];
			}
		}
	}
}

/****************************************************************************************************************************/

// Fused batchnorm + ReLU (saves one pass over the feature maps).
void batchnorm_and_relu(float ***in, float ***out, float *weights, float *bias, int num_channels, int im_size) {
	int channel, i, j;

	#pragma omp parallel for private(i,j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
	for (channel = 0; channel < num_channels; channel++) {
		for (i = 0; i < im_size; i++) {
			for (j = 0; j < im_size; j++) {
				out[channel][i][j] = in[channel][i][j] * weights[channel] + bias[channel];
				if (out[channel][i][j] < 0.f)
					out[channel][i][j] = 0.f;
			}
		}
	}
}
/****************************************************************************************************************************/ float avg_of(float **in, int start_x, int start_y, int kernel_size) { float sum = 0.; int i, j; for(i = 0; i < kernel_size; ++i) { for(j = 0; j < kernel_size; ++j) { sum += in[start_x+i][start_y+j]; } } return sum / (kernel_size * kernel_size); } /****************************************************************************************************************************/ void avg_pool(float ***in, float ***out, int channels, int k_size, int image_size) { int c; for(c = 0; c < channels; ++c) { out[c][0][0] = avg_of(in[c],0,0, 7); //out[c][0][1] = avg_of(in[c],0,6, 7); //out[c][1][0] = avg_of(in[c],6,0, 7); //out[c][1][1] = avg_of(in[c],6,6, 7); } } /****************************************************************************************************************************/ void dump_image() { int i, j, k; for (i = 0; i < 3; i++) { for (j = 0; j < SIZE; j++) { for (k = 0; k < SIZE; k++) { printf("%.12lf\n", image[i][j][k]); } } } } /****************************************************************************************************************************/ void output_predictions(FILE *out, int only_convolution, int size, int cur_size) { int i; int c=0; if (only_convolution == 1) { for (i = 0; i < size * cur_size * cur_size; i++) { fprintf(out, "%g ", mem_block1_dense[i]); } } else { double maximum=-1; // dshape[0][1] ==> 10 for (i = 0; i < dshape[0][1]; i++) { fprintf(out, "%g ", mem_block2_dense[i]); if(mem_block1_dense[i]>maximum){ maximum=mem_block2_dense[i]; c=i+1; } } //fprintf(out, "\n"); printf("-------------------------\n"); printf("This image depicts class: %d\n",c); } } /****************************************************************************************************************************/ void conv_norm_block(int level, int shortcut) { int in_planes = cshape[level][1]; int i, j, k; // if shortcut then save image for layer 
if(shortcut==1) {
    // Snapshot the block input so the 1x1 projection shortcut can be
    // applied to it after the two 3x3 convolutions below.
    int i, j, k;
    for (i = 0; i < mem_block_shape[0]; i++) {
      for (j = 0; j < mem_block_shape[1]; j++) {
        for (k = 0; k < mem_block_shape[2]; k++) {
          shortcut_mem[i][j][k] = mem_block1[i][j][k];
        }
      }
    }
  }

  //int in_planes = cshape[level][0]
  int out_planes = cshape[level][0];
  int stride = 1;

  //-------------------------------------------------------------------------------------------------------------------------------
  // conv 1: mem_block1 -> mem_block2, one 3x3 convolution per
  // (output channel i, input channel j) pair.  `i` is the parallel loop
  // variable (implicitly private); `j` must be declared private explicitly.
  #pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
  for (i = 0; i < out_planes; i++) {
    for (j = 0; j < in_planes; j++) {
#if SPARSE_CONVOLUTIONS
      convolution_3_x_3_sparse(mem_block1[j], wc_sparse[level][i][j], mem_block2[i], im_sizes[level], stride);
#else
      convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], im_sizes[level], stride);
#endif // SPARSE_CONVOLUTIONS
    }
  }

  // Fused batch-norm + ReLU, written back into mem_block1.
  batchnorm_and_relu(mem_block2, mem_block1, batchnorm_weights[level], batchnorm_biases[level], out_planes, im_sizes[level]);
  // batchnorm(mem_block2, mem_block1, batchnorm_weights[level], batchnorm_biases[level], out_planes, im_sizes[level]);
  // for(i = 0; i < out_planes; i++) {
  //   add_relu(mem_block1[i], im_sizes[level]);
  // }
  reset_mem_block(mem_block2);

  //-------------------------------------------------------------------------------------------------------------------------------
  // conv 2: the second convolution's weights live at `level + 1`.
  level += 1;
  in_planes = cshape[level][1];
  out_planes = cshape[level][0];
  //-------------------------------------------------------------------------------------------------------------------------------
  #pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
  for (i = 0; i < out_planes; i++) {
    for (j = 0; j < in_planes; j++) {
#if SPARSE_CONVOLUTIONS
      convolution_3_x_3_sparse(mem_block1[j], wc_sparse[level][i][j], mem_block2[i], im_sizes[level], stride);
#else
      convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], im_sizes[level], stride);
#endif // SPARSE_CONVOLUTIONS
    }
  }
batchnorm_and_relu(mem_block2, mem_block1, batchnorm_weights[level], batchnorm_biases[level], out_planes, im_sizes[level]);
  // batchnorm(mem_block2, mem_block1, batchnorm_weights[level], batchnorm_biases[level], out_planes, im_sizes[level]);
  // for(i = 0; i < out_planes; i++) {
  //   add_relu(mem_block1[i], im_sizes[level]);
  // }
  reset_mem_block(mem_block2);

  // if shortcut: conv bn + out
  // 1x1 projection shortcut applied to the saved block input, then added
  // to the main path.
  // NOTE(review): ReLU is applied to the branches *before* the addition,
  // whereas canonical ResNet applies it after the sum — confirm this
  // matches how the weights were exported.
  if(shortcut==1) {
    level += 1;
    in_planes = cshape[level][1];
    out_planes = cshape[level][0];
    for (i = 0; i < out_planes; i++) {
      for (j = 0; j < in_planes; j++) {
#if SPARSE_CONVOLUTIONS
        convolution_1_x_1_sparse(shortcut_mem[j], wc_sparse[level][i][j], mem_block2[i], im_sizes[level]);
#else
        convolution_1_x_1(shortcut_mem[j], wc[level][i][j], mem_block2[i], im_sizes[level]);
#endif // SPARSE_CONVOLUTIONS
      }
    }
    batchnorm_and_relu(mem_block2, shortcut_mem, batchnorm_weights[level], batchnorm_biases[level], out_planes, im_sizes[level]);
    // batchnorm(mem_block2, shortcut_mem, batchnorm_weights[level], batchnorm_biases[level], out_planes, im_sizes[level]);
    // for(i = 0; i < out_planes; i++) {
    //   add_relu(shortcut_mem[i], im_sizes[level]);
    // }
    reset_mem_block(mem_block2);

    // add results
    // Residual sum: main path (mem_block1) += projected shortcut.
    for(i = 0; i < out_planes; i++) {
      for(j = 0; j < im_sizes[level]; j++) {
        for(k = 0; k < im_sizes[level]; k++) {
          mem_block1[i][j][k] = mem_block1[i][j][k] + shortcut_mem[i][j][k];
        }
      }
    }
  }
}

/****************************************************************************************************************************/
/* Run the full ResNet-18 forward pass on the global `image`, leaving the
 * class scores in mem_block2_dense.  `out` is currently unused here (the
 * caller passes the results file; printing happens in output_predictions). */
void get_resnet18_predict(FILE *out, int only_convolution) {
  int i, j, k;
  int level = 0;

  // Init intermediate memory
  reset_mem_block(mem_block1);
  reset_mem_block(mem_block2);
  reset_mem_block_dense(mem_block1_dense);
  reset_mem_block_dense(mem_block2_dense);

  //-------------------------------------------------------------------------------------------------------------------------------
  // Layer 1 (Convolution 3 -> 64)
  //add_relu(mem_block2[i], 32); ///???? WHY DO WE NEED THIS HERE?
// print the image /* for (i = 0; i < 32; i++) { for (j = 0; j < 32; j++) { for (k = 0; k < 3; k++) { printf("%f \n", image[k][i][j]); } } } return; */ int counter = 0; for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { #if FIRST_CONV_SPARSE convolution_3_x_3_sparse(image[j], wc_sparse[level][i][j], mem_block2[i], im_sizes[level], stride); #else convolution_3_x_3(image[j], wc[level][i][j], mem_block2[i], im_sizes[level], 1); #endif // FIRST_CONV_SPARSE } // [print content of mem block] /* for(int m = 0; m < 32; m++) { for(int n = 0; n < 32; n++) { printf("%i: %f\n", counter, mem_block1[i][m][n]); counter++; } } */ //relu(mem_block2[i], 32); } batchnorm_and_relu(mem_block2, mem_block1, batchnorm_weights[level], batchnorm_biases[level], 64, 32); // batchnorm(mem_block2, mem_block1, batchnorm_weights[level], batchnorm_biases[level], 64, 32); // for(i = 0; i < cshape[level][0]; i++) { // add_relu(mem_block1[i], 32); // } reset_mem_block(mem_block2); /* counter = 0; // print mem block 2: for (i = 0; i < 64; i++) { for (j = 0; j < 32; j++) { for (k = 0; k < 32; k++) { counter++; if (counter < 100) { printf("%i: %f\n",counter, mem_block2[i][j][k]); } } } } return; */ level++; //------------------------------------------------------------------------------------------------------------------------------- int shortcut = 1; int no_shortcut = 0; // 2 blocks of 64 conv_norm_block(level, no_shortcut); level+=2; conv_norm_block(level, no_shortcut); level+=2; // 2 blocks of 128 conv_norm_block(level, shortcut); level+=3; conv_norm_block(level, no_shortcut); level+=2; // 2 blocks of 256 conv_norm_block(level, shortcut); level+=3; conv_norm_block(level, no_shortcut); level+=2; // 2 blocks of 512 conv_norm_block(level, shortcut); level+=3; conv_norm_block(level, no_shortcut); level+=2; level = level - 1; // average pool 7 with 1024 channels of 7x7 images avg_pool(mem_block1, mem_block2, 512, 7, 7); // flatten flatten(mem_block2, mem_block1_dense, 
cshape[level][0], 1, 1);

  // dense
  // Final fully connected layer + bias + ReLU; scores end up in
  // mem_block2_dense, read later by output_predictions().
  level = 0;
  dense(mem_block1_dense, wd[level], mem_block2_dense, dshape[level][0], dshape[level][1]);
  add_bias_and_relu_flatten(mem_block2_dense, bd[level], dshape[level][1], 1);
  reset_mem_block_dense(mem_block1_dense);

  return;
}

/****************************************************************************************************************************/
/* Strip leading and trailing whitespace in place; returns a pointer into
 * `str` (the original buffer is modified, the returned pointer may be
 * advanced past leading spaces). */
char *trimwhitespace(char *str){
  char *end;

  // Trim leading space
  while (isspace((unsigned char)*str)) str++;

  if (*str == 0) // All spaces?
    return str;

  // Trim trailing space
  end = str + strlen(str) - 1;
  while (end > str && isspace((unsigned char)*end)) end--;

  // Write new null terminator
  *(end + 1) = 0;

  return str;
}

/****************************************************************************************************************************/
/* Entry point: load weights, then run inference on every image listed in
 * the image-list file, timing each step.
 * Usage: prog <weights file> <images list file> <output file> [only_convolution] */
int main(int argc, char *argv[]) {
  FILE *file_list, *results;
  char buf[1024];
  struct timeval tStart, tEnd;
  double deltaTime;
  char *weights_file;
  char *image_list_file;
  char *output_file;
  int lvls = -1;
  int only_convolution = 0;

  //-----------------------------------------------------------------------
  printf("Using %d threads\n", NUMBER_OF_THREADS);

  if (argc != 4 && argc != 5) {
    printf("Usage: <program.exe> <weights file> <images list file> <output file> <only_convolution [optional]>\n");
    return 0;
  }
  weights_file = argv[1];
  //printf("%s\n", weights_file);
  image_list_file = argv[2];
  output_file = argv[3];
  // Any 5th argument switches to convolution-only mode (reads only the
  // convolutional weight levels).
  if (argc == 5) {
    lvls = CONV_LEVELS;
    only_convolution = 1;
  }

  //-----------------------------------------------------------------------
  init_memory();

  file_list = fopen(image_list_file, "r");
  if (file_list == NULL) {
    printf("Check file list location: %s\n", image_list_file);
    return 1;
  }
  results = fopen(output_file, "w");
  if (results == NULL) {
    printf("Couldn't open file for writing: %s\n", output_file);
    return 1;
  }

  gettimeofday(&tStart, NULL);
  read_weights(weights_file, lvls);
  gettimeofday(&tEnd, NULL);
  deltaTime = get_seconds(tStart, tEnd);
printf("Reading weights: %.3lf sec\n", deltaTime); while (!feof(file_list)) { fgets(buf, 512, file_list); if (strlen(buf) == 0) { break; } // printf("%d\n", strlen(buf)); read_image(trimwhitespace(buf)); // normalize_image(); // dump_image(); gettimeofday(&tStart, NULL); // get_resnet18_predict(only_convolution); get_resnet18_predict(results, only_convolution); gettimeofday(&tEnd, NULL); deltaTime = get_seconds(tStart, tEnd); printf("Infer image %s: %.3lf sec\n", buf, deltaTime); // output_predictions(results, only_convolution); output_predictions(results, only_convolution, 512, 1); } //free_memory(); fclose(file_list); return 0; }
GB_binop__lor_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lor_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__lor_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__lor_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__lor_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_uint64) // A*D function (colscale): GB (_AxD__lor_uint64) // D*A function (rowscale): GB (_DxB__lor_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__lor_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__lor_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_uint64) // C=scalar+B GB (_bind1st__lor_uint64) // C=scalar+B' GB (_bind1st_tran__lor_uint64) // C=A+scalar GB (_bind2nd__lor_uint64) // C=A'+scalar GB (_bind2nd_tran__lor_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 
0 // BinaryOp: cij = ((aij != 0) || (bij != 0)) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) || (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOR || GxB_NO_UINT64 || GxB_NO_LOR_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__lor_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lor_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lor_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lor_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t 
*restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lor_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lor_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__lor_uint64) ( GrB_Matrix C, const int C_sparsity, 
const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lor_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__lor_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lor_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lor_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = ((x != 0) || (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lor_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = ((aij != 0) || (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((x != 0) || (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__lor_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) || (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__lor_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
Efficient_RANSAC.h
// Copyright (c) 2015 INRIA Sophia-Antipolis (France). // All rights reserved. // // This file is part of CGAL (www.cgal.org). // // $URL$ // $Id$ // SPDX-License-Identifier: GPL-3.0-or-later OR LicenseRef-Commercial // // // Author(s) : Sven Oesau, Yannick Verdie, Clément Jamin, Pierre Alliez // #ifndef CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H #define CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H #include <CGAL/license/Shape_detection.h> #include <CGAL/Random.h> #include <CGAL/Shape_detection/Efficient_RANSAC/Octree.h> #include <CGAL/Shape_detection/Efficient_RANSAC/Shape_base.h> #include <CGAL/Shape_detection/Efficient_RANSAC/Plane.h> // for octree ------------------------------ #include <boost/iterator/filter_iterator.hpp> #include <CGAL/bounding_box.h> #include <CGAL/Iterator_range.h> //---------- #include <vector> #include <cmath> #include <limits> #include <fstream> #include <sstream> #include <functional> // boost -------------- #include <CGAL/boost/iterator/counting_iterator.hpp> #include <boost/shared_ptr.hpp> #include <boost/make_shared.hpp> //--------------------- namespace CGAL { namespace Shape_detection { /*! \ingroup PkgShapeDetectionRANSAC \brief Shape detection algorithm based on the RANSAC method. Given a point set in 3D space with unoriented normals, sampled on surfaces, this class enables to detect subsets of connected points lying on the surface of primitive shapes. Each input point is assigned to either none or at most one detected primitive shape. The implementation follows \cgalCite{schnabel2007efficient}. \tparam Traits must be a model of `EfficientRANSACTraits`. 
*/ template <class Traits> class Efficient_RANSAC { public: /// \cond SKIP_IN_MANUAL struct Filter_unassigned_points { Filter_unassigned_points() : m_shape_index(dummy) {} Filter_unassigned_points(const std::vector<int> &shapeIndex) : m_shape_index(shapeIndex) {} bool operator()(std::size_t x) { if (x < m_shape_index.size()) return m_shape_index[x] == -1; else return true; // to prevent infinite incrementing } const std::vector<int>& m_shape_index; std::vector<int> dummy; }; typedef boost::filter_iterator<Filter_unassigned_points, boost::counting_iterator<std::size_t, boost::use_default, std::ptrdiff_t> > Point_index_iterator; ///< iterator for indices of points. /// \endcond /// \name Types /// @{ /// \cond SKIP_IN_MANUAL typedef typename Traits::Input_range::iterator Input_iterator; typedef typename Traits::FT FT; ///< number type. typedef typename Traits::Point_3 Point; ///< point type. typedef typename Traits::Vector_3 Vector; ///< vector type. /// \endcond typedef typename Traits::Input_range Input_range; ///< Model of the concept `Range` with random access iterators, providing input points and normals /// through the following two property maps. typedef typename Traits::Point_map Point_map; ///< Property map to access the location of an input point. typedef typename Traits::Normal_map Normal_map; ///< Property map to access the unoriented normal of an input point. typedef Shape_base<Traits> Shape; ///< Shape type. typedef Plane<Traits> Plane_shape; ///< %Plane shape type. #ifdef DOXYGEN_RUNNING typedef unspecified_type Shape_range; ///< `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Shape>`. typedef unspecified_type Plane_range; ///< `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Plane_shape>`. 
#else struct Shape_range : public Iterator_range< typename std::vector<boost::shared_ptr<Shape> >::const_iterator> { typedef Iterator_range< typename std::vector<boost::shared_ptr<Shape> >::const_iterator> Base; Shape_range(boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > extracted_shapes) : Base(make_range(extracted_shapes->begin(), extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {} private: boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > m_extracted_shapes; // keeps a reference to the shape vector }; struct Plane_range : public Iterator_range< typename std::vector<boost::shared_ptr<Plane_shape> >::const_iterator> { typedef Iterator_range< typename std::vector<boost::shared_ptr<Plane_shape> >::const_iterator> Base; Plane_range(boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > > extracted_shapes) : Base(make_range(extracted_shapes->begin(), extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {} private: boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > > m_extracted_shapes; // keeps a reference to the shape vector }; #endif #ifdef DOXYGEN_RUNNING typedef unspecified_type Point_index_range; ///< `Iterator_range` with a bidirectional iterator with value type `std::size_t` /// as indices into the input data that has not been assigned to a shape. /// As this range class has no `size()` method, the method /// `Efficient_RANSAC::number_of_unassigned_points()` is provided. #else typedef Iterator_range<Point_index_iterator> Point_index_range; #endif /// @} /// \name Parameters /// @{ /*! Parameters for the shape detection algorithm. They are explained in detail in Section \ref Shape_detection_RANSACParameters of the User Manual. */ struct Parameters { Parameters() : probability((FT) 0.01) , min_points((std::numeric_limits<std::size_t>::max)()) , epsilon(-1) , normal_threshold((FT) 0.9) , cluster_epsilon(-1) {} /*! Probability to control search endurance. %Default value is 0.05. 
A lower probability provides a higher reliability and determinism at the cost of longer running time due to a higher search endurance. It must belong to the interval [0, 1]. */ FT probability; /*! Minimum number of points in a shape. %Default value is 1% of total number of input points. It must belong to the interval [0, +inf). */ std::size_t min_points; /*! Maximum acceptable Euclidean distance between a point and a shape. %Default value is 1% of the bounding box diagonal. It must belong to the interval [0, +inf). */ FT epsilon; /*! Maximum threshold on the dot product between the estimated shape's normal and the point's normal, that is the cosine of the angle (cos(25°) = 0.9). %Default value is 0.9 (around 25 degrees). It must belong to the interval [0, 1]. */ FT normal_threshold; /*! Maximum acceptable Euclidean distance between points, which are assumed to be neighbors. %Default value is 1% of the bounding box diagonal. It must belong to the interval [0, +inf). */ FT cluster_epsilon; }; /// @} private: typedef internal::Octree<internal::DirectPointAccessor<Traits> > Direct_octree; typedef internal::Octree<internal::IndexedPointAccessor<Traits> > Indexed_octree; //--------------------------------------------typedef // Creates a function pointer for instancing shape instances. template <class ShapeT> static Shape *factory() { return new ShapeT; } public: /// \name Initialization /// @{ /*! Constructs an empty shape detection object. */ Efficient_RANSAC(Traits t = Traits()) : m_traits(t) , m_direct_octrees(nullptr) , m_global_octree(nullptr) , m_num_subsets(0) , m_num_available_points(0) , m_num_total_points(0) , m_valid_iterators(false) {} /*! Releases all memory allocated by this instance including shapes. */ ~Efficient_RANSAC() { clear(); } /*! Retrieves the traits class. */ const Traits& traits() const { return m_traits; } /*! Retrieves the point property map. */ const Point_map& point_map() const { return m_point_pmap; } /*! Retrieves the normal property map. 
*/ const Normal_map& normal() const { return m_normal_pmap; } Input_iterator input_iterator_first() const { return m_input_iterator_first; } Input_iterator input_iterator_beyond() const { return m_input_iterator_beyond; } /*! Sets the input data. The range must stay valid until the detection has been performed and the access to the results is no longer required. The data in the input is reordered by the methods `detect()` and `preprocess()`. This function first calls `clear()`. */ void set_input( Input_range& input_range, ///< Range of input data. Point_map point_map = Point_map(), ///< Property map to access the position of an input point. Normal_map normal_map = Normal_map() ///< Property map to access the normal of an input point. ) { m_point_pmap = point_map; m_normal_pmap = normal_map; m_input_iterator_first = input_range.begin(); m_input_iterator_beyond = input_range.end(); clear(); m_extracted_shapes = boost::make_shared<std::vector<boost::shared_ptr<Shape> > >(); m_num_available_points = m_num_total_points = std::distance( m_input_iterator_first, m_input_iterator_beyond); m_valid_iterators = true; } /*! Registers the shape type `ShapeType` in the detection engine that must inherit from `Shape_base`. For example, for registering a plane as detectable shape, you should call `ransac.add_shape_factory< Shape_detection::Plane<Traits> >();`. Note that if your call is within a template, you should add the `template` keyword just before `add_shape_factory`: `ransac.template add_shape_factory< Shape_detection::Plane<Traits> >();`. */ template <class Shape_type> void add_shape_factory() { m_shape_factories.push_back(factory<Shape_type>); } /*! Constructs internal data structures required for the shape detection. These structures only depend on the input data, i.e. the points and normal vectors. This method is called by `detect()`, if it was not called before by the user. 
*/ bool preprocess() { if (m_num_total_points == 0) return false; // Generation of subsets m_num_subsets = (std::size_t)(std::max<std::ptrdiff_t>)((std::ptrdiff_t) std::floor(std::log(double(m_num_total_points))/std::log(2.))-9, 2); // SUBSET GENERATION -> // approach with increasing subset sizes -> replace with octree later on Input_iterator last = m_input_iterator_beyond - 1; std::size_t remainingPoints = m_num_total_points; m_available_octree_sizes.resize(m_num_subsets); m_direct_octrees = new Direct_octree *[m_num_subsets]; for (int s = int(m_num_subsets) - 1;s >= 0;--s) { std::size_t subsetSize = remainingPoints; std::vector<std::size_t> indices(subsetSize); if (s) { subsetSize >>= 1; for (std::size_t i = 0;i<subsetSize;i++) { std::size_t index = get_default_random()(2); index = index + (i<<1); index = (index >= remainingPoints) ? remainingPoints - 1 : index; indices[i] = index; } // move points to the end of the point vector std::size_t j = subsetSize; do { j--; typename std::iterator_traits<Input_iterator>::value_type tmp = (*last); *last = m_input_iterator_first[indices[std::size_t(j)]]; m_input_iterator_first[indices[std::size_t(j)]] = tmp; last--; } while (j > 0); m_direct_octrees[s] = new Direct_octree( m_traits, last + 1, last + subsetSize + 1, m_point_pmap, m_normal_pmap, remainingPoints - subsetSize); } else m_direct_octrees[0] = new Direct_octree( m_traits, m_input_iterator_first, m_input_iterator_first + (subsetSize), m_point_pmap, m_normal_pmap, 0); m_available_octree_sizes[s] = subsetSize; m_direct_octrees[s]->createTree(m_options.cluster_epsilon); remainingPoints -= subsetSize; } m_global_octree = new Indexed_octree( m_traits, m_input_iterator_first, m_input_iterator_beyond, m_point_pmap, m_normal_pmap); m_global_octree->createTree(m_options.cluster_epsilon); return true; } /// @} /// \name Memory Management /// @{ /*! Removes all shape types registered for detection. */ void clear_shape_factories() { m_shape_factories.clear(); } /*! 
Frees memory allocated for the internal search structures but keeps the detected shapes. It invalidates the range retrieved using `unassigned_points()`. */ void clear_octrees() { // If there is no data yet, there are no data structures. if (!m_valid_iterators) return; if (m_global_octree) { delete m_global_octree; m_global_octree = nullptr; } if (m_direct_octrees) { for (std::size_t i = 0;i<m_num_subsets;i++) delete m_direct_octrees[i]; delete [] m_direct_octrees; m_direct_octrees = nullptr; } m_num_subsets = 0; } /*! Calls `clear_octrees()` and removes all detected shapes. All internal structures are cleaned, including formerly detected shapes. Thus iterators and ranges retrieved through `shapes()`, `planes()` and `indices_of_unassigned_points()` are invalidated. */ void clear() { // If there is no data yet, there are no data structures. if (!m_valid_iterators) return; std::vector<int>().swap(m_shape_index); m_extracted_shapes = boost::make_shared<std::vector<boost::shared_ptr<Shape> > >(); m_num_available_points = m_num_total_points; clear_octrees(); clear_shape_factories(); } /// @} /// \name Detection /// @{ /*! Performs the shape detection. Shape types considered during the detection are those registered using `add_shape_factory()`. \param options parameters for shape detection \param callback can be omitted if the algorithm should be run without any callback. It is called regularly when the algorithm is running: the current advancement (between 0.0 and 1.0) is passed as parameter. If it returns `true`, then the algorithm continues its execution normally; if it returns `false`, the algorithm is stopped. Note that this interruption may leave the class in an invalid state. \return `true` if shape types have been registered and input data has been set. Otherwise, `false` is returned. 
*/ bool detect(const Parameters &options = Parameters(), const std::function<bool(double)>& callback = std::function<bool(double)>()) { m_options = options; // No shape types for detection or no points provided, exit if (m_shape_factories.size() == 0 || (m_input_iterator_beyond - m_input_iterator_first) == 0) return false; if (m_num_subsets == 0 || m_global_octree == 0) { if (!preprocess()) return false; } if (callback && !callback(0.)) return false; // Reset data structures possibly used by former search m_extracted_shapes = boost::make_shared<std::vector<boost::shared_ptr<Shape> > >(); m_num_available_points = m_num_total_points; for (std::size_t i = 0;i<m_num_subsets;i++) { m_available_octree_sizes[i] = m_direct_octrees[i]->size(); } // Use bounding box diagonal as reference for default values Bbox_3 bbox = m_global_octree->boundingBox(); FT bbox_diagonal = (FT) CGAL::sqrt( (bbox.xmax() - bbox.xmin()) * (bbox.xmax() - bbox.xmin()) + (bbox.ymax() - bbox.ymin()) * (bbox.ymax() - bbox.ymin()) + (bbox.zmax() - bbox.zmin()) * (bbox.zmax() - bbox.zmin())); // Epsilon or cluster_epsilon have been set by the user? // If not, derive from bounding box diagonal m_options.epsilon = (m_options.epsilon < 0) ? bbox_diagonal * (FT) 0.01 : m_options.epsilon; m_options.cluster_epsilon = (m_options.cluster_epsilon < 0) ? bbox_diagonal * (FT) 0.01 : m_options.cluster_epsilon; // Minimum number of points has been set? m_options.min_points = (m_options.min_points >= m_num_available_points) ? (std::size_t)((FT)0.01 * m_num_available_points) : m_options.min_points; m_options.min_points = (m_options.min_points < 10) ? 
10 : m_options.min_points; // Initializing the shape index m_shape_index.assign(m_num_available_points, -1); // List of all randomly drawn candidates // with the minimum number of points std::vector<Shape *> candidates; // Identifying minimum number of samples std::size_t required_samples = 0; for (std::size_t i = 0;i<m_shape_factories.size();i++) { Shape *tmp = (Shape *) m_shape_factories[i](); required_samples = (std::max<std::size_t>)(required_samples, tmp->minimum_sample_size()); delete tmp; } std::size_t first_sample; // first sample for RANSAC FT best_expected = 0; // number of points that have been assigned to a shape std::size_t num_invalid = 0; std::size_t generated_candidates = 0; std::size_t failed_candidates = 0; std::size_t limit_failed_candidates = (std::max)(std::size_t(10000), std::size_t(m_input_iterator_beyond - m_input_iterator_first) / std::size_t(100)); bool force_exit = false; bool keep_searching = true; do { // main loop best_expected = 0; if (keep_searching) do { // Generate candidates //1. 
pick a point p1 randomly among available points std::set<std::size_t> indices; bool done = false; do { do first_sample = get_default_random()( static_cast<unsigned int>(m_num_available_points)); while (m_shape_index[first_sample] != -1); done = m_global_octree->drawSamplesFromCellContainingPoint( get(m_point_pmap, *(m_input_iterator_first + first_sample)), select_random_octree_level(), indices, m_shape_index, required_samples); if (callback && !callback(num_invalid / double(m_num_total_points))) return false; } while (m_shape_index[first_sample] != -1 || !done); generated_candidates++; //add candidate for each type of primitives for(typename std::vector<Shape *(*)()>::iterator it = m_shape_factories.begin(); it != m_shape_factories.end(); it++) { if (callback && !callback(num_invalid / double(m_num_total_points))) return false; Shape *p = (Shape *) (*it)(); //compute the primitive and says if the candidate is valid p->compute(indices, m_input_iterator_first, m_traits, m_point_pmap, m_normal_pmap, m_options.epsilon, m_options.normal_threshold); if (p->is_valid()) { improve_bound(p, m_num_available_points - num_invalid, 1, 500); //evaluate the candidate if(p->max_bound() >= m_options.min_points && p->score() > 0) { if (best_expected < p->expected_value()) best_expected = p->expected_value(); candidates.push_back(p); } else { failed_candidates++; delete p; } } else { failed_candidates++; delete p; } } if (failed_candidates >= limit_failed_candidates) { force_exit = true; } keep_searching = (stop_probability(m_options.min_points, m_num_available_points - num_invalid, generated_candidates, m_global_octree->maxLevel()) > m_options.probability); } while( !force_exit && stop_probability((std::size_t) best_expected, m_num_available_points - num_invalid, generated_candidates, m_global_octree->maxLevel()) > m_options.probability && keep_searching); // end of generate candidate if (force_exit) { break; } if (candidates.empty()) continue; // Now get the best candidate in the 
current set of all candidates // Note that the function sorts the candidates: // the best candidate is always the last element of the vector Shape *best_candidate = get_best_candidate(candidates, m_num_available_points - num_invalid); if (callback && !callback(num_invalid / double(m_num_total_points))) return false; // If search is done and the best candidate is too small, we are done. if (!keep_searching && best_candidate->m_score < m_options.min_points) break; if (!best_candidate) continue; best_candidate->m_indices.clear(); best_candidate->m_score = m_global_octree->score(best_candidate, m_shape_index, FT(3) * m_options.epsilon, m_options.normal_threshold); best_expected = static_cast<FT>(best_candidate->m_score); best_candidate->connected_component(best_candidate->m_indices, m_options.cluster_epsilon); if (callback && !callback(num_invalid / double(m_num_total_points))) return false; // check score against min_points and clear out candidates if too low if (best_candidate->indices_of_assigned_points().size() < m_options.min_points) { if (!(best_candidate->indices_of_assigned_points().empty())) for (std::size_t i = 0;i < candidates.size() - 1;i++) { if (best_candidate->is_same(candidates[i])) { delete candidates[i]; candidates[i] = nullptr; } } candidates.back() = nullptr; delete best_candidate; best_candidate = nullptr; if (callback && !callback(num_invalid / double(m_num_total_points))) return false; // Trimming candidates list std::size_t empty = 0, occupied = 0; while (empty < candidates.size()) { while (empty < candidates.size() && candidates[empty]) empty++; if (empty >= candidates.size()) break; if (occupied < empty) occupied = empty + 1; while (occupied < candidates.size() && !candidates[occupied]) occupied++; if (occupied >= candidates.size()) break; candidates[empty] = candidates[occupied]; candidates[occupied] = nullptr; empty++; occupied++; } candidates.resize(empty); if (callback && !callback(num_invalid / double(m_num_total_points))) return false; } 
else if (stop_probability((std::size_t) best_candidate->expected_value(), (m_num_available_points - num_invalid), generated_candidates, m_global_octree->maxLevel()) <= m_options.probability) { // Remove candidate from list candidates.back() = nullptr; //1. add best candidate to final result. m_extracted_shapes->push_back( boost::shared_ptr<Shape>(best_candidate)); if (callback && !callback(num_invalid / double(m_num_total_points))) return false; //2. remove the points const std::vector<std::size_t> &indices_points_best_candidate = best_candidate->indices_of_assigned_points(); // update generated candidates to reflect removal of points generated_candidates = std::size_t(std::pow (1.f - (indices_points_best_candidate.size() / float(m_num_available_points - num_invalid)), 3.f) * generated_candidates); //2.3 Remove the points from the subtrees for (std::size_t i = 0;i<indices_points_best_candidate.size();i++) { m_shape_index[indices_points_best_candidate.at(i)] = int(m_extracted_shapes->size()) - 1; num_invalid++; for (std::size_t j = 0;j<m_num_subsets;j++) { if (m_direct_octrees[j] && m_direct_octrees[j]->m_root) { std::size_t offset = m_direct_octrees[j]->offset(); if (offset <= indices_points_best_candidate.at(i) && (indices_points_best_candidate.at(i) - offset) < m_direct_octrees[j]->size()) { m_available_octree_sizes[j]--; } } } } failed_candidates = 0; best_expected = 0; if (callback && !callback(num_invalid / double(m_num_total_points))) return false; std::vector<std::size_t> subset_sizes(m_num_subsets); subset_sizes[0] = m_available_octree_sizes[0]; for (std::size_t i = 1;i<m_num_subsets;i++) { subset_sizes[i] = subset_sizes[i-1] + m_available_octree_sizes[i]; } //3. 
Remove points from candidates common with extracted primitive //#pragma omp parallel for best_expected = 0; for (std::size_t i=0;i< candidates.size()-1;i++) { if (candidates[i]) { candidates[i]->update_points(m_shape_index); candidates[i]->compute_bound( subset_sizes[candidates[i]->m_nb_subset_used - 1], m_num_available_points - num_invalid); if (candidates[i]->max_bound() < m_options.min_points) { delete candidates[i]; candidates[i] = nullptr; } else { best_expected = (candidates[i]->expected_value() > best_expected) ? candidates[i]->expected_value() : best_expected; } } } if (callback && !callback(num_invalid / double(m_num_total_points))) return false; std::size_t start = 0, end = candidates.size() - 1; while (start < end) { while (candidates[start] && start < end) start++; while (!candidates[end] && start < end) end--; if (!candidates[start] && candidates[end] && start < end) { candidates[start] = candidates[end]; candidates[end] = nullptr; start++; end--; } } if (candidates[end]) end++; candidates.resize(end); } else if (!keep_searching) ++ generated_candidates; if (callback && !callback(num_invalid / double(m_num_total_points))) return false; keep_searching = (stop_probability(m_options.min_points, m_num_available_points - num_invalid, generated_candidates, m_global_octree->maxLevel()) > m_options.probability); } while((keep_searching && FT(m_num_available_points - num_invalid) >= m_options.min_points) || best_expected >= m_options.min_points); // Clean up remaining candidates. for (std::size_t i = 0;i<candidates.size();i++) delete candidates[i]; candidates.resize(0); m_num_available_points -= num_invalid; return true; } /// @} /// \name Access /// @{ /*! Returns an `Iterator_range` with a bidirectional iterator with value type `boost::shared_ptr<Shape>` over the detected shapes in the order of detection. Depending on the chosen probability for the detection, the shapes are ordered with decreasing size. 
*/ Shape_range shapes() const { return Shape_range(m_extracted_shapes); } /*! Returns an `Iterator_range` with a bidirectional iterator with value type `boost::shared_ptr<Plane_shape>` over only the detected planes in the order of detection. Depending on the chosen probability for the detection, the planes are ordered with decreasing size. */ Plane_range planes() const { boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > > planes = boost::make_shared<std::vector<boost::shared_ptr<Plane_shape> > >(); for (std::size_t i = 0; i < m_extracted_shapes->size(); ++ i) { boost::shared_ptr<Plane_shape> pshape = boost::dynamic_pointer_cast<Plane_shape>((*m_extracted_shapes)[i]); // Ignore all shapes other than plane if (pshape != boost::shared_ptr<Plane_shape>()) planes->push_back (pshape); } return Plane_range(planes); } /*! Number of points not assigned to a shape. */ std::size_t number_of_unassigned_points() const { return m_num_available_points; } /*! Returns an `Iterator_range` with a bidirectional iterator with value type `std::size_t` as indices into the input data that has not been assigned to a shape. 
*/
  Point_index_range indices_of_unassigned_points() {
    // Predicate keeps only indices whose entry in m_shape_index is -1
    // (unassigned); indices past m_shape_index.size() also pass, which
    // prevents the filter iterator from incrementing forever at the end.
    Filter_unassigned_points fup(m_shape_index);

    Point_index_iterator p1 =
      boost::make_filter_iterator<Filter_unassigned_points>(
        fup,
        boost::counting_iterator<std::size_t, boost::use_default,
                                 std::ptrdiff_t>(0),
        boost::counting_iterator<std::size_t, boost::use_default,
                                 std::ptrdiff_t>(m_shape_index.size()));

    return make_range(p1, Point_index_iterator(p1.end()));
  }
  /// @}

private:
  // Uniformly random octree level in [0, maxLevel()], used to choose the
  // cell size from which minimal sample sets are drawn.
  int select_random_octree_level() {
    return (int) get_default_random()(
      static_cast<unsigned int>(m_global_octree->maxLevel() + 1));
  }

  // Sorts `candidates` by Compare_by_max_bound (best ends up last) and
  // repeatedly refines the score bounds (via improve_bound()) of every
  // candidate whose [min_bound, max_bound] interval still overlaps the
  // best one's, until the best candidate is separated from the rest.
  // Reorders the vector; returns its last element.
  Shape* get_best_candidate(std::vector<Shape* >& candidates,
                            const std::size_t num_available_points) {
    // Single candidate: nothing to compare against.
    if (candidates.size() == 1)
      return candidates.back();

    int index_worse_candidate = 0;
    bool improved = true;

    while (index_worse_candidate < (int)candidates.size() - 1 && improved) {
      improved = false;
      typename Shape::Compare_by_max_bound comp;

      std::sort(candidates.begin() + index_worse_candidate,
                candidates.end(),
                comp);

      // refine the best one
      improve_bound(candidates.back(),
                    num_available_points, m_num_subsets,
                    m_options.min_points);

      int position_stop;

      // Take all those intersecting the best one, check for equal ones
      for (position_stop = int(candidates.size()) - 1;
           position_stop > index_worse_candidate;
           position_stop--) {
        if (candidates.back()->min_bound() >
            candidates.at(position_stop)->max_bound())
          break;  // the intervals do not overlap anymore

        if (candidates.at(position_stop)->max_bound()
              <= m_options.min_points)
          break;  // the following candidate doesn't have enough points!
//if we reach this point, there is an overlap // between best one and position_stop //so request refining bound on position_stop improved |= improve_bound(candidates.at(position_stop), num_available_points, m_num_subsets, m_options.min_points); //test again after refined if (candidates.back()->min_bound() > candidates.at(position_stop)->max_bound()) break;//the intervals do not overlaps anymore } index_worse_candidate = position_stop; } return candidates.back(); } bool improve_bound(Shape *candidate, std::size_t num_available_points, std::size_t max_subset, std::size_t min_points) { if (candidate->m_nb_subset_used >= max_subset) return false; if (candidate->m_nb_subset_used >= m_num_subsets) return false; candidate->m_nb_subset_used = (candidate->m_nb_subset_used >= m_num_subsets) ? m_num_subsets - 1 : candidate->m_nb_subset_used; //what it does is add another subset and recompute lower and upper bound //the next subset to include is provided by m_nb_subset_used std::size_t num_points_evaluated = 0; for (std::size_t i=0;i<candidate->m_nb_subset_used;i++) num_points_evaluated += m_available_octree_sizes[i]; // need score of new subset as well as sum of // the score of the previous considered subset std::size_t new_score = 0; std::size_t new_sampled_points = 0; do { new_score = m_direct_octrees[candidate->m_nb_subset_used]->score( candidate, m_shape_index, m_options.epsilon, m_options.normal_threshold); candidate->m_score += new_score; num_points_evaluated += m_available_octree_sizes[candidate->m_nb_subset_used]; new_sampled_points += m_available_octree_sizes[candidate->m_nb_subset_used]; candidate->m_nb_subset_used++; } while (new_sampled_points < min_points && candidate->m_nb_subset_used < m_num_subsets); candidate->m_score = candidate->m_indices.size(); candidate->compute_bound(num_points_evaluated, num_available_points); return true; } inline FT stop_probability(std::size_t largest_candidate, std::size_t num_pts, std::size_t num_candidates, std::size_t 
octree_depth) const {
    // min(1, (1 - |C| / (num_pts * octree_depth * 4)) ^ num_candidates)
    return (std::min<FT>)(std::pow((FT) 1.f
                            - (FT) largest_candidate
                              / FT(num_pts * octree_depth * 4),
                            (int) num_candidates),
                          (FT) 1);
  }

private:
  // Detection parameters in effect; unset values are derived from the
  // bounding-box diagonal / point count at the start of detect().
  Parameters m_options;

  // Traits class.
  Traits m_traits;

  // Octrees built on input data for quick shape evaluation and
  // sample selection within an octree cell.
  Direct_octree **m_direct_octrees;  // one per point subset
  Indexed_octree *m_global_octree;
  std::vector<std::size_t> m_available_octree_sizes;
  std::size_t m_num_subsets;

  // maps index into points to assigned extracted primitive (-1 = unassigned)
  std::vector<int> m_shape_index;
  std::size_t m_num_available_points;
  std::size_t m_num_total_points;

  // gives the index of the subset of point i
  // NOTE(review): appears unused in this file — confirm before removal.
  std::vector<int> m_index_subsets;

  // Shared so Shape_range instances keep the vector alive independently.
  boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > >
    m_extracted_shapes;

  std::vector<Shape *(*)()> m_shape_factories;

  // iterators of input data
  bool m_valid_iterators;
  Input_iterator m_input_iterator_first, m_input_iterator_beyond;
  Point_map m_point_pmap;
  Normal_map m_normal_pmap;
};

}  // namespace Shape_detection

}  // namespace CGAL

#endif  // CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H
ocp_nlp_common.c
/* * Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren, * Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor, * Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan, * Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl * * This file is part of acados. * * The 2-Clause BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.;
 */

#include "acados/ocp_nlp/ocp_nlp_common.h"

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

// blasfeo
#include "blasfeo/include/blasfeo_common.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// hpipm
#include "hpipm/include/hpipm_d_ocp_qp_dim.h"
// acados
#include "acados/utils/mem.h"



/************************************************
 * config
 ************************************************/

// Number of bytes needed to store an ocp_nlp_config for a horizon of N
// intervals: the top-level struct, one QP-solver and one regularization
// config, N dynamics configs, N+1 cost and N+1 constraints configs, plus
// the pointer arrays indexing the per-stage configs.
// Must stay in sync with the layout used by ocp_nlp_config_assign().
int ocp_nlp_config_calculate_size(int N)
{
    int ii;

    int size = 0;

    // self
    size += sizeof(ocp_nlp_config);

    // qp solver
    size += 1 * ocp_qp_xcond_solver_config_calculate_size();

    // regularization
    size += ocp_nlp_reg_config_calculate_size();

    // dynamics
    size += N * sizeof(ocp_nlp_dynamics_config *);
    for (ii = 0; ii < N; ii++) size += ocp_nlp_dynamics_config_calculate_size();

    // cost
    size += (N + 1) * sizeof(ocp_nlp_cost_config *);
    for (ii = 0; ii <= N; ii++) size += ocp_nlp_cost_config_calculate_size();

    // constraints
    size += (N + 1) * sizeof(ocp_nlp_constraints_config *);
    for (ii = 0; ii <= N; ii++) size += ocp_nlp_constraints_config_calculate_size();

    return size;
}

// Carves an ocp_nlp_config out of caller-provided raw_memory (at least
// ocp_nlp_config_calculate_size(N) bytes) by advancing a char cursor over
// the same layout; sub-configs are assigned in place, nothing is malloc'd.
ocp_nlp_config *ocp_nlp_config_assign(int N, void *raw_memory)
{
    int ii;
    char *c_ptr = (char *) raw_memory;

    ocp_nlp_config *config = (ocp_nlp_config *) c_ptr;
    c_ptr += sizeof(ocp_nlp_config);

    config->N = N;

    // qp solver
    config->qp_solver = ocp_qp_xcond_solver_config_assign(c_ptr);
    c_ptr += ocp_qp_xcond_solver_config_calculate_size();

    // regularization
config->regularize = ocp_nlp_reg_config_assign(c_ptr);
    c_ptr += ocp_nlp_reg_config_calculate_size();

    // dynamics
    config->dynamics = (ocp_nlp_dynamics_config **) c_ptr;
    c_ptr += N * sizeof(ocp_nlp_dynamics_config *);
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii] = ocp_nlp_dynamics_config_assign(c_ptr);
        c_ptr += ocp_nlp_dynamics_config_calculate_size();
    }

    // cost
    config->cost = (ocp_nlp_cost_config **) c_ptr;
    c_ptr += (N + 1) * sizeof(ocp_nlp_cost_config *);
    for (ii = 0; ii <= N; ii++)
    {
        config->cost[ii] = ocp_nlp_cost_config_assign(c_ptr);
        c_ptr += ocp_nlp_cost_config_calculate_size();
    }

    // constraints
    config->constraints = (ocp_nlp_constraints_config **) c_ptr;
    c_ptr += (N + 1) * sizeof(ocp_nlp_constraints_config *);
    for (ii = 0; ii <= N; ii++)
    {
        config->constraints[ii] = ocp_nlp_constraints_config_assign(c_ptr);
        c_ptr += ocp_nlp_constraints_config_calculate_size();
    }

    return config;
}



/************************************************
 * dims
 ************************************************/

// Size of the ocp_nlp_dims struct itself (without module dims): six int
// arrays of length N+1 (nv, nx, nu, ni, nz, ns), the per-stage pointer
// arrays for dynamics/cost/constraints dims, the regularization dims, and
// 8 bytes of slack for the initial alignment done in
// ocp_nlp_dims_assign_self().
static int ocp_nlp_dims_calculate_size_self(int N)
{
    int size = 0;

    size += sizeof(ocp_nlp_dims);

    // nlp sizes
    size += 6 * (N + 1) * sizeof(int);  // nv, nx, nu, ni, nz, ns

    // dynamics
    size += N * sizeof(void *);

    // cost
    size += (N + 1) * sizeof(void *);

    // constraints
    size += (N + 1) * sizeof(void *);

    // regularization
    size += ocp_nlp_reg_dims_calculate_size(N);
    size += sizeof(ocp_nlp_reg_dims);

    size += 8;  // initial align

    return size;
}

// Total bytes needed for the dims of the whole NLP: the self part plus the
// dims of every dynamics (N), cost (N+1) and constraints (N+1) module and
// of the QP solver, as reported by the respective config callbacks.
int ocp_nlp_dims_calculate_size(void *config_)
{
    ocp_nlp_config *config = config_;
    int N = config->N;

    int ii;

    int size = 0;

    // self
    size += ocp_nlp_dims_calculate_size_self(N);

    // dynamics
    for (ii = 0; ii < N; ii++)
        size += config->dynamics[ii]->dims_calculate_size(config->dynamics[ii]);

    // cost
    for (ii = 0; ii <= N; ii++)
        size += config->cost[ii]->dims_calculate_size(config->cost[ii]);

    // constraints
    for (ii = 0; ii <= N; ii++)
        size += config->constraints[ii]->dims_calculate_size(config->constraints[ii]);

    // qp solver
    size +=
config->qp_solver->dims_calculate_size(config->qp_solver, N);

    return size;
}

// Carves the self part of ocp_nlp_dims out of raw_memory: 8-byte-aligns
// the cursor, places the struct and its int/pointer arrays, assigns the
// regularization dims, and zero-initializes all dimension arrays.
// The per-stage module dims pointers (dynamics/cost/constraints) are only
// reserved here; they are filled in by ocp_nlp_dims_assign().
static ocp_nlp_dims *ocp_nlp_dims_assign_self(int N, void *raw_memory)
{
    char *c_ptr = (char *) raw_memory;

    int ii;

    // initial align
    align_char_to(8, &c_ptr);

    // struct
    ocp_nlp_dims *dims = (ocp_nlp_dims *) c_ptr;
    c_ptr += sizeof(ocp_nlp_dims);

    // nv
    assign_and_advance_int(N + 1, &dims->nv, &c_ptr);
    // nx
    assign_and_advance_int(N + 1, &dims->nx, &c_ptr);
    // nu
    assign_and_advance_int(N + 1, &dims->nu, &c_ptr);
    // ni
    assign_and_advance_int(N + 1, &dims->ni, &c_ptr);
    // nz
    assign_and_advance_int(N + 1, &dims->nz, &c_ptr);
    // ns
    assign_and_advance_int(N + 1, &dims->ns, &c_ptr);

    // dynamics
    dims->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);

    // cost
    dims->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    // constraints
    dims->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    // regularization
    dims->regularize = ocp_nlp_reg_dims_assign(N, c_ptr);
    c_ptr += ocp_nlp_reg_dims_calculate_size(N);

    /* initialize qp_solver dimensions */
    // dims->qp_solver->N = N;
    // for (ii = 0; ii <= N; ii++)
    // {
    // TODO(dimitris): values below are needed for reformulation of QP when soft constraints
    // are not supported. Make this a bit more transparent as it clashes with nbx/nbu above.
    // dims->qp_solver->nsbx[ii] = 0;
    // dims->qp_solver->nsbu[ii] = 0;
    // dims->qp_solver->nsg[ii] = 0;
    // }

    // N
    dims->N = N;

    // initialize dimensions to zero by default
    // nv
    for (ii = 0; ii <= N; ii++) dims->nv[ii] = 0;
    // nx
    for (ii = 0; ii <= N; ii++) dims->nx[ii] = 0;
    // nu
    for (ii = 0; ii <= N; ii++) dims->nu[ii] = 0;
    // ni
    for (ii = 0; ii <= N; ii++) dims->ni[ii] = 0;
    // nz
    for (ii = 0; ii <= N; ii++) dims->nz[ii] = 0;
    // ns
    for (ii = 0; ii <= N; ii++) dims->ns[ii] = 0;
    // TODO initialize dims to zero by default also in modules !!!!!!!
    // assert: self layout must not exceed the size promised by calculate_size_self
    assert((char *) raw_memory + ocp_nlp_dims_calculate_size_self(N) >= c_ptr);

    return dims;
}


// Assign the full ocp_nlp_dims (self + all submodule dims + qp solver dims)
// inside raw_memory. Layout mirrors ocp_nlp_dims_calculate_size exactly.
ocp_nlp_dims *ocp_nlp_dims_assign(void *config_, void *raw_memory)
{
    ocp_nlp_config *config = config_;

    int N = config->N;

    int ii;

    char *c_ptr = (char *) raw_memory;

    // self
    ocp_nlp_dims *dims = ocp_nlp_dims_assign_self(N, c_ptr);
    c_ptr += ocp_nlp_dims_calculate_size_self(N);

    // dynamics
    for (ii = 0; ii < N; ii++)
    {
        dims->dynamics[ii] = config->dynamics[ii]->dims_assign(config->dynamics[ii], c_ptr);
        c_ptr += config->dynamics[ii]->dims_calculate_size(config->dynamics[ii]);
    }

    // cost
    for (ii = 0; ii <= N; ii++)
    {
        dims->cost[ii] = config->cost[ii]->dims_assign(config->cost[ii], c_ptr);
        c_ptr += config->cost[ii]->dims_calculate_size(config->cost[ii]);
    }

    // constraints
    for (ii = 0; ii <= N; ii++)
    {
        dims->constraints[ii] = config->constraints[ii]->dims_assign(config->constraints[ii], c_ptr);
        c_ptr += config->constraints[ii]->dims_calculate_size(config->constraints[ii]);
    }

    // qp solver
    dims->qp_solver = config->qp_solver->dims_assign(config->qp_solver, N, c_ptr);
    c_ptr += config->qp_solver->dims_calculate_size(config->qp_solver, N);

    // assert
    assert((char *) raw_memory + ocp_nlp_dims_calculate_size(config_) >= c_ptr);

    return dims;
}


// Set an optimization-variable dimension ("nx", "nu", "nz" or "ns") for ALL
// stages at once: value_array holds one int per stage (N+1 entries).
// Updates the nlp-level dims, keeps nv = nu + nx + 2*ns consistent, and
// propagates the new dimension to every submodule that needs it
// (cost, dynamics, constraints, qp solver, regularization).
void ocp_nlp_dims_set_opt_vars(void *config_, void *dims_, const char *field, const void* value_array)
{
    // to set dimension nx, nu, nz, ns (number of slacks = number of soft constraints)
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    int ii;

    int N = config->N;

    int *int_array = (int *) value_array;

    /* set ocp_nlp dimension */
    if (!strcmp(field, "nx"))
    {
        // opt var
        for (ii = 0; ii <= N; ii++)
        {
            // set nx
            dims->nx[ii] = int_array[ii];
            // update nv
            dims->nv[ii] = dims->nu[ii] + dims->nx[ii] + 2 * dims->ns[ii];
        }
        // cost
        for (int i = 0; i <= N; i++)
        {
            config->cost[i]->dims_set(config->cost[i], dims->cost[i], "nx", &int_array[i]);
        }
        // dynamics: each interval i needs its own nx and the successor's nx ("nx1")
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nx", &int_array[i]);
        }
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nx1", &int_array[i+1]);
        }
        // constraints
        for (int i = 0; i <= N; i++)
        {
            config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], "nx", &int_array[i]);
        }
        // qp solver
        for (int i = 0; i <= N; i++)
        {
            config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nx", &int_array[i]);
        }
        // regularization
        for (ii = 0; ii <= N; ii++)
        {
            config->regularize->dims_set(config->regularize, dims->regularize, ii, "nx", &int_array[ii]);
        }
    }
    else if (!strcmp(field, "nu"))
    {
        // nlp opt var
        for (int ii = 0; ii <= N; ii++)
        {
            // set nu
            dims->nu[ii] = int_array[ii];
            // update nv
            dims->nv[ii] = dims->nu[ii] + dims->nx[ii] + 2 * dims->ns[ii];
        }
        // cost
        for (int i = 0; i <= N; i++)
        {
            config->cost[i]->dims_set(config->cost[i], dims->cost[i], "nu", &int_array[i]);
        }
        // dynamics: own nu and successor's nu ("nu1")
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nu", &int_array[i]);
        }
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nu1", &int_array[i+1]);
        }
        // constraints
        for (int i = 0; i <= N; i++)
        {
            config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], "nu", &int_array[i]);
        }
        // qp solver
        for (int i = 0; i <= N; i++)
        {
            config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nu", &int_array[i]);
        }
        // regularization
        for (ii = 0; ii <= N; ii++)
        {
            config->regularize->dims_set(config->regularize, dims->regularize, ii, "nu", &int_array[ii]);
        }
    }
    else if (!strcmp(field, "nz"))
    {
        // nz does not enter nv, and the qp solver has no nz dimension
        // nlp opt var
        for (int ii = 0; ii <= N; ii++)
        {
            // set nz
            dims->nz[ii] = int_array[ii];
        }
        // cost
        for (int i = 0; i <= N; i++)
        {
            config->cost[i]->dims_set(config->cost[i], dims->cost[i], "nz", &int_array[i]);
        }
        // dynamics
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nz", &int_array[i]);
        }
        // constraints
        for (int i = 0; i <= N; i++)
        {
            config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], "nz", &int_array[i]);
        }
    }
    else if (!strcmp(field, "ns"))
    {
        // slacks: dynamics do not carry soft constraints, so no dynamics update here
        // nlp opt var
        for (int ii = 0; ii <= N; ii++)
        {
            // set ns
            dims->ns[ii] = int_array[ii];
            // update nv
            dims->nv[ii] = dims->nu[ii] + dims->nx[ii] + 2 * dims->ns[ii];
        }
        // cost
        for (int i = 0; i <= N; i++)
        {
            config->cost[i]->dims_set(config->cost[i], dims->cost[i], "ns", &int_array[i]);
        }
        // qp solver
        for (int i = 0; i <= N; i++)
        {
            config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "ns", &int_array[i]);
        }
    }
    else
    {
        printf("error: dims type not available in module ocp_nlp: %s", field);
        exit(1);
    }

#if 0
    /* set ocp_nlp submodule dimensions */
    if (strcmp(field, "ns")) // dynamics do not contain slack/soft constraints
    {
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], field, &int_array[i]);
        }
    }
    if (!strcmp(field, "nu"))
    {
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nu1", &int_array[i+1]);
        }
    }
    if (!strcmp(field, "nx"))
    {
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nx1", &int_array[i+1]);
        }
    }
    for (int i = 0; i <= N; i++) // cost
    {
        config->cost[i]->dims_set(config->cost[i], dims->cost[i], field, &int_array[i]);
    }
    for (int i = 0; i <= N; i++) // constraints
    {
        config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], field, &int_array[i]);
    }
    if (strcmp(field, "nz")) // qp_solver does not contain nz
    {
        for (int i = 0; i <= N; i++) // qp_solver
        {
            config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, &int_array[i]);
        }
    }
#endif

    return;
}


// Set a constraint dimension at a single stage and propagate the derived
// dimensions (ni, and the qp solver's bound/general-constraint counts).
void ocp_nlp_dims_set_constraints(void *config_, void *dims_, int stage, const char *field, const void* value_)
{
    // to set dimension nbx, nbu, ng, nh, nq (quadratic over nonlinear)
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    int
    *int_value = (int *) value_;

    int i = stage;

    // set in constraint module
    config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], field, int_value);

    // update ni in ocp_nlp dimensions
    config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "ni", &dims->ni[i]);

    // update qp_solver dims
    if ( (!strcmp(field, "nbx")) || (!strcmp(field, "nbu")) )
    {
        // qp solver
        config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
        // regularization
        config->regularize->dims_set(config->regularize, dims->regularize, i, (char *) field, int_value);
    }
    else if ( (!strcmp(field, "nsbx")) || (!strcmp(field, "nsbu")) )
    {
        // qp solver
        config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
    }
    else if ( (!strcmp(field, "ng")) || (!strcmp(field, "nh")) || (!strcmp(field, "nphi")))
    {
        // nonlinear/general constraints map to a combined "ng" in the QP;
        // query the constraint module for the aggregated value
        // update ng_qp_solver in qp_solver
        int ng_qp_solver;
        config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "ng_qp_solver", &ng_qp_solver);
        // qp solver
        config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "ng", &ng_qp_solver);
        // regularization
        config->regularize->dims_set(config->regularize, dims->regularize, i, "ng", &ng_qp_solver);
    }
    else if ( (!strcmp(field, "nsg")) || (!strcmp(field, "nsh")) || (!strcmp(field, "nsphi")))
    {
        // update ng_qp_solver in qp_solver
        int nsg_qp_solver;
        config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "nsg_qp_solver", &nsg_qp_solver);
        // qp solver
        config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nsg", &nsg_qp_solver);
    }
    else if ( (!strcmp(field, "nbxe")) || (!strcmp(field, "nbue")) )
    {
        // qp solver
        config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
    }
    else if ( (!strcmp(field, "nge")) || (!strcmp(field, "nhe")) || (!strcmp(field, "nphie")))
    {
        // update ng_qp_solver in qp_solver
        int ng_qp_solver;
        config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "nge_qp_solver", &ng_qp_solver);
        // qp solver
        config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nge", &ng_qp_solver);
    }
    // NOTE(review): unrecognized fields fall through silently here (no else) —
    // the constraint module's dims_set above is expected to report them; confirm.

    return;
}


// Forward a cost dimension (e.g. ny) to the cost module of one stage.
void ocp_nlp_dims_set_cost(void *config_, void *dims_, int stage, const char *field, const void* value_)
{
    // to set dimension ny (output)
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    int *int_value = (int *) value_;

    config->cost[stage]->dims_set(config->cost[stage], dims->cost[stage], field, int_value);
}


// Forward a dynamics dimension to the dynamics module of one stage.
void ocp_nlp_dims_set_dynamics(void *config_, void *dims_, int stage, const char *field, const void* value)
{
    // mainly for gnsf dimensions
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    int *int_value = (int *) value;

    config->dynamics[stage]->dims_set(config->dynamics[stage], dims->dynamics[stage], field, int_value);
}



/************************************************
 * in
 ************************************************/

// Byte size of the ocp_nlp_in struct itself (Ts array plus the three
// submodule pointer arrays), without the submodule models.
int ocp_nlp_in_calculate_size_self(int N)
{
    int size = sizeof(ocp_nlp_in);

    size += N * sizeof(double);  // Ts

    size += N * sizeof(void *);  // dynamics

    size += (N + 1) * sizeof(void *);  // cost

    size += (N + 1) * sizeof(void *);  // constraints

    return size;
}


// Total byte size for ocp_nlp_in including all stage-wise submodule models.
// Must stay in exact sync with ocp_nlp_in_assign.
int ocp_nlp_in_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims)
{
    int ii;

    int N = dims->N;

    int size = ocp_nlp_in_calculate_size_self(N);

    // dynamics
    for (ii = 0; ii < N; ii++)
    {
        size += config->dynamics[ii]->model_calculate_size(config->dynamics[ii], dims->dynamics[ii]);
    }

    // cost
    for (ii = 0; ii <= N; ii++)
    {
        size += config->cost[ii]->model_calculate_size(config->cost[ii], dims->cost[ii]);
    }

    // constraints
    for (ii = 0; ii <= N; ii++)
    {
        size += config->constraints[ii]->model_calculate_size(config->constraints[ii], dims->constraints[ii]);
    }

    size += 8;  // initial align
    size += 8;  // final align

    // make_int_multiple_of(64, &size);

    return size;
}


// Carve the ocp_nlp_in struct, its Ts array and the submodule pointer
// arrays out of raw_memory; the submodule models themselves are assigned
// by ocp_nlp_in_assign.
ocp_nlp_in *ocp_nlp_in_assign_self(int N, void *raw_memory)
{
    char *c_ptr = (char *) raw_memory;

    // initial align
    align_char_to(8, &c_ptr);

    //
    // struct
    ocp_nlp_in *in = (ocp_nlp_in *) c_ptr;
    c_ptr += sizeof(ocp_nlp_in);

    // Ts
    assign_and_advance_double(N, &in->Ts, &c_ptr);

    // dynamics
    in->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);

    // cost
    in->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    // constraints
    in->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    align_char_to(8, &c_ptr);

    return in;
}


// Assign the full ocp_nlp_in (self + all stage-wise submodule models)
// inside raw_memory. Layout mirrors ocp_nlp_in_calculate_size exactly.
ocp_nlp_in *ocp_nlp_in_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, void *raw_memory)
{
    int ii;

    int N = dims->N;

    char *c_ptr = (char *) raw_memory;

    // struct
    ocp_nlp_in *in = ocp_nlp_in_assign_self(N, c_ptr);
    c_ptr += ocp_nlp_in_calculate_size_self(N);

    // dynamics
    for (ii = 0; ii < N; ii++)
    {
        in->dynamics[ii] = config->dynamics[ii]->model_assign(config->dynamics[ii], dims->dynamics[ii], c_ptr);
        c_ptr += config->dynamics[ii]->model_calculate_size(config->dynamics[ii], dims->dynamics[ii]);
    }

    // cost
    for (ii = 0; ii <= N; ii++)
    {
        in->cost[ii] = config->cost[ii]->model_assign(config->cost[ii], dims->cost[ii], c_ptr);
        c_ptr += config->cost[ii]->model_calculate_size(config->cost[ii], dims->cost[ii]);
    }

    // constraints
    for (ii = 0; ii <= N; ii++)
    {
        in->constraints[ii] = config->constraints[ii]->model_assign(config->constraints[ii], dims->constraints[ii], c_ptr);
        c_ptr += config->constraints[ii]->model_calculate_size(config->constraints[ii], dims->constraints[ii]);
    }

    assert((char *) raw_memory + ocp_nlp_in_calculate_size(config, dims) >= c_ptr);

    return in;
}



/************************************************
 * out
 ************************************************/

// Byte size for the NLP solution struct: primal variables ux, algebraic
// variables z, dynamics multipliers pi, inequality multipliers lam and
// slacks t (lam/t have 2*ni entries: lower and upper side).
// Must stay in exact sync with ocp_nlp_out_assign.
int ocp_nlp_out_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims)
{
    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;
    int *nz = dims->nz;

    int size = sizeof(ocp_nlp_out);

    size += 4 * (N + 1) * sizeof(struct blasfeo_dvec);  // ux, lam, t, z
    size += 1 * N * sizeof(struct blasfeo_dvec);        // pi

    for (int ii = 0; ii < N; ii++)
    {
        size += 1 * blasfeo_memsize_dvec(nv[ii]);      // ux
        size += 1 * blasfeo_memsize_dvec(nz[ii]);      // z
        size += 2 * blasfeo_memsize_dvec(2 * ni[ii]);  // lam, t
        size += 1 * blasfeo_memsize_dvec(nx[ii + 1]);  // pi
    }
    // terminal stage: no pi (there is no interval N -> N+1)
    size += 1 * blasfeo_memsize_dvec(nv[N]);      // ux
    size += 1 * blasfeo_memsize_dvec(nz[N]);      // z
    size += 2 * blasfeo_memsize_dvec(2 * ni[N]);  // lam, t

    size += 8;   // initial align
    size += 8;   // blasfeo_struct align
    size += 64;  // blasfeo_mem align

    make_int_multiple_of(8, &size);

    return size;
}


// Assign an ocp_nlp_out inside raw_memory and zero the whole solution.
// Layout mirrors ocp_nlp_out_calculate_size exactly.
ocp_nlp_out *ocp_nlp_out_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, void *raw_memory)
{
    // loop index
    int ii;

    // extract sizes
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;
    int *nz = dims->nz;

    char *c_ptr = (char *) raw_memory;

    // initial align
    align_char_to(8, &c_ptr);

    ocp_nlp_out *out = (ocp_nlp_out *) c_ptr;
    c_ptr += sizeof(ocp_nlp_out);

    // blasfeo_struct align
    align_char_to(8, &c_ptr);

    // blasfeo_dvec_struct
    // ux
    assign_and_advance_blasfeo_dvec_structs(N + 1, &out->ux, &c_ptr);
    // z
    assign_and_advance_blasfeo_dvec_structs(N + 1, &out->z, &c_ptr);
    // pi
    assign_and_advance_blasfeo_dvec_structs(N, &out->pi, &c_ptr);
    // lam
    assign_and_advance_blasfeo_dvec_structs(N + 1, &out->lam, &c_ptr);
    // t
    assign_and_advance_blasfeo_dvec_structs(N + 1, &out->t, &c_ptr);

    // blasfeo_mem align
    align_char_to(64, &c_ptr);

    // blasfeo_dvec
    // ux
    for (int ii = 0; ii <= N; ++ii)
    {
        assign_and_advance_blasfeo_dvec_mem(nv[ii], out->ux + ii, &c_ptr);
    }
    // z
    for (int ii = 0; ii <= N; ++ii)
    {
        assign_and_advance_blasfeo_dvec_mem(nz[ii], out->z + ii, &c_ptr);
    }
    // pi
    for (int ii = 0; ii < N; ++ii)
    {
        assign_and_advance_blasfeo_dvec_mem(nx[ii + 1], out->pi + ii, &c_ptr);
    }
    // lam
    for (int ii = 0; ii <= N; ++ii)
    {
        assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], out->lam + ii, &c_ptr);
    }
    // t
    for (int ii = 0; ii <= N; ++ii)
    {
        assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], out->t + ii, &c_ptr);
    }

    // zero solution
    for(ii=0; ii<N; ii++)
    {
        blasfeo_dvecse(nv[ii], 0.0, out->ux+ii, 0);
        blasfeo_dvecse(nz[ii], 0.0, out->z+ii, 0);
        blasfeo_dvecse(nx[ii+1], 0.0, out->pi+ii, 0);
        blasfeo_dvecse(2*ni[ii], 0.0, out->lam+ii, 0);
        blasfeo_dvecse(2*ni[ii], 0.0, out->t+ii, 0);
    }
    // terminal stage (no pi)
    ii = N;
    blasfeo_dvecse(nv[ii], 0.0, out->ux+ii, 0);
    blasfeo_dvecse(nz[ii], 0.0, out->z+ii, 0);
    blasfeo_dvecse(2*ni[ii], 0.0, out->lam+ii, 0);
    blasfeo_dvecse(2*ni[ii], 0.0, out->t+ii, 0);

    assert((char *) raw_memory + ocp_nlp_out_calculate_size(config, dims) >= c_ptr);

    return out;
}



/************************************************
 * options
 ************************************************/

// Byte size for the nlp options struct plus all submodule options.
// Must stay in exact sync with ocp_nlp_opts_assign.
int ocp_nlp_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int N = dims->N;

    int size = 0;

    size += sizeof(ocp_nlp_opts);

    size += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);

    size += config->regularize->opts_calculate_size();

    // dynamics
    size += N * sizeof(void *);
    for (int ii = 0; ii < N; ii++)
    {
        size += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
    }

    // cost
    size += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        size += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
    }

    // constraints
    size += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        size += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
    }

    size += 2*8;  // 2 aligns

    return size;
}


// Assign the nlp options struct and all submodule options inside raw_memory.
// Layout mirrors ocp_nlp_opts_calculate_size exactly.
void *ocp_nlp_opts_assign(void *config_, void *dims_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int N =
    dims->N;

    char *c_ptr = (char *) raw_memory;

    align_char_to(8, &c_ptr);

    ocp_nlp_opts *opts = (ocp_nlp_opts *) c_ptr;
    c_ptr += sizeof(ocp_nlp_opts);

    /* pointers to substructures */
    opts->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);

    opts->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    opts->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    align_char_to(8, &c_ptr);

    /* substructures */
    opts->qp_solver_opts = qp_solver->opts_assign(qp_solver, dims->qp_solver, c_ptr);
    c_ptr += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);

    opts->regularize = config->regularize->opts_assign(c_ptr);
    c_ptr += config->regularize->opts_calculate_size();

    // dynamics
    for (int ii = 0; ii < N; ii++)
    {
        opts->dynamics[ii] = dynamics[ii]->opts_assign(dynamics[ii], dims->dynamics[ii], c_ptr);
        c_ptr += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
    }

    // cost
    for (int ii = 0; ii <= N; ii++)
    {
        opts->cost[ii] = cost[ii]->opts_assign(cost[ii], dims->cost[ii], c_ptr);
        c_ptr += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
    }

    // constraints
    for (int ii = 0; ii <= N; ii++)
    {
        opts->constraints[ii] = constraints[ii]->opts_assign(constraints[ii], dims->constraints[ii], c_ptr);
        c_ptr += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
    }

    assert((char *) raw_memory + ocp_nlp_opts_calculate_size(config, dims) >= c_ptr);

    return opts;
}


// Set default option values for the nlp level and recursively for all
// submodules (qp solver, regularization, dynamics, cost, constraints).
void ocp_nlp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_opts *opts = opts_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;
    ocp_nlp_reg_config *regularize = config->regularize;

    int ii;

    int N = dims->N;

    opts->reuse_workspace = 1;
#if defined(ACADOS_WITH_OPENMP)
    opts->num_threads = ACADOS_NUM_THREADS;
#endif

    // full Newton step, no Levenberg-Marquardt regularization by default
    opts->globalization = FIXED_STEP;
    opts->step_length = 1.0;
    opts->levenberg_marquardt = 0.0;

    /* submodules opts */
    // qp solver
    qp_solver->opts_initialize_default(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    // regularization
    regularize->opts_initialize_default(regularize, dims->regularize, opts->regularize);

    // dynamics
    for (ii = 0; ii < N; ii++)
    {
        dynamics[ii]->opts_initialize_default(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
    }

    // cost
    for (ii = 0; ii <= N; ii++)
    {
        cost[ii]->opts_initialize_default(cost[ii], dims->cost[ii], opts->cost[ii]);
    }

    // constraints
    for (ii = 0; ii <= N; ii++)
    {
        constraints[ii]->opts_initialize_default(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
    }

    return;
}


// Re-derive internal option values after user changes, for the qp solver
// and all stage-wise submodules.
void ocp_nlp_opts_update(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_opts *opts = opts_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int ii;

    int N = dims->N;

    qp_solver->opts_update(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    // dynamics
    for (ii = 0; ii < N; ii++)
    {
        dynamics[ii]->opts_update(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
    }

    // cost
    for (ii = 0; ii <= N; ii++)
    {
        cost[ii]->opts_update(cost[ii], dims->cost[ii], opts->cost[ii]);
    }

    // constraints
    for (ii = 0; ii <= N; ii++)
    {
        constraints[ii]->opts_update(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
    }

    return;
}


// Set a single option by name. Fields with a "qp_" prefix are forwarded
// to the qp solver (with the prefix stripped); all other fields are
// nlp-level options or fan out to the submodules (e.g. "exact_hess").
void ocp_nlp_opts_set(void *config_, void *opts_, const char *field, void* value)
{
    ocp_nlp_opts *opts = (ocp_nlp_opts *) opts_;
    ocp_nlp_config *config = config_;

    int ii;

    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;

    // extract module name, i.e. substring in field before '_'
    char *char_ = strchr(field, '_');
    if (char_!=NULL)
    {
        module_length = char_-field;
        for (ii=0; ii<module_length; ii++)
            module[ii] = field[ii];
        module[module_length] = '\0';  // add end of string
        ptr_module = module;
    }

    // pass options to QP module
    if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
    {
        config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, field+module_length+1, value);
    }
    else // nlp opts
    {
        if (!strcmp(field, "reuse_workspace"))
        {
            int* reuse_workspace = (int *) value;
            opts->reuse_workspace = *reuse_workspace;
        }
        else if (!strcmp(field, "num_threads"))
        {
            int* num_threads = (int *) value;
            opts->num_threads = *num_threads;
        }
        else if (!strcmp(field, "step_length"))
        {
            double* step_length = (double *) value;
            opts->step_length = *step_length;
        }
        else if (!strcmp(field, "globalization"))
        {
            char* globalization = (char *) value;
            if (!strcmp(globalization, "fixed_step"))
            {
                opts->globalization = FIXED_STEP;
            }
            else if (!strcmp(globalization, "merit_backtracking"))
            {
                opts->globalization = MERIT_BACKTRACKING;
            }
            else
            {
                printf("\nerror: ocp_nlp_opts_set: not supported value for globalization, got: %s\n", globalization);
                exit(1);
            }
        }
        else if (!strcmp(field, "levenberg_marquardt"))
        {
            double* levenberg_marquardt = (double *) value;
            opts->levenberg_marquardt = *levenberg_marquardt;
        }
        else if (!strcmp(field, "exact_hess"))
        {
            // toggle exact Hessian contributions in all modules at once
            int N = config->N;
            // cost
            for (ii=0; ii<=N; ii++)
                config->cost[ii]->opts_set(config->cost[ii], opts->cost[ii], "exact_hess", value);
            // dynamics
            for (ii=0; ii<N; ii++)
                config->dynamics[ii]->opts_set(config->dynamics[ii], opts->dynamics[ii], "compute_hess", value);
            // constraints
            for (ii=0; ii<=N; ii++)
                config->constraints[ii]->opts_set(config->constraints[ii], opts->constraints[ii], "compute_hess", value);
        }
        // selectively turn on exact hessian contributions
        else if (!strcmp(field, "exact_hess_cost"))
        {
            int N = config->N;
            for (ii=0; ii<=N; ii++)
                config->cost[ii]->opts_set(config->cost[ii], opts->cost[ii], "exact_hess", value);
        }
        else if (!strcmp(field, "exact_hess_dyn"))
        {
            int N = config->N;
            for (ii=0; ii<N; ii++)
                config->dynamics[ii]->opts_set(config->dynamics[ii], opts->dynamics[ii], "compute_hess", value);
        }
        else if (!strcmp(field, "exact_hess_constr"))
        {
            int N = config->N;
            for (ii=0; ii<=N; ii++)
                config->constraints[ii]->opts_set(config->constraints[ii], opts->constraints[ii], "compute_hess", value);
        }
        else
        {
            printf("\nerror: ocp_nlp_opts_set: wrong field: %s\n", field);
            exit(1);
        }
    }

    return;
}


// Set a submodule option at one stage. The field must be prefixed with the
// module name ("dynamics_", "cost_" or "constraints_"); the prefix is
// stripped before forwarding.
void ocp_nlp_opts_set_at_stage(void *config_, void *opts_, int stage, const char *field, void* value)
{
    ocp_nlp_opts *opts = (ocp_nlp_opts *) opts_;
    ocp_nlp_config *config = config_;

    int ii;

    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;

    // extract module name
    char *char_ = strchr(field, '_');
    if (char_!=NULL)
    {
        module_length = char_-field;
        for (ii=0; ii<module_length; ii++)
            module[ii] = field[ii];
        module[module_length] = '\0';  // add end of string
        ptr_module = module;
    }

    // pass options to dynamics module
    if ( ptr_module!=NULL && (!strcmp(ptr_module, "dynamics")) )
    {
        config->dynamics[stage]->opts_set( config->dynamics[stage], opts->dynamics[stage], field+module_length+1, value );
    }
    // pass options to cost module
    else if ( ptr_module!=NULL && (!strcmp(ptr_module, "cost")) )
    {
        config->cost[stage]->opts_set( config->cost[stage], opts->cost[stage], field+module_length+1, value);
    }
    // pass options to constraint module
    else if ( ptr_module!=NULL && (!strcmp(ptr_module, "constraints")) )
    {
        config->constraints[stage]->opts_set( config->constraints[stage], opts->constraints[stage], (char *) field+module_length+1, value);
    }
    else
    {
        printf("\nerror: ocp_nlp_opts_set_at_stage: wrong field: %s\n", field);
        exit(1);
    }

    return;
}



/************************************************
 * memory
 ************************************************/

// Byte size for the nlp memory: QP in/out, qp solver and regularization
// memory, all submodule memories, and the blasfeo work vectors/matrices.
// Must stay in exact sync with ocp_nlp_memory_assign.
int ocp_nlp_memory_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts)
{
    ocp_qp_xcond_solver_config
    *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nz = dims->nz;
    int *nu = dims->nu;
    int *ni = dims->ni;

    int size = sizeof(ocp_nlp_memory);

    // qp in
    size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // qp out
    size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    // qp solver
    size += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    // regularization
    size += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize);

    // dynamics
    size += N * sizeof(void *);
    for (int ii = 0; ii < N; ii++)
    {
        size += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
    }

    // cost
    size += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        size += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
    }

    // constraints
    size += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        size += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
    }

    size += (N+1)*sizeof(bool);  // set_sim_guess

    size += (N+1)*sizeof(struct blasfeo_dmat);  // dzduxt
    size += 6*(N+1)*sizeof(struct blasfeo_dvec);  // cost_grad ineq_fun ineq_adj dyn_adj sim_guess z_alg
    size += 1*N*sizeof(struct blasfeo_dvec);  // dyn_fun

    for (int ii = 0; ii < N; ii++)
    {
        size += 1*blasfeo_memsize_dmat(nu[ii]+nx[ii], nz[ii]);  // dzduxt
        size += 1*blasfeo_memsize_dvec(nz[ii]);  // z_alg
        size += 2*blasfeo_memsize_dvec(nv[ii]);  // cost_grad ineq_adj
        size += 1*blasfeo_memsize_dvec(nu[ii] + nx[ii]);  // dyn_adj
        size += 1*blasfeo_memsize_dvec(nx[ii + 1]);  // dyn_fun
        size += 1*blasfeo_memsize_dvec(2 * ni[ii]);  // ineq_fun
        size += 1*blasfeo_memsize_dvec(nx[ii] + nz[ii]);  // sim_guess
    }
    // terminal stage: same vectors except dyn_fun (no interval after N)
    size += 1*blasfeo_memsize_dmat(nu[N]+nx[N], nz[N]);  // dzduxt
    size += 1*blasfeo_memsize_dvec(nz[N]);  // z_alg
    size += 2*blasfeo_memsize_dvec(nv[N]);  // cost_grad ineq_adj
    size += 1*blasfeo_memsize_dvec(nu[N] + nx[N]);  // dyn_adj
    size += 1*blasfeo_memsize_dvec(2 * ni[N]);  // ineq_fun
    size += 1*blasfeo_memsize_dvec(nx[N] + nz[N]);  // sim_guess

    size += 8;   // initial align
    size += 8;   // middle align
    size += 8;   // blasfeo_struct align
    size += 64;  // blasfeo_mem align

    make_int_multiple_of(8, &size);

    return size;
}


// Assign the nlp memory inside raw_memory: substructures first (QP in/out,
// qp solver, regularization, per-stage submodule memories), then the
// blasfeo struct headers, then the aligned blasfeo data.
// Layout mirrors ocp_nlp_memory_calculate_size exactly.
ocp_nlp_memory *ocp_nlp_memory_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts, void *raw_memory)
{
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    // extract sizes
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nz = dims->nz;
    int *nu = dims->nu;
    int *ni = dims->ni;

    char *c_ptr = (char *) raw_memory;

    // initial align
    align_char_to(8, &c_ptr);

    // struct
    ocp_nlp_memory *mem = (ocp_nlp_memory *) c_ptr;
    c_ptr += sizeof(ocp_nlp_memory);

    /* pointers to substructures */
    // dynamics
    mem->dynamics = (void **) c_ptr;
    c_ptr += N*sizeof(void *);

    // cost
    mem->cost = (void **) c_ptr;
    c_ptr += (N+1)*sizeof(void *);

    // constraints
    mem->constraints = (void **) c_ptr;
    c_ptr += (N+1)*sizeof(void *);

    // middle align
    align_char_to(8, &c_ptr);

    /* substructures */
    // qp in
    mem->qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // qp out
    mem->qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    // QP solver
    mem->qp_solver_mem = qp_solver->memory_assign(qp_solver, dims->qp_solver, opts->qp_solver_opts, c_ptr);
    c_ptr += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    // regularization
    mem->regularize_mem = config->regularize->memory_assign(config->regularize, dims->regularize, opts->regularize, c_ptr);
    c_ptr += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize);

    // dynamics
    for (int ii = 0; ii < N; ii++)
    {
        mem->dynamics[ii] = dynamics[ii]->memory_assign(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii], c_ptr);
        c_ptr += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
    }

    // cost
    for (int ii = 0; ii <= N; ii++)
    {
        mem->cost[ii] = cost[ii]->memory_assign(cost[ii], dims->cost[ii], opts->cost[ii], c_ptr);
        c_ptr += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
    }

    // constraints
    for (int ii = 0; ii <= N; ii++)
    {
        mem->constraints[ii] = constraints[ii]->memory_assign(constraints[ii], dims->constraints[ii], opts->constraints[ii], c_ptr);
        c_ptr += constraints[ii]->memory_calculate_size( constraints[ii], dims->constraints[ii], opts->constraints[ii]);
    }

    // blasfeo_struct align
    align_char_to(8, &c_ptr);

    // dzduxt
    assign_and_advance_blasfeo_dmat_structs(N + 1, &mem->dzduxt, &c_ptr);
    // z_alg
    assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->z_alg, &c_ptr);
    // cost_grad
    assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->cost_grad, &c_ptr);
    // ineq_fun
    assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->ineq_fun, &c_ptr);
    // ineq_adj
    assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->ineq_adj, &c_ptr);
    // dyn_fun
    assign_and_advance_blasfeo_dvec_structs(N, &mem->dyn_fun, &c_ptr);
    // dyn_adj
    assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->dyn_adj, &c_ptr);
    // sim_guess
    assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->sim_guess, &c_ptr);
    // set_sim_guess
    assign_and_advance_bool(N+1, &mem->set_sim_guess, &c_ptr);

    // no simulation guess provided by default
    for (int ii = 0; ii <= N; ++ii)
    {
        mem->set_sim_guess[ii] = false;
    }

    // blasfeo_mem align
    align_char_to(64, &c_ptr);

    // dzduxt
    for (int ii=0; ii<=N; ii++)
    {
        assign_and_advance_blasfeo_dmat_mem(nu[ii]+nx[ii], nz[ii], mem->dzduxt+ii, &c_ptr);
    }
    // z_alg
    for (int ii=0; ii<=N; ii++)
    {
        blasfeo_create_dvec(nz[ii], mem->z_alg+ii, c_ptr);
        c_ptr += blasfeo_memsize_dvec(nz[ii]);
    }
    // cost_grad
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nv[ii], mem->cost_grad + ii, &c_ptr);
    }
    // ineq_fun
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], mem->ineq_fun + ii, &c_ptr);
    }
    // ineq_adj
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nv[ii], mem->ineq_adj + ii, &c_ptr);
    }
    // dyn_fun
    for (int ii = 0; ii < N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nx[ii + 1], mem->dyn_fun + ii, &c_ptr);
    }
    // dyn_adj
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nu[ii] + nx[ii], mem->dyn_adj + ii, &c_ptr);
    }
    // sim_guess
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nx[ii] + nz[ii], mem->sim_guess + ii, &c_ptr);
        // set to 0;
        blasfeo_dvecse(nx[ii] + nz[ii], 0.0, mem->sim_guess+ii, 0);
        // printf("sim_guess ii %d: %p\n", ii, mem->sim_guess+ii);
    }

    // printf("created memory %p\n", mem);

    return mem;
}



/************************************************
 * workspace
 ************************************************/

// Byte size for the nlp workspace. With reuse_workspace and no OpenMP the
// submodule workspaces overlap, so only the largest one is counted;
// otherwise (OpenMP, or reuse disabled) they are summed.
// Must stay in exact sync with ocp_nlp_workspace_assign.
int ocp_nlp_workspace_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts)
{
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int ii;

    int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    int size = 0;
    int size_tmp = 0;
    int tmp;

    // nlp
    size += sizeof(ocp_nlp_workspace);

    // tmp_nlp_out
    size += ocp_nlp_out_calculate_size(config, dims);

    // weight_merit_fun
    size += ocp_nlp_out_calculate_size(config, dims);

    // array of pointers
    // cost
    size += (N+1)*sizeof(void *);
    // dynamics
    size += N*sizeof(void *);
    // constraints
    size += (N+1)*sizeof(void *);

    // module workspace
    if
    (opts->reuse_workspace)
    {
#if defined(ACADOS_WITH_OPENMP)
        // with OpenMP each module needs its own workspace even when reuse is on
        // qp solver
        size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);

        // dynamics
        for (ii = 0; ii < N; ii++)
        {
            size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
        }

        // cost
        for (ii = 0; ii <= N; ii++)
        {
            size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }

        // constraints
        for (ii = 0; ii <= N; ii++)
        {
            size += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
        }
#else
        // sequential execution: all module workspaces share one region,
        // sized by the largest requirement
        // qp solver
        tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
        size_tmp = tmp > size_tmp ? tmp : size_tmp;

        // dynamics
        for (ii = 0; ii < N; ii++)
        {
            tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }

        // cost
        for (ii = 0; ii <= N; ii++)
        {
            tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }

        // constraints
        for (ii = 0; ii <= N; ii++)
        {
            tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }

        size += size_tmp;
#endif
    }
    else
    {
        // no reuse: sum all module workspaces
        // qp solver
        size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);

        // dynamics
        for (ii = 0; ii < N; ii++)
        {
            size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
        }

        // cost
        for (ii = 0; ii <= N; ii++)
        {
            size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }

        // constraints
        for (ii = 0; ii <= N; ii++)
        {
            size += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
        }
    }

    size += 8;  // struct align

    return size;
}


// Assign the nlp workspace inside raw_memory; the overlap/no-overlap
// layout of the module workspaces mirrors
// ocp_nlp_workspace_calculate_size exactly.
ocp_nlp_workspace *ocp_nlp_workspace_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts, ocp_nlp_memory *mem, void *raw_memory)
{
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    char *c_ptr = (char *) raw_memory;

    ocp_nlp_workspace *work = (ocp_nlp_workspace *) c_ptr;
    c_ptr += sizeof(ocp_nlp_workspace);

    /* pointers to substructures */
    // dynamics
    work->dynamics = (void **) c_ptr;
    c_ptr += N*sizeof(void *);
    // cost
    work->cost = (void **) c_ptr;
    c_ptr += (N+1)*sizeof(void *);
    // constraints
    work->constraints = (void **) c_ptr;
    c_ptr += (N+1)*sizeof(void *);

    align_char_to(8, &c_ptr);

    /* substructures */
    // tmp_nlp_out
    work->tmp_nlp_out = ocp_nlp_out_assign(config, dims, c_ptr);
    c_ptr += ocp_nlp_out_calculate_size(config, dims);

    // weight_merit_fun
    work->weight_merit_fun = ocp_nlp_out_assign(config, dims, c_ptr);
    c_ptr += ocp_nlp_out_calculate_size(config, dims);

    if (opts->reuse_workspace)
    {
#if defined(ACADOS_WITH_OPENMP)
        // disjoint workspaces, advance c_ptr past each one
        // qp solver
        work->qp_work = (void *) c_ptr;
        c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);

        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
            c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
        }

        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }

        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
        }
#else
        // overlapping workspaces: all modules point at the same region,
        // c_ptr is only advanced by the maximum size (tracked in size_tmp)
        int size_tmp = 0;
        int tmp;

        // qp solver
        work->qp_work = (void *) c_ptr;
        tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
        size_tmp = tmp > size_tmp ? tmp : size_tmp;

        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
            tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }

        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }

        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
            size_tmp = tmp > size_tmp ?
tmp : size_tmp; } c_ptr += size_tmp; #endif } else { // qp solver work->qp_work = (void *) c_ptr; c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (int ii = 0; ii < N; ii++) { work->dynamics[ii] = c_ptr; c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost for (int ii = 0; ii <= N; ii++) { work->cost[ii] = c_ptr; c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); } // constraints for (int ii = 0; ii <= N; ii++) { work->constraints[ii] = c_ptr; c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } } assert((char *) work + ocp_nlp_workspace_calculate_size(config, dims, opts) >= c_ptr); return work; } /************************************************ * functions ************************************************/ void ocp_nlp_initialize_qp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work) { int ii; int N = dims->N; #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (ii = 0; ii <= N; ii++) { // cost config->cost[ii]->initialize(config->cost[ii], dims->cost[ii], in->cost[ii], opts->cost[ii], mem->cost[ii], work->cost[ii]); // dynamics if (ii < N) config->dynamics[ii]->initialize(config->dynamics[ii], dims->dynamics[ii], in->dynamics[ii], opts->dynamics[ii], mem->dynamics[ii], work->dynamics[ii]); // constraints config->constraints[ii]->initialize(config->constraints[ii], dims->constraints[ii], in->constraints[ii], opts->constraints[ii], mem->constraints[ii], work->constraints[ii]); } return; } void ocp_nlp_initialize_t_slacks(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work) { int ii; struct blasfeo_dvec *ineq_fun; int N = dims->N; int *ni = dims->ni; 
    int *ns = dims->ns;
    int *nx = dims->nx;
    int *nu = dims->nu;

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        // copy out->ux to tmp_nlp_out->ux, since this is used in compute_fun
        blasfeo_dveccp(nx[ii]+nu[ii]+2*ns[ii], out->ux+ii, 0, work->tmp_nlp_out->ux+ii, 0);

        // evaluate inequalities
        config->constraints[ii]->compute_fun(config->constraints[ii], dims->constraints[ii], in->constraints[ii], opts->constraints[ii], mem->constraints[ii], work->constraints[ii]);

        ineq_fun = config->constraints[ii]->memory_get_fun_ptr(mem->constraints[ii]);
        // t = -ineq_fun
        blasfeo_dveccpsc(2 * ni[ii], -1.0, ineq_fun, 0, out->t + ii, 0);
    }

    return;
}


/* Build the QP approximation matrices at the current iterate:
 * zero the Hessian blocks, add an optional Levenberg-Marquardt diagonal
 * regularization (scaled by the stage length Ts[i] on stages 0..N-1),
 * let each module fill its QP contribution, then gather the stage-wise
 * gradients/residuals/adjoints into the nlp memory. */
void ocp_nlp_approximate_qp_matrices(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int i;

    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *ni = dims->ni;

    /* stage-wise multiple shooting lagrangian evaluation */

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i = 0; i <= N; i++)
    {
        // init Hessian to 0
        blasfeo_dgese(nu[i] + nx[i], nu[i] + nx[i], 0.0, mem->qp_in->RSQrq+i, 0, 0);

        if (i < N)
        {
            // Levenberg Marquardt term: Ts[i] * levenberg_marquardt * eye()
            if (opts->levenberg_marquardt > 0.0)
                blasfeo_ddiare(nu[i] + nx[i], in->Ts[i] * opts->levenberg_marquardt, mem->qp_in->RSQrq+i, 0, 0);

            // dynamics
            config->dynamics[i]->update_qp_matrices(config->dynamics[i], dims->dynamics[i], in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);
        }
        else
        {
            // Levenberg Marquardt term: 1.0 * levenberg_marquardt * eye()
            if (opts->levenberg_marquardt > 0.0)
                blasfeo_ddiare(nu[i] + nx[i], opts->levenberg_marquardt, mem->qp_in->RSQrq+i, 0, 0);
        }

        // cost
        config->cost[i]->update_qp_matrices(config->cost[i], dims->cost[i], in->cost[i], opts->cost[i], mem->cost[i], work->cost[i]);

        // constraints
        config->constraints[i]->update_qp_matrices(config->constraints[i], dims->constraints[i], in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]);
    }

    /* collect stage-wise evaluations */

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i=0; i <= N; i++)
    {
        // nlp mem: cost_grad
        struct blasfeo_dvec *cost_grad = config->cost[i]->memory_get_grad_ptr(mem->cost[i]);
        blasfeo_dveccp(nv[i], cost_grad, 0, mem->cost_grad + i, 0);

        // nlp mem: dyn_fun
        if (i < N)
        {
            struct blasfeo_dvec *dyn_fun = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
            blasfeo_dveccp(nx[i + 1], dyn_fun, 0, mem->dyn_fun + i, 0);
        }

        // nlp mem: dyn_adj
        if (i < N)
        {
            struct blasfeo_dvec *dyn_adj = config->dynamics[i]->memory_get_adj_ptr(mem->dynamics[i]);
            blasfeo_dveccp(nu[i] + nx[i], dyn_adj, 0, mem->dyn_adj + i, 0);
        }
        else
        {
            // terminal stage has no dynamics: zero its adjoint block
            blasfeo_dvecse(nu[N] + nx[N], 0.0, mem->dyn_adj + N, 0);
        }
        // add the previous stage's adjoint contribution (state part) into
        // this stage's dyn_adj
        if (i > 0)
        {
            struct blasfeo_dvec *dyn_adj = config->dynamics[i-1]->memory_get_adj_ptr(mem->dynamics[i-1]);
            blasfeo_daxpy(nx[i], 1.0, dyn_adj, nu[i-1]+nx[i-1], mem->dyn_adj+i, nu[i], mem->dyn_adj+i, nu[i]);
        }

        // nlp mem: ineq_fun
        struct blasfeo_dvec *ineq_fun = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
        blasfeo_dveccp(2 * ni[i], ineq_fun, 0, mem->ineq_fun + i, 0);

        // nlp mem: ineq_adj
        struct blasfeo_dvec *ineq_adj = config->constraints[i]->memory_get_adj_ptr(mem->constraints[i]);
        blasfeo_dveccp(nv[i], ineq_adj, 0, mem->ineq_adj + i, 0);
    }

    // NOTE(review): this loop body is entirely commented out (dead placeholder
    // kept by upstream TODOs); it currently does nothing.
    for (i = 0; i <= N; i++)
    {
        // TODO(rien) where should the update happen??? move to qp update ???
        // TODO(all): fix and move where appropriate
        // if (i<N)
        // {
        //     ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
        //     sim_opts *opts = dynamics_opts->sim_solver;
        //     if (opts->scheme != NULL && opts->scheme->type != exact)
        //     {
        //         for (int_t j = 0; j < nx; j++)
        //             BLASFEO_DVECEL(nlp_mem->cost_grad+i, nu+j) += work->sim_out[i]->grad[j];
        //         for (int_t j = 0; j < nu; j++)
        //             BLASFEO_DVECEL(nlp_mem->cost_grad+i, j) += work->sim_out[i]->grad[nx+j];
        //     }
        // }
    }

    return;
}



// update QP rhs for SQP (step prim var, abs dual var)
// TODO(all): move in dynamics, cost, constraints modules ???
/* Copy the collected gradients (g), dynamics residuals (b) and inequality
 * values (d) from the nlp memory into the QP right-hand side. */
void ocp_nlp_approximate_qp_vectors_sqp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int i;

    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i = 0; i <= N; i++)
    {
        // g
        blasfeo_dveccp(nv[i], mem->cost_grad + i, 0, mem->qp_in->rqz + i, 0);

        // b
        if (i < N)
            blasfeo_dveccp(nx[i + 1], mem->dyn_fun + i, 0, mem->qp_in->b + i, 0);

        // d
        blasfeo_dveccp(2 * ni[i], mem->ineq_fun + i, 0, mem->qp_in->d + i, 0);
    }

    return;
}


/* Re-evaluate the stage-0 bounds (initial state embedding) and refresh the
 * corresponding entries of ineq_fun and the QP's d vector. */
void ocp_nlp_embed_initial_value(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int *ni = dims->ni;

    // constraints
    config->constraints[0]->bounds_update(config->constraints[0], dims->constraints[0], in->constraints[0], opts->constraints[0], mem->constraints[0], work->constraints[0]);

    // nlp mem: ineq_fun
    struct blasfeo_dvec *ineq_fun = config->constraints[0]->memory_get_fun_ptr(mem->constraints[0]);
    blasfeo_dveccp(2 * ni[0], ineq_fun, 0, mem->ineq_fun, 0);

    // d
    blasfeo_dveccp(2 * ni[0], mem->ineq_fun, 0, mem->qp_in->d, 0);

    return;
}


/* Evaluate the L1 merit function at work->tmp_nlp_out:
 *   cost + sum_i |pi_weight| * |dyn residual| + sum_i |lam_weight| * viol,
 * with the weights held in work->weight_merit_fun.
 * (Signature continues on the next chunk line.) */
double ocp_nlp_evaluate_merit_fun(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts,
ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int i, j;

    int N = dims->N;
    int *nx = dims->nx;
    int *ni = dims->ni;

    double merit_fun = 0.0;

    // compute fun value
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i=0; i<=N; i++)
    {
        // cost
        config->cost[i]->compute_fun(config->cost[i], dims->cost[i], in->cost[i], opts->cost[i], mem->cost[i], work->cost[i]);
    }
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i=0; i<N; i++)
    {
        // dynamics
        config->dynamics[i]->compute_fun(config->dynamics[i], dims->dynamics[i], in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);
    }
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i=0; i<=N; i++)
    {
        // constr
        config->constraints[i]->compute_fun(config->constraints[i], dims->constraints[i], in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]);
    }

    double *tmp_fun;
    double tmp;
    struct blasfeo_dvec *tmp_fun_vec;

    // total cost: sum of the stage cost values
    double cost_fun = 0.0;
    for(i=0; i<=N; i++)
    {
        tmp_fun = config->cost[i]->memory_get_fun_ptr(mem->cost[i]);
        cost_fun += *tmp_fun;
    }

    // weighted L1 norm of the dynamics residuals
    double dyn_fun = 0.0;
    for(i=0; i<N; i++)
    {
        tmp_fun_vec = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
        // printf("\nMerit: dyn will multiply tmp_fun, weights\n");
        // blasfeo_print_exp_tran_dvec(nx[i+1], tmp_fun_vec, 0);
        // blasfeo_print_exp_tran_dvec(nx[i+1], work->weight_merit_fun->pi+i, 0);
        for(j=0; j<nx[i+1]; j++)
        {
            dyn_fun += fabs(BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j)) * fabs(BLASFEO_DVECEL(tmp_fun_vec, j));
        }
    }

    // weighted L1 norm of the inequality VIOLATIONS (only positive parts count)
    double constr_fun = 0.0;
    for(i=0; i<=N; i++)
    {
        // printf("\ni %d\n", i);
        tmp_fun_vec = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
        // blasfeo_print_exp_tran_dvec(2*ni[i], tmp_fun_vec, 0);
        // blasfeo_print_exp_tran_dvec(2*ni[i], work->weight_merit_fun->lam+i, 0);
        for(j=0; j<2*ni[i]; j++)
        {
            tmp = BLASFEO_DVECEL(tmp_fun_vec, j);
            tmp = tmp>0.0 ? fabs(tmp) : 0.0;  // tmp = constraint violation
            // printf("IN merit fun: ineq i %d, j %d tmp_fun%e, multiplier %e\n", i, j, BLASFEO_DVECEL(tmp_fun_vec, j), BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j));
            constr_fun += fabs(BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j)) * tmp;
        }
    }

    merit_fun = cost_fun + dyn_fun + constr_fun;
    // printf("\nMerit fun: %e cost: %e dyn: %e constr: %e\n", merit_fun, cost_fun, dyn_fun, constr_fun);

    return merit_fun;
}


/* Backtracking line search on the L1 merit function (Leineweber 1999 style):
 * update the merit weights from the multipliers, then shrink alpha by 0.7
 * until the merit function decreases or alpha reaches alpha_min.
 * Returns the accepted step length; when globalization is not
 * MERIT_BACKTRACKING it simply returns opts->step_length unchanged. */
static double ocp_nlp_line_search(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int i;

    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *ni = dims->ni;

    double alpha = opts->step_length;

    double tmp0, tmp1;
    int j;

#if 0
    // Line Search Gianluca version (disabled; kept for reference)

    // current point
    for (i = 0; i <= N; i++)
        blasfeo_dveccp(nv[i], out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);
    for (i = 0; i < N; i++)
        blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->tmp_nlp_out->pi+i, 0);
    for (i = 0; i <= N; i++)
        blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->tmp_nlp_out->lam+i, 0);

    // linear update of algebraic variables using state and input sensitivity
    // if (i < N)
    // {
    //     blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0);
    // }

    // initialize weights
    if(mem->sqp_iter[0]==0)
    {
        for (i = 0; i < N; i++)
            blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->weight_merit_fun->pi+i, 0);
        for (i = 0; i <= N; i++)
            blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->weight_merit_fun->lam+i, 0);
    }

    // update weigths
    for (i = 0; i < N; i++)
    {
        for(j=0; j<nx[i+1]; j++)
        {
            tmp0 = fabs(BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j));
            tmp1 = 0.5 * (tmp0 + fabs(BLASFEO_DVECEL(mem->qp_out->pi+i, j)));
            BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j) = tmp0>tmp1 ? tmp0 : tmp1;
        }
    }
    for (i = 0; i <= N; i++)
    {
        for(j=0; j<2*ni[i]; j++)
        {
            tmp0 = fabs(BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j));
            tmp1 = 0.5 * (tmp0 + fabs(BLASFEO_DVECEL(mem->qp_out->lam+i, j)));
            BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j) = tmp0>tmp1 ? tmp0 : tmp1;
        }
    }

    printf("\n\nmerit fun value\n");
    double merit_fun0 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);

    double alpha_min = 0.1;
    // NOTE(review): "j<10 & alpha>alpha_min" uses bitwise & — presumably &&
    // was intended; harmless here since both operands are 0/1, and this whole
    // branch is compiled out by #if 0 anyway.
    for (j=0; j<10 & alpha>alpha_min; j++)
    {
        for (i = 0; i <= N; i++)
            blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux+i, 0, out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);

        printf("\n%d tmp merit fun value\n", j);
        double merit_fun1 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);

        if(merit_fun1 < merit_fun0)
        {
            break;
        }
        else
        {
            alpha *= 0.7;
        }
    }

    printf("\nalpha %f\n", alpha);
#endif

    if (opts->globalization == MERIT_BACKTRACKING)
    {
        // Line search version Jonathan
        // Following Leineweber1999

        // copy out (current iterate) to work->tmp_nlp_out
        for (i = 0; i <= N; i++)
            blasfeo_dveccp(nv[i], out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);
        for (i = 0; i < N; i++)
            blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->tmp_nlp_out->pi+i, 0);
        for (i = 0; i <= N; i++)
            blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->tmp_nlp_out->lam+i, 0);

        // linear update of algebraic variables using state and input sensitivity
        // if (i < N)
        // {
        //     blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0);
        // }

        /* initialize (Leineweber1999 M5.1) */
        if (mem->sqp_iter[0]==0)
        {
            // initialize weights
            // equality merit weights = abs( eq multipliers )
            for (i = 0; i < N; i++)
            {
                for (j=0; j<nx[i+1]; j++)
                {
                    tmp0 = fabs(BLASFEO_DVECEL(out->pi+i, j));
                    BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j) = tmp0;
                }
            }
            // printf("merit fun: initialize weights lam\n");
            for (i = 0; i <= N; i++)
            {
                blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->weight_merit_fun->lam+i, 0);
                // blasfeo_print_dvec(nx[i+1], work->weight_merit_fun->lam+i, 0);
            }
        }
        else
        {
            // update weights
            // printf("merit fun: update weights, sqp_iter = %d\n", mem->sqp_iter[0]);
            for (i = 0; i < N; i++)
            {
                for(j=0; j<nx[i+1]; j++)
                {
                    // abs(lambda) (LW)
                    tmp0 = fabs(BLASFEO_DVECEL(out->pi+i, j));
                    // .5 * (abs(lambda) + sigma)
                    tmp1 = 0.5 * (tmp0 + BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j));
                    BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j) = tmp0>tmp1 ? tmp0 : tmp1;
                }
            }
            for (i = 0; i <= N; i++)
            {
                for(j=0; j<2*ni[i]; j++)
                {
                    // mu (LW)
                    tmp0 = BLASFEO_DVECEL(out->lam+i, j);
                    // .5 * (mu + tau)
                    tmp1 = 0.5 * (tmp0 + BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j));
                    BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j) = tmp0>tmp1 ? tmp0 : tmp1;
                }
            }
        }

        if (1) // (mem->sqp_iter[0]!=0) // TODO: why does Leineweber do full step in first SQP iter?
        {
            double merit_fun0 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);

            double alpha_min = 0.1;
            // TODO(oj): add alpha_min and alpha_reduction factor [0.7] to options.

            /* actual Line Search*/
            alpha = 1.0;
            // TODO: check out more advanced step search Leineweber1995

            for (j=0; alpha>alpha_min; j++)
            {
                // candidate iterate: tmp_ux = ux + alpha * qp step
                for (i = 0; i <= N; i++)
                    blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux+i, 0, out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);

                // printf("\ntmp merit fun value step search iter: %d", j);
                double merit_fun1 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);

                // TODO(oj): also check Armijo-type condition Leinweber1999 (2.35)
                if (merit_fun1 < merit_fun0)
                {
                    break;
                }
                else
                {
                    alpha *= 0.7;
                }
            }
        }

        printf("\nalpha %f\n", alpha);
    }

    return alpha;
}


/* Apply the accepted SQP step: primal variables move by alpha along the QP
 * step, dual variables are blended (1-alpha)*old + alpha*new, and the
 * algebraic variables z are updated linearly through the dzdux sensitivity.
 * (Loop body continues on the next chunk line.) */
void ocp_nlp_update_variables_sqp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int i;

    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *ni = dims->ni;
    int *nz = dims->nz;

    // step length
    double alpha = ocp_nlp_line_search(config, dims, in, out, opts, mem, work);

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i = 0; i <= N;
i++)
    {
        // step in primal variables
        blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux + i, 0, out->ux + i, 0, out->ux + i, 0);

        // update dual variables: convex combination (1-alpha)*old + alpha*qp
        if (i < N)
        {
            blasfeo_dvecsc(nx[i+1], 1.0-alpha, out->pi+i, 0);
            blasfeo_daxpy(nx[i+1], alpha, mem->qp_out->pi+i, 0, out->pi+i, 0, out->pi+i, 0);
        }
        blasfeo_dvecsc(2*ni[i], 1.0-alpha, out->lam+i, 0);
        blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->lam+i, 0, out->lam+i, 0, out->lam+i, 0);

        // update slack values
        blasfeo_dvecsc(2*ni[i], 1.0-alpha, out->t+i, 0);
        blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->t+i, 0, out->t+i, 0, out->t+i, 0);

        // linear update of algebraic variables using state and input sensitivity
        if (i < N)
        {
            blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0);
        }
    }

    return;
}



/************************************************
 * residuals
 ************************************************/

/* Bytes needed for the ocp_nlp_res struct and its residual vectors
 * (stationarity res_g, dynamics res_b, inequality res_d, complementarity
 * res_m). Must mirror ocp_nlp_res_assign below. */
int ocp_nlp_res_calculate_size(ocp_nlp_dims *dims)
{
    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;

    int size = sizeof(ocp_nlp_res);

    size += 3 * (N + 1) * sizeof(struct blasfeo_dvec);  // res_g res_d res_m
    size += 1 * N * sizeof(struct blasfeo_dvec);        // res_b

    for (int ii = 0; ii < N; ii++)
    {
        size += 1 * blasfeo_memsize_dvec(nv[ii]);      // res_g
        size += 1 * blasfeo_memsize_dvec(nx[ii + 1]);  // res_b
        size += 2 * blasfeo_memsize_dvec(2 * ni[ii]);  // res_d res_m
    }
    size += 1 * blasfeo_memsize_dvec(nv[N]);      // res_g
    size += 2 * blasfeo_memsize_dvec(2 * ni[N]);  // res_d res_m

    size += 8;   // initial align
    size += 8;   // blasfeo_struct align
    size += 64;  // blasfeo_mem align

    make_int_multiple_of(8, &size);

    return size;
}


/* Carve the residuals struct out of raw_memory; layout mirrors
 * ocp_nlp_res_calculate_size exactly. */
ocp_nlp_res *ocp_nlp_res_assign(ocp_nlp_dims *dims, void *raw_memory)
{
    char *c_ptr = (char *) raw_memory;

    // extract sizes
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;

    // initial align
    align_char_to(8, &c_ptr);

    // struct
    ocp_nlp_res *res = (ocp_nlp_res *) c_ptr;
    c_ptr += sizeof(ocp_nlp_res);

    // blasfeo_struct align
    align_char_to(8, &c_ptr);

    // res_g
    assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_g, &c_ptr);
    // res_b
    assign_and_advance_blasfeo_dvec_structs(N, &res->res_b, &c_ptr);
    // res_d
    assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_d, &c_ptr);
    // res_m
    assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_m, &c_ptr);

    // blasfeo_mem align
    align_char_to(64, &c_ptr);

    // res_g
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nv[ii], res->res_g + ii, &c_ptr);
    }
    // res_b
    for (int ii = 0; ii < N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nx[ii + 1], res->res_b + ii, &c_ptr);
    }
    // res_d
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], res->res_d + ii, &c_ptr);
    }
    // res_m
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], res->res_m + ii, &c_ptr);
    }

    res->memsize = ocp_nlp_res_calculate_size(dims);

    return res;
}


/* Compute the KKT residual vectors from the quantities collected in nlp mem
 * and track the infinity norm of each (stationarity, dynamics feasibility,
 * inequality feasibility, complementarity). */
void ocp_nlp_res_compute(ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_res *res, ocp_nlp_memory *mem)
{
    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *ni = dims->ni;

    double tmp_res;

    // res_g: cost_grad - ineq_adj - dyn_adj (stationarity of the Lagrangian)
    res->inf_norm_res_g = 0.0;
    for (int ii = 0; ii <= N; ii++)
    {
        blasfeo_daxpy(nv[ii], -1.0, mem->ineq_adj + ii, 0, mem->cost_grad + ii, 0, res->res_g + ii, 0);
        blasfeo_daxpy(nu[ii] + nx[ii], -1.0, mem->dyn_adj + ii, 0, res->res_g + ii, 0, res->res_g + ii, 0);
        blasfeo_dvecnrm_inf(nv[ii], res->res_g + ii, 0, &tmp_res);
        res->inf_norm_res_g = tmp_res > res->inf_norm_res_g ? tmp_res : res->inf_norm_res_g;
    }

    // res_b: dynamics residual
    res->inf_norm_res_b = 0.0;
    for (int ii = 0; ii < N; ii++)
    {
        blasfeo_dveccp(nx[ii + 1], mem->dyn_fun + ii, 0, res->res_b + ii, 0);
        blasfeo_dvecnrm_inf(nx[ii + 1], res->res_b + ii, 0, &tmp_res);
        res->inf_norm_res_b = tmp_res > res->inf_norm_res_b ? tmp_res : res->inf_norm_res_b;
    }

    // res_d: t + ineq_fun (inequality feasibility)
    res->inf_norm_res_d = 0.0;
    for (int ii = 0; ii <= N; ii++)
    {
        blasfeo_daxpy(2 * ni[ii], 1.0, out->t + ii, 0, mem->ineq_fun + ii, 0, res->res_d + ii, 0);
        blasfeo_dvecnrm_inf(2 * ni[ii], res->res_d + ii, 0, &tmp_res);
        res->inf_norm_res_d = tmp_res > res->inf_norm_res_d ? tmp_res : res->inf_norm_res_d;
    }

    // res_m: lam .* t (complementarity)
    res->inf_norm_res_m = 0.0;
    for (int ii = 0; ii <= N; ii++)
    {
        blasfeo_dvecmul(2 * ni[ii], out->lam + ii, 0, out->t + ii, 0, res->res_m + ii, 0);
        blasfeo_dvecnrm_inf(2 * ni[ii], res->res_m + ii, 0, &tmp_res);
        res->inf_norm_res_m = tmp_res > res->inf_norm_res_m ? tmp_res : res->inf_norm_res_m;
    }

    return;
}
doallchar-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

// one dimension array computation
// with finer granularity than traditional 4 bytes.
// Dynamic tools looking at 4-byte elements may wrongfully report a race condition.
//
// Liao 2/7/2017

// Shared array of single-byte elements; each parallel iteration touches a
// DIFFERENT byte, so there is no data race — but detectors that track memory
// at 4-byte granularity may produce a false positive here.
// NOTE: this is a DataRaceBench benchmark; the exact code pattern is the
// point of the test and must not be "fixed".
char a[100];

int main()
{
  int i;
  // Disjoint per-iteration writes to a[i]: race-free by construction.
#pragma omp parallel for
  for (i=0;i<100;i++)
    a[i]=a[i]+1;

  return 0;
}
common.h
#ifndef LIGHTGBM_UTILS_COMMON_FUN_H_
#define LIGHTGBM_UTILS_COMMON_FUN_H_

#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>

#include <cstdio>
#include <string>
#include <vector>
#include <sstream>
#include <cstdint>
#include <algorithm>
#include <cmath>
#include <functional>
#include <memory>
#include <iterator>
#include <type_traits>
#include <iomanip>

namespace LightGBM {

namespace Common {

// ASCII-only lowercase conversion ('Z'-'z' is -32, so this adds 32 to A-Z).
inline char tolower(char in) {
  if (in <= 'Z' && in >= 'A')
    return in - ('Z' - 'z');
  return in;
}

// Strip leading and trailing whitespace in place; returns the same string.
inline static std::string& Trim(std::string& str) {
  if (str.empty()) {
    return str;
  }
  str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1);
  str.erase(0, str.find_first_not_of(" \f\n\r\t\v"));
  return str;
}

// Strip leading and trailing single/double quote characters in place.
inline static std::string& RemoveQuotationSymbol(std::string& str) {
  if (str.empty()) {
    return str;
  }
  str.erase(str.find_last_not_of("'\"") + 1);
  str.erase(0, str.find_first_not_of("'\""));
  return str;
}

// True if str begins with prefix.
inline static bool StartsWith(const std::string& str, const std::string prefix) {
  if (str.substr(0, prefix.size()) == prefix) {
    return true;
  } else {
    return false;
  }
}

// Split on a single delimiter character; empty fields are preserved.
inline static std::vector<std::string> Split(const char* c_str, char delimiter) {
  std::vector<std::string> ret;
  std::string str(c_str);
  size_t i = 0;
  size_t pos = str.find(delimiter);
  while (pos != std::string::npos) {
    ret.push_back(str.substr(i, pos - i));
    i = ++pos;
    pos = str.find(delimiter, pos);
  }
  ret.push_back(str.substr(i));
  return ret;
}

inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) {
  // will split when met any chars in delimiters
  std::vector<std::string> ret;
  std::string str(c_str);
  size_t i = 0;
  size_t pos = str.find_first_of(delimiters);
  while (pos != std::string::npos) {
    ret.push_back(str.substr(i, pos - i));
    i = ++pos;
    pos = str.find_first_of(delimiters, pos);
  }
  ret.push_back(str.substr(i));
  return ret;
}

// Return the first line containing key_word, or "" if none does.
inline static std::string FindFromLines(const std::vector<std::string>& lines, const char* key_word) {
  for (auto& line : lines) {
    size_t find_pos = line.find(key_word);
    if (find_pos != std::string::npos) {
      return line;
    }
  }
  return "";
}

// Parse a signed decimal int at p (skipping surrounding spaces); stores the
// value in *out and returns the position just past the number and any
// trailing spaces. No overflow checking.
inline static const char* Atoi(const char* p, int* out) {
  int sign, value;
  while (*p == ' ') {
    ++p;
  }
  sign = 1;
  if (*p == '-') {
    sign = -1;
    ++p;
  } else if (*p == '+') {
    ++p;
  }
  for (value = 0; *p >= '0' && *p <= '9'; ++p) {
    value = value * 10 + (*p - '0');
  }
  *out = sign * value;
  while (*p == ' ') {
    ++p;
  }
  return p;
}

// Hand-rolled double parser: handles sign, fraction, exponent (clamped to
// 308), plus the tokens na/nan (-> 0) and inf/infinity (-> +-1e308);
// aborts via Log::Fatal on any other non-numeric token. Returns the position
// just past the parsed token and trailing spaces.
inline static const char* Atof(const char* p, double* out) {
  int frac;
  double sign, value, scale;
  *out = 0;
  // Skip leading white space, if any.
  while (*p == ' ') {
    ++p;
  }

  // Get sign, if any.
  sign = 1.0;
  if (*p == '-') {
    sign = -1.0;
    ++p;
  } else if (*p == '+') {
    ++p;
  }

  // is a number
  if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') {
    // Get digits before decimal point or exponent, if any.
    for (value = 0.0; *p >= '0' && *p <= '9'; ++p) {
      value = value * 10.0 + (*p - '0');
    }

    // Get digits after decimal point, if any.
    if (*p == '.') {
      double pow10 = 10.0;
      ++p;
      while (*p >= '0' && *p <= '9') {
        value += (*p - '0') / pow10;
        pow10 *= 10.0;
        ++p;
      }
    }

    // Handle exponent, if any.
    frac = 0;
    scale = 1.0;
    if ((*p == 'e') || (*p == 'E')) {
      uint32_t expon;
      // Get sign of exponent, if any.
      ++p;
      if (*p == '-') {
        frac = 1;
        ++p;
      } else if (*p == '+') {
        ++p;
      }
      // Get digits of exponent, if any.
      for (expon = 0; *p >= '0' && *p <= '9'; ++p) {
        expon = expon * 10 + (*p - '0');
      }
      if (expon > 308) expon = 308;
      // Calculate scaling factor.
      while (expon >= 50) { scale *= 1E50; expon -= 50; }
      while (expon >= 8) { scale *= 1E8;  expon -= 8; }
      while (expon > 0) { scale *= 10.0; expon -= 1; }
    }
    // Return signed and scaled floating point result.
    *out = sign * (frac ? (value / scale) : (value * scale));
  } else {
    // not numeric: collect the token up to a separator and match the
    // special spellings (case-insensitive via Common::tolower)
    size_t cnt = 0;
    while (*(p + cnt) != '\0' && *(p + cnt) != ' '
           && *(p + cnt) != '\t' && *(p + cnt) != ','
           && *(p + cnt) != '\n' && *(p + cnt) != '\r'
           && *(p + cnt) != ':') {
      ++cnt;
    }
    if (cnt > 0) {
      std::string tmp_str(p, cnt);
      std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower);
      if (tmp_str == std::string("na") || tmp_str == std::string("nan")) {
        *out = 0;
      } else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) {
        *out = sign * 1e308;
      } else {
        Log::Fatal("Unknown token %s in data file", tmp_str.c_str());
      }
      p += cnt;
    }
  }

  while (*p == ' ') {
    ++p;
  }

  return p;
}

// Strict variant: true only if the whole string is a valid int.
inline bool AtoiAndCheck(const char* p, int* out) {
  const char* after = Atoi(p, out);
  if (*after != '\0') {
    return false;
  }
  return true;
}

// Strict variant: true only if the whole string is a valid double.
inline bool AtofAndCheck(const char* p, double* out) {
  const char* after = Atof(p, out);
  if (*after != '\0') {
    return false;
  }
  return true;
}

inline static const char* SkipSpaceAndTab(const char* p) {
  while (*p == ' ' || *p == '\t') {
    ++p;
  }
  return p;
}

// Skip newline/carriage-return characters (and spaces).
inline static const char* SkipReturn(const char* p) {
  while (*p == '\n' || *p == '\r' || *p == ' ') {
    ++p;
  }
  return p;
}

// Element-wise static_cast of a vector from T to T2.
template<typename T, typename T2>
inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) {
  std::vector<T2> ret;
  for (size_t i = 0; i < arr.size(); ++i) {
    ret.push_back(static_cast<T2>(arr[i]));
  }
  return ret;
}

// Join all elements with a delimiter, using enough precision to round-trip
// doubles through text.
template<typename T>
inline static std::string ArrayToString(const std::vector<T>& arr, char delimiter) {
  if (arr.empty()) {
    return std::string("");
  }
  std::stringstream str_buf;
  str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  str_buf << arr[0];
  for (size_t i = 1; i < arr.size(); ++i) {
    str_buf << delimiter;
    str_buf << arr[i];
  }
  return str_buf.str();
}

// Same as above, limited to the first n elements.
template<typename T>
inline static std::string ArrayToString(const std::vector<T>& arr, size_t n, char delimiter) {
  if (arr.empty() || n == 0) {
    return std::string("");
  }
  std::stringstream str_buf;
  str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  str_buf << arr[0];
  for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
    str_buf << delimiter;
    str_buf << arr[i];
  }
  return str_buf.str();
}

// String-to-T dispatch: integral types go through std::stol, floating-point
// types (the specialization below) through std::stod.
// NOTE(review): the double-underscore prefix is a reserved identifier in
// C++; kept as-is since renaming would change the interface.
template<typename T, bool is_float>
struct __StringToTHelper {
  T operator()(const std::string& str) const {
    return static_cast<T>(std::stol(str));
  }
};

template<typename T>
struct __StringToTHelper<T, true> {
  T operator()(const std::string& str) const {
    return static_cast<T>(std::stod(str));
  }
};

// Parse exactly n delimited values; fatal error if the count differs.
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter, size_t n) {
  if (n == 0) {
    return std::vector<T>();
  }
  std::vector<std::string> strs = Split(str.c_str(), delimiter);
  if (strs.size() != n) {
    Log::Fatal("StringToArray error, size doesn't match.");
  }
  std::vector<T> ret(n);
  __StringToTHelper<T, std::is_floating_point<T>::value> helper;
  for (size_t i = 0; i < n; ++i) {
    ret[i] = helper(strs[i]);
  }
  return ret;
}

// Parse however many delimited values are present.
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter) {
  std::vector<std::string> strs = Split(str.c_str(), delimiter);
  std::vector<T> ret;
  ret.reserve(strs.size());
  __StringToTHelper<T, std::is_floating_point<T>::value> helper;
  for (const auto& s : strs) {
    ret.push_back(helper(s));
  }
  return ret;
}

// Join all elements with a string delimiter.
template<typename T>
inline static std::string Join(const std::vector<T>& strs, const char* delimiter) {
  if (strs.empty()) {
    return std::string("");
  }
  std::stringstream str_buf;
  str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  str_buf << strs[0];
  for (size_t i = 1; i < strs.size(); ++i) {
    str_buf << delimiter;
    str_buf << strs[i];
  }
  return str_buf.str();
}

// Join elements in [start, end), clamped to the vector's size.
// NOTE(review): "end - start <= 0" on size_t only catches end == start
// (unsigned subtraction cannot be negative) — presumably intended to also
// reject end < start; left unchanged.
template<typename T>
inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) {
  if (end - start <= 0) {
    return std::string("");
  }
  start = std::min(start, static_cast<size_t>(strs.size()) - 1);
  end = std::min(end, static_cast<size_t>(strs.size()));
  std::stringstream str_buf;
  str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  str_buf << strs[start];
  for (size_t i = start + 1; i < end; ++i) {
    str_buf << delimiter;
    str_buf << strs[i];
  }
  return str_buf.str();
}

// Smallest power of two >= x (returns 0 if no 64-bit power of two fits).
static inline int64_t Pow2RoundUp(int64_t x) {
  int64_t t = 1;
  for (int i = 0; i < 64; ++i) {
    if (t >= x) {
      return t;
    }
    t <<= 1;
  }
  return 0;
}

/*!
 * \brief Do inplace softmax transformation on p_rec
 *        (max-subtracted for numerical stability).
 * \param p_rec The input/output vector of the values.
 */
inline void Softmax(std::vector<double>* p_rec) {
  std::vector<double> &rec = *p_rec;
  double wmax = rec[0];
  for (size_t i = 1; i < rec.size(); ++i) {
    wmax = std::max(rec[i], wmax);
  }
  double wsum = 0.0f;
  for (size_t i = 0; i < rec.size(); ++i) {
    rec[i] = std::exp(rec[i] - wmax);
    wsum += rec[i];
  }
  for (size_t i = 0; i < rec.size(); ++i) {
    rec[i] /= static_cast<double>(wsum);
  }
}

// Out-of-place softmax over a raw buffer; same stabilized algorithm.
inline void Softmax(const double* input, double* output, int len) {
  double wmax = input[0];
  for (int i = 1; i < len; ++i) {
    wmax = std::max(input[i], wmax);
  }
  double wsum = 0.0f;
  for (int i = 0; i < len; ++i) {
    output[i] = std::exp(input[i] - wmax);
    wsum += output[i];
  }
  for (int i = 0; i < len; ++i) {
    output[i] /= static_cast<double>(wsum);
  }
}

// View a vector of unique_ptr<T> as a vector of non-owning const T*.
template<typename T>
std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) {
  std::vector<const T*> ret;
  for (size_t i = 0; i < input.size(); ++i) {
    ret.push_back(input.at(i).get());
  }
  return ret;
}

// Sort (key, value) pairs from index `start` onward by key, ascending or
// descending. NOTE(review): definition is truncated at the end of this
// chunk; the remainder (writing the sorted pairs back) is not visible here.
template<typename T1, typename T2>
inline void SortForPair(std::vector<T1>& keys, std::vector<T2>& values, size_t start, bool is_reverse = false) {
  std::vector<std::pair<T1, T2>> arr;
  for (size_t i = start; i < keys.size(); ++i) {
    arr.emplace_back(keys[i], values[i]);
  }
  if (!is_reverse) {
    std::sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
      return a.first < b.first;
    });
  } else {
    std::sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
      return a.first > b.first;
    });
} for (size_t i = start; i < arr.size(); ++i) { keys[i] = arr[i].first; values[i] = arr[i].second; } } /* * approximate hessians of absolute loss with Gaussian function * cf. https://en.wikipedia.org/wiki/Gaussian_function * * y is a prediction. * t means true target. * g means gradient. * eta is a parameter to control the width of Gaussian function. * w means weights. */ inline static double ApproximateHessianWithGaussian(const double y, const double t, const double g, const double eta, const double w=1.0f) { const double diff = y - t; const double pi = 4.0 * std::atan(1.0); const double x = std::fabs(diff); const double a = 2.0 * std::fabs(g) * w; // difference of two first derivatives, (zero to inf) and (zero to -inf). const double b = 0.0; const double c = std::max((std::fabs(y) + std::fabs(t)) * eta, 1.0e-10); return w * std::exp(-(x - b) * (x - b) / (2.0 * c * c)) * a / (c * std::sqrt(2 * pi)); } template <typename T> inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>& data) { std::vector<T*> ptr(data.size()); for (size_t i = 0; i < data.size(); ++i) { ptr[i] = data[i].data(); } return ptr; } template <typename T> inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) { std::vector<int> ret(data.size()); for (size_t i = 0; i < data.size(); ++i) { ret[i] = static_cast<int>(data[i].size()); } return ret; } inline static double AvoidInf(double x) { if (x >= std::numeric_limits<double>::max()) { return std::numeric_limits<double>::max(); } else if(x <= std::numeric_limits<double>::lowest()) { return std::numeric_limits<double>::lowest(); } else { return x; } } template<class _Iter> inline static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) { return (0); } template<class _RanIt, class _Pr, class _VTRanIt> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) { size_t len = _Last - _First; const size_t kMinInnerLen = 1024; int num_threads = 1; #pragma omp parallel 
#pragma omp master { num_threads = omp_get_num_threads(); } if (len <= kMinInnerLen || num_threads <= 1) { std::sort(_First, _Last, _Pred); return; } size_t inner_size = (len + num_threads - 1) / num_threads; inner_size = std::max(inner_size, kMinInnerLen); num_threads = static_cast<int>((len + inner_size - 1) / inner_size); #pragma omp parallel for schedule(static,1) for (int i = 0; i < num_threads; ++i) { size_t left = inner_size*i; size_t right = left + inner_size; right = std::min(right, len); if (right > left) { std::sort(_First + left, _First + right, _Pred); } } // Buffer for merge. std::vector<_VTRanIt> temp_buf(len); _RanIt buf = temp_buf.begin(); size_t s = inner_size; // Recursive merge while (s < len) { int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2)); #pragma omp parallel for schedule(static,1) for (int i = 0; i < loop_size; ++i) { size_t left = i * 2 * s; size_t mid = left + s; size_t right = mid + s; right = std::min(len, right); std::copy(_First + left, _First + mid, buf + left); std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred); } s *= 2; } } template<class _RanIt, class _Pr> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) { return ParallelSort(_First, _Last, _Pred, IteratorValType(_First)); } } // namespace Common } // namespace LightGBM #endif // LightGBM_UTILS_COMMON_FUN_H_
/* ocp_nlp_sqp.c — next chunk of this concatenated source (acados) */
/*
 * Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
 * Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
 * Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
 * Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
 *
 * This file is part of acados.
 *
 * The 2-Clause BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.;
 */

/*
 * SQP solver for optimal-control NLPs.
 * Implements the ocp_nlp solver interface: options / memory / workspace
 * sizing and assignment, the main SQP iteration loop, precompute,
 * parameter-sensitivity evaluation, and getters.
 */

#include "acados/ocp_nlp/ocp_nlp_sqp.h"

// external
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif

// blasfeo
#include "blasfeo/include/blasfeo_d_aux.h"
#include "blasfeo/include/blasfeo_d_aux_ext_dep.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// acados
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h"
#include "acados/ocp_nlp/ocp_nlp_reg_common.h"
#include "acados/ocp_qp/ocp_qp_common.h"
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
#include "acados/utils/timing.h"
#include "acados/utils/types.h"



/************************************************
 * options
 ************************************************/

// Bytes needed for ocp_nlp_sqp_opts plus the nested generic nlp opts.
int ocp_nlp_sqp_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    int size = 0;

    size += sizeof(ocp_nlp_sqp_opts);

    size += ocp_nlp_opts_calculate_size(config, dims);

    return size;
}

// Lay out the opts struct (and nested nlp opts) inside raw_memory.
void *ocp_nlp_sqp_opts_assign(void *config_, void *dims_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    char *c_ptr = (char *) raw_memory;

    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_opts);

    opts->nlp_opts = ocp_nlp_opts_assign(config, dims, c_ptr);
    c_ptr += ocp_nlp_opts_calculate_size(config, dims);

    // verify we did not run past the caller-provided buffer
    assert((char *) raw_memory + ocp_nlp_sqp_opts_calculate_size(config, dims) >= c_ptr);

    return opts;
}

// Set SQP defaults (20 iterations, 1e-8 tolerances) and propagate the
// tolerances down into the QP solver options.
void ocp_nlp_sqp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;

    // int ii;

    // this first !!!
    ocp_nlp_opts_initialize_default(config, dims, nlp_opts);

    // SQP opts
    opts->max_iter = 20;
    opts->tol_stat = 1e-8;
    opts->tol_eq = 1e-8;
    opts->tol_ineq = 1e-8;
    opts->tol_comp = 1e-8;

    opts->ext_qp_res = 0;

    opts->qp_warm_start = 0;
    opts->warm_start_first_qp = false;
    opts->rti_phase = 0;
    opts->print_level = 0;

    // overwrite default submodules opts

    // qp tolerance
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", &opts->tol_stat);
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", &opts->tol_eq);
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", &opts->tol_ineq);
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", &opts->tol_comp);

    return;
}

// Re-derive any option-dependent settings in the nested nlp opts.
void ocp_nlp_sqp_opts_update(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_nlp_opts_update(config, dims, nlp_opts);

    return;
}

// Generic string-keyed option setter. Fields prefixed "qp_" are routed to the
// QP submodule via the nested nlp opts; SQP-level fields are handled here;
// anything else falls through to ocp_nlp_opts_set.
void ocp_nlp_sqp_opts_set(void *config_, void *opts_, const char *field, void* value)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    int ii;

    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;

    // extract module name (prefix up to the first '_')
    char *char_ = strchr(field, '_');
    if (char_!=NULL)
    {
        module_length = char_-field;
        for (ii=0; ii<module_length; ii++)
            module[ii] = field[ii];
        module[module_length] = '\0'; // add end of string
        ptr_module = module;
    }

    // pass options to QP module
    if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
    {
        // config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, field+module_length+1, value);
        ocp_nlp_opts_set(config, nlp_opts, field, value);

        // mirror the warm-start flag locally so the solver can toggle it
        // per-iteration (see ocp_nlp_sqp)
        if (!strcmp(field, "qp_warm_start"))
        {
            int* i_ptr = (int *) value;
            opts->qp_warm_start = *i_ptr;
        }
    }
    else // nlp opts
    {
        if (!strcmp(field, "max_iter"))
        {
            int* max_iter = (int *) value;
            opts->max_iter = *max_iter;
        }
        else if (!strcmp(field, "tol_stat"))
        {
            double* tol_stat = (double *) value;
            opts->tol_stat = *tol_stat;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", value);
        }
        else if (!strcmp(field, "tol_eq"))
        {
            double* tol_eq = (double *) value;
            opts->tol_eq = *tol_eq;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", value);
        }
        else if (!strcmp(field, "tol_ineq"))
        {
            double* tol_ineq = (double *) value;
            opts->tol_ineq = *tol_ineq;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", value);
        }
        else if (!strcmp(field, "tol_comp"))
        {
            double* tol_comp = (double *) value;
            opts->tol_comp = *tol_comp;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", value);
        }
        else if (!strcmp(field, "ext_qp_res"))
        {
            int* ext_qp_res = (int *) value;
            opts->ext_qp_res = *ext_qp_res;
        }
        else if (!strcmp(field, "warm_start_first_qp"))
        {
            bool* warm_start_first_qp = (bool *) value;
            opts->warm_start_first_qp = *warm_start_first_qp;
        }
        else if (!strcmp(field, "rti_phase"))
        {
            int* rti_phase = (int *) value;
            // only 0 is valid for the full-SQP solver (RTI phases belong to ocp_nlp_sqp_rti)
            if (*rti_phase < 0 || *rti_phase > 0) {
                printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for rti_phase field.");
                printf("possible values are: 0\n");
                exit(1);
            }
            else opts->rti_phase = *rti_phase;
        }
        else if (!strcmp(field, "print_level"))
        {
            int* print_level = (int *) value;
            if (*print_level < 0)
            {
                printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for print_level field, need int >=0, got %d.", *print_level);
                exit(1);
            }
            opts->print_level = *print_level;
        }
        else
        {
            ocp_nlp_opts_set(config, nlp_opts, field, value);

            // printf("\nerror: ocp_nlp_sqp_opts_set: wrong field: %s\n", field);
            // exit(1);
        }
    }

    return;
}

// Forward a stage-wise option to the generic nlp opts setter.
void ocp_nlp_sqp_opts_set_at_stage(void *config_, void *opts_, int stage, const char *field, void* value)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_nlp_opts_set_at_stage(config, nlp_opts, stage, field, value);

    return;
}



/************************************************
 * memory
 ************************************************/

// Bytes needed for the SQP memory: struct + nlp residuals + generic nlp
// memory + the per-iteration statistics table.
int ocp_nlp_sqp_memory_calculate_size(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    // int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    int size = 0;

    size += sizeof(ocp_nlp_sqp_memory);

    // nlp res
    size += ocp_nlp_res_calculate_size(dims);

    // nlp mem
    size += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);

    // stat: one row per iteration (+1), 6 columns, +4 if external QP residuals
    int stat_m = opts->max_iter+1;
    int stat_n = 6;
    if (opts->ext_qp_res)
        stat_n += 4;
    size += stat_n*stat_m*sizeof(double);

    size += 8;  // initial align

    make_int_multiple_of(8, &size);

    return size;
}

// Carve the SQP memory out of raw_memory; mirrors the size calculation above.
void *ocp_nlp_sqp_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    // ocp_nlp_dynamics_config **dynamics = config->dynamics;
    // ocp_nlp_cost_config **cost = config->cost;
    // ocp_nlp_constraints_config **constraints = config->constraints;

    char *c_ptr = (char *) raw_memory;

    // int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    // initial align
    align_char_to(8, &c_ptr);

    ocp_nlp_sqp_memory *mem = (ocp_nlp_sqp_memory *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_memory);

    // nlp res
    mem->nlp_res = ocp_nlp_res_assign(dims, c_ptr);
    c_ptr += mem->nlp_res->memsize;

    // nlp mem
    mem->nlp_mem = ocp_nlp_memory_assign(config, dims, nlp_opts, c_ptr);
    c_ptr += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);

    // stat
    mem->stat = (double *) c_ptr;
    mem->stat_m = opts->max_iter+1;
    mem->stat_n = 6;
    if (opts->ext_qp_res)
        mem->stat_n += 4;
    c_ptr += mem->stat_m*mem->stat_n*sizeof(double);

    mem->status = ACADOS_READY;

    assert((char *) raw_memory + ocp_nlp_sqp_memory_calculate_size(config, dims, opts) >= c_ptr);

    return mem;
}



/************************************************
 * workspace
 ************************************************/

// Bytes needed for the SQP workspace: struct + generic nlp workspace +
// scratch QP in/out (for sensitivities) + optional external QP residuals.
int ocp_nlp_sqp_workspace_calculate_size(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    int size = 0;

    // sqp
    size += sizeof(ocp_nlp_sqp_workspace);

    // nlp
    size += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);

    // tmp qp in
    size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // tmp qp out
    size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    if (opts->ext_qp_res)
    {
        // qp res
        size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);

        // qp res ws
        size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    return size;
}

// Cast the raw workspace buffer into typed sub-workspaces; mirrors
// ocp_nlp_sqp_workspace_calculate_size.
static void ocp_nlp_sqp_cast_workspace(ocp_nlp_config *config, ocp_nlp_dims *dims,
         ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_workspace *work)
{
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

    // sqp
    char *c_ptr = (char *) work;
    c_ptr += sizeof(ocp_nlp_sqp_workspace);

    // nlp
    work->nlp_work = ocp_nlp_workspace_assign(config, dims, nlp_opts, nlp_mem, c_ptr);
    c_ptr += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);

    // tmp qp in
    work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // tmp qp out
    work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    if (opts->ext_qp_res)
    {
        // qp res
        work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);

        // qp res ws
        work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    assert((char *) work + ocp_nlp_sqp_workspace_calculate_size(config, dims, opts) >= c_ptr);

    return;
}



/************************************************
 * functions
 ************************************************/

// Main SQP solve: wires module memories to the QP, then iterates
// linearize -> check residuals -> regularize -> solve QP -> update variables.
// Returns ACADOS_SUCCESS, ACADOS_QP_FAILURE, or ACADOS_MAXITER.
int ocp_nlp_sqp(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
                void *opts_, void *mem_, void *work_)
{
    acados_timer timer0, timer1;

    acados_tic(&timer0);

    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;

    ocp_nlp_sqp_workspace *work = work_;
    ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    // zero timers
    double total_time = 0.0;
    double tmp_time;
    mem->time_qp_sol = 0.0;
    mem->time_qp_solver_call = 0.0;
    mem->time_qp_xcond = 0.0;
    mem->time_lin = 0.0;
    mem->time_reg = 0.0;
    mem->time_tot = 0.0;

    int N = dims->N;

    int ii;

    int qp_iter = 0;
    int qp_status = 0;

#if defined(ACADOS_WITH_OPENMP)
    // backup number of threads
    int num_threads_bkp = omp_get_num_threads();
    // set number of threads
    omp_set_num_threads(opts->nlp_opts->num_threads);
    #pragma omp parallel
    { // beginning of parallel region
#endif

    // alias to dynamics_memory: point each stage's dynamics module at the
    // shared iterate (ux, pi) and QP matrices so evaluation writes in place
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_ux1_ptr(nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_ux1_ptr(nlp_work->tmp_nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_pi_ptr(nlp_out->pi+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_pi_ptr(nlp_work->tmp_nlp_out->pi+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_BAbt_ptr(nlp_mem->qp_in->BAbt+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_dzduxt_ptr(nlp_mem->dzduxt+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_sim_guess_ptr(nlp_mem->sim_guess+ii, nlp_mem->set_sim_guess+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->dynamics[ii]);
    }

    // alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->cost[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_Z_ptr(nlp_mem->qp_in->Z+ii, nlp_mem->cost[ii]);
    }

    // alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->constraints[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_lam_ptr(nlp_out->lam+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_tmp_lam_ptr(nlp_work->tmp_nlp_out->lam+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_DCt_ptr(nlp_mem->qp_in->DCt+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxb_ptr(nlp_mem->qp_in->idxb[ii], nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxs_rev_ptr(nlp_mem->qp_in->idxs_rev[ii], nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxe_ptr(nlp_mem->qp_in->idxe[ii], nlp_mem->constraints[ii]);
    }

    // alias to regularize memory
    config->regularize->memory_set_RSQrq_ptr(dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem);
    config->regularize->memory_set_rq_ptr(dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem);
    config->regularize->memory_set_BAbt_ptr(dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem);
    config->regularize->memory_set_b_ptr(dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem);
    config->regularize->memory_set_idxb_ptr(dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem);
    config->regularize->memory_set_DCt_ptr(dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem);
    config->regularize->memory_set_ux_ptr(dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem);
    config->regularize->memory_set_pi_ptr(dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem);
    config->regularize->memory_set_lam_ptr(dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem);

    // copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    // NOTE(oj): this will lead in an error for irk_gnsf, T must be set in precompute;
    // -> remove here and make sure precompute is called everywhere (e.g. Python interface).
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
                                        nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
    }

#if defined(ACADOS_WITH_OPENMP)
    } // end of parallel region
#endif

    // initialize QP
    ocp_nlp_initialize_qp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);

    // main sqp loop
    int sqp_iter = 0;
    nlp_mem->sqp_iter = &sqp_iter;
    for (; sqp_iter < opts->max_iter; sqp_iter++)
    {
        if (opts->print_level > 0)
        {
            printf("\n------- sqp iter %d (max_iter %d) --------\n", sqp_iter, opts->max_iter);
            if (opts->print_level > sqp_iter + 1)
                print_ocp_qp_in(nlp_mem->qp_in);
        }

        // linearizate NLP and update QP matrices
        acados_tic(&timer1);
        ocp_nlp_approximate_qp_matrices(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
        mem->time_lin += acados_toc(&timer1);

        // update QP rhs for SQP (step prim var, abs dual var)
        ocp_nlp_approximate_qp_vectors_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);

        // compute nlp residuals; inf_norm_res is the max over the four norms
        ocp_nlp_res_compute(dims, nlp_in, nlp_out, mem->nlp_res, nlp_mem);
        nlp_out->inf_norm_res = mem->nlp_res->inf_norm_res_g;
        nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_b > nlp_out->inf_norm_res) ?
            mem->nlp_res->inf_norm_res_b : nlp_out->inf_norm_res;
        nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_d > nlp_out->inf_norm_res) ?
            mem->nlp_res->inf_norm_res_d : nlp_out->inf_norm_res;
        nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_m > nlp_out->inf_norm_res) ?
            mem->nlp_res->inf_norm_res_m : nlp_out->inf_norm_res;

        // save statistics (columns 0..3: residual norms of this iterate)
        if (sqp_iter < mem->stat_m)
        {
            mem->stat[mem->stat_n*sqp_iter+0] = mem->nlp_res->inf_norm_res_g;
            mem->stat[mem->stat_n*sqp_iter+1] = mem->nlp_res->inf_norm_res_b;
            mem->stat[mem->stat_n*sqp_iter+2] = mem->nlp_res->inf_norm_res_d;
            mem->stat[mem->stat_n*sqp_iter+3] = mem->nlp_res->inf_norm_res_m;
        }

        // exit conditions on residuals
        if ((mem->nlp_res->inf_norm_res_g < opts->tol_stat) &
            (mem->nlp_res->inf_norm_res_b < opts->tol_eq) &
            (mem->nlp_res->inf_norm_res_d < opts->tol_ineq) &
            (mem->nlp_res->inf_norm_res_m < opts->tol_comp))
        {
            // save sqp iterations number
            mem->sqp_iter = sqp_iter;
            nlp_out->sqp_iter = sqp_iter;

            // stop timer
            total_time += acados_toc(&timer0);

            // save time
            nlp_out->total_time = total_time;
            mem->time_tot = total_time;

#if defined(ACADOS_WITH_OPENMP)
            // restore number of threads
            omp_set_num_threads(num_threads_bkp);
#endif
            mem->status = ACADOS_SUCCESS;
            return mem->status;
        }

        // regularize Hessian
        acados_tic(&timer1);
        config->regularize->regularize_hessian(config->regularize, dims->regularize,
                                               opts->nlp_opts->regularize, nlp_mem->regularize_mem);
        mem->time_reg += acados_toc(&timer1);

        // (typically) no warm start at first iteration
        if (sqp_iter == 0 && !opts->warm_start_first_qp)
        {
            int tmp_int = 0;
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "warm_start", &tmp_int);
        }

        // solve qp
        acados_tic(&timer1);
        qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver, nlp_mem->qp_in, nlp_mem->qp_out,
                                        opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work);
        mem->time_qp_sol += acados_toc(&timer1);

        qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time);
        mem->time_qp_solver_call += tmp_time;
        qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_xcond", &tmp_time);
        mem->time_qp_xcond += tmp_time;

        // compute correct dual solution in case of Hessian regularization
        acados_tic(&timer1);
        config->regularize->correct_dual_sol(config->regularize, dims->regularize,
                                             opts->nlp_opts->regularize, nlp_mem->regularize_mem);
        mem->time_reg += acados_toc(&timer1);

        // restore default warm start
        if (sqp_iter==0)
        {
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "warm_start", &opts->qp_warm_start);
        }

        // TODO move into QP solver memory ???
        qp_info *qp_info_;
        ocp_qp_out_get(nlp_mem->qp_out, "qp_info", &qp_info_);
        nlp_out->qp_iter = qp_info_->num_iter;
        // printf("\nqp_iter = %d, sqp_iter = %d, max_sqp_iter = %d\n", nlp_out->qp_iter, sqp_iter, opts->max_iter);
        qp_iter = qp_info_->num_iter;

        // save statistics of last qp solver call (columns 4,5: QP status/iters)
        if (sqp_iter+1 < mem->stat_m)
        {
            mem->stat[mem->stat_n*(sqp_iter+1)+4] = qp_status;
            mem->stat[mem->stat_n*(sqp_iter+1)+5] = qp_iter;
        }

        // compute external QP residuals (for debugging)
        if (opts->ext_qp_res)
        {
            ocp_qp_res_compute(nlp_mem->qp_in, nlp_mem->qp_out, work->qp_res, work->qp_res_ws);
            if (sqp_iter+1 < mem->stat_m)
                ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*(sqp_iter+1)+6));
        }

        if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER))
        {
            // print_ocp_qp_in(nlp_mem->qp_in);

            // save sqp iterations number
            mem->sqp_iter = sqp_iter;
            nlp_out->sqp_iter = sqp_iter;

            // stop timer
            total_time += acados_toc(&timer0);

            // save time
            mem->time_tot = total_time;
            nlp_out->total_time = total_time;

            printf("QP solver returned error status %d in iteration %d\n", qp_status, sqp_iter);
#if defined(ACADOS_WITH_OPENMP)
            // restore number of threads
            omp_set_num_threads(num_threads_bkp);
#endif
            if (opts->print_level > 1)
            {
                printf("\n Failed to solve the following QP:\n");
                if (opts->print_level > sqp_iter + 1)
                    print_ocp_qp_in(nlp_mem->qp_in);
            }

            mem->status = ACADOS_QP_FAILURE;
            return mem->status;
        }

        ocp_nlp_update_variables_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);

        // ocp_nlp_dims_print(nlp_out->dims);
        // ocp_nlp_out_print(nlp_out);
        // exit(1);

        // ??? @rien
        //        for (int_t i = 0; i < N; i++)
        //        {
        //   ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
        //            sim_opts *opts = dynamics_opts->sim_solver;
        //            if (opts->scheme == NULL)
        //                continue;
        //            opts->sens_adj = (opts->scheme->type != exact);
        //            if (nlp_in->freezeSens) {
        //                // freeze inexact sensitivities after first SQP iteration !!
        //                opts->scheme->freeze = true;
        //            }
        //        }

        if (opts->print_level > 0)
        {
            printf("Residuals: stat: %e, eq: %e, ineq: %e, comp: %e.\n",
                   mem->nlp_res->inf_norm_res_g, mem->nlp_res->inf_norm_res_b,
                   mem->nlp_res->inf_norm_res_d, mem->nlp_res->inf_norm_res_m );
        }
    }

    // stop timer
    total_time += acados_toc(&timer0);

    // ocp_nlp_out_print(nlp_out);

    // save sqp iterations number
    mem->sqp_iter = sqp_iter;
    nlp_out->sqp_iter = sqp_iter;

    // save time
    mem->time_tot = total_time;
    nlp_out->total_time = total_time;

    // maximum number of iterations reached
#if defined(ACADOS_WITH_OPENMP)
    // restore number of threads
    omp_set_num_threads(num_threads_bkp);
#endif
    mem->status = ACADOS_MAXITER;

    printf("\n ocp_nlp_sqp: maximum iterations reached\n");

    if (opts->print_level > 0)
    {
        printf("Residuals: stat: %e, eq: %e, ineq: %e, comp: %e.\n",
               mem->nlp_res->inf_norm_res_g, mem->nlp_res->inf_norm_res_b,
               mem->nlp_res->inf_norm_res_d, mem->nlp_res->inf_norm_res_m );
    }

    return mem->status;
}

// One-time precomputations: validate soft-constraint dims against the
// constraint modules, set sampling times T, and run dynamics precompute.
int ocp_nlp_sqp_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
                           void *opts_, void *mem_, void *work_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    // ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

    ocp_nlp_sqp_workspace *work = work_;
    ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    int N = dims->N;
    int status = ACADOS_SUCCESS;

    int ii;

    // TODO(all) add flag to enable/disable checks
    for (ii = 0; ii <= N; ii++)
    {
        int module_val;
        config->constraints[ii]->dims_get(config->constraints[ii], dims->constraints[ii], "ns", &module_val);
        if (dims->ns[ii] != module_val)
        {
            printf("ocp_nlp_sqp_precompute: inconsistent dimension ns for stage %d with constraint module, got %d, module: %d.",
                   ii, dims->ns[ii], module_val);
            exit(1);
        }
    }

    // precompute
    for (ii = 0; ii < N; ii++)
    {
        // set T
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
                                        nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
        // dynamics precompute
        status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii],
                                                  nlp_in->dynamics[ii], opts->nlp_opts->dynamics[ii],
                                                  nlp_mem->dynamics[ii], nlp_work->dynamics[ii]);
        if (status != ACADOS_SUCCESS) return status;
    }
    return status;
}

// Solution sensitivity w.r.t. the initial state: zero the QP rhs, place a
// unit entry on the requested bound, and re-solve via the QP solver's
// eval_sens; result is copied into sens_nlp_out. Only field "ex", stage 0
// is supported.
void ocp_nlp_sqp_eval_param_sens(void *config_, void *dims_, void *opts_, void *mem_, void *work_,
                                 char *field, int stage, int index, void *sens_nlp_out_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_nlp_out *sens_nlp_out = sens_nlp_out_;

    ocp_nlp_sqp_workspace *work = work_;
    ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    d_ocp_qp_copy_all(nlp_mem->qp_in, work->tmp_qp_in);
    d_ocp_qp_set_rhs_zero(work->tmp_qp_in);

    double one = 1.0;

    if ((!strcmp("ex", field)) & (stage==0))
    {
        d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in);
        d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in);

        // d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in);

        config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver, work->tmp_qp_in, work->tmp_qp_out,
                                     opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work);

        // d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out);
        // exit(1);

        /* copy tmp_qp_out into sens_nlp_out */

        int i;

        int N = dims->N;
        int *nv = dims->nv;
        int *nx = dims->nx;
        // int *nu = dims->nu;
        int *ni = dims->ni;
        // int *nz = dims->nz;

        for (i = 0; i <= N; i++)
        {
            blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0, sens_nlp_out->ux + i, 0);

            if (i < N)
                blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0, sens_nlp_out->pi + i, 0);

            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0, sens_nlp_out->lam + i, 0);

            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0, sens_nlp_out->t + i, 0);
        }
    }
    else
    {
        printf("\nerror: field %s at stage %d not available in ocp_nlp_sqp_eval_param_sens\n", field, stage);
        exit(1);
    }

    return;
}

// TODO rename memory_get ???
// String-keyed getter for solver results, timings, statistics, and internal
// QP objects.
void ocp_nlp_sqp_get(void *config_, void *dims_, void *mem_, const char *field, void *return_value_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_memory *mem = mem_;

    if (!strcmp("sqp_iter", field))
    {
        int *value = return_value_;
        *value = mem->sqp_iter;
    }
    else if (!strcmp("status", field))
    {
        int *value = return_value_;
        *value = mem->status;
    }
    else if (!strcmp("time_tot", field) || !strcmp("tot_time", field))
    {
        double *value = return_value_;
        *value = mem->time_tot;
    }
    else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_sol;
    }
    else if (!strcmp("time_qp_solver", field) || !strcmp("time_qp_solver_call", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_solver_call;
    }
    else if (!strcmp("time_qp_xcond", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_xcond;
    }
    else if (!strcmp("time_lin", field))
    {
        double *value = return_value_;
        *value = mem->time_lin;
    }
    else if (!strcmp("time_reg", field))
    {
        double *value = return_value_;
        *value = mem->time_reg;
    }
    else if (!strcmp("time_sim", field) || !strcmp("time_sim_ad", field) || !strcmp("time_sim_la", field))
    {
        // NOTE(review): accumulates into *ptr without zeroing it first —
        // presumably the caller initializes the output to 0; verify.
        double tmp = 0.0;
        double *ptr = return_value_;
        int N = dims->N;
        int ii;
        for (ii=0; ii<N; ii++)
        {
            config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], field, &tmp);
            *ptr += tmp;
        }
    }
    else if (!strcmp("nlp_res", field))
    {
        ocp_nlp_res **value = return_value_;
        *value = mem->nlp_res;
    }
    else if (!strcmp("stat", field))
    {
        double **value = return_value_;
        *value = mem->stat;
    }
    else if (!strcmp("statistics", field))
    {
        // copy stats column-major with an iteration-index column prepended
        int n_row = mem->stat_m<mem->sqp_iter+1 ? mem->stat_m : mem->sqp_iter+1;
        double *value = return_value_;
        for (int ii=0; ii<n_row; ii++)
        {
            value[ii+0] = ii;
            for (int jj=0; jj<mem->stat_n; jj++)
                value[ii+(jj+1)*n_row] = mem->stat[jj+ii*mem->stat_n];
        }
    }
    else if (!strcmp("stat_m", field))
    {
        int *value = return_value_;
        *value = mem->stat_m;
    }
    else if (!strcmp("stat_n", field))
    {
        int *value = return_value_;
        *value = mem->stat_n;
    }
    else if (!strcmp("nlp_mem", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem;
    }
    else if (!strcmp("qp_xcond_dims", field))
    {
        void **value = return_value_;
        *value = dims->qp_solver->xcond_dims;
    }
    else if (!strcmp("qp_xcond_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_in;
    }
    else if (!strcmp("qp_xcond_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_out;
    }
    else if (!strcmp("qp_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_in;
    }
    else if (!strcmp("qp_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_out;
    }
    else if (!strcmp("qp_iter", field))
    {
        config->qp_solver->memory_get(config->qp_solver, mem->nlp_mem->qp_solver_mem, "iter", return_value_);
    }
    else
    {
        printf("\nerror: field %s not available in ocp_nlp_sqp_get\n", field);
        exit(1);
    }
}

// Fill the generic ocp_nlp_config vtable with the SQP implementations.
void ocp_nlp_sqp_config_initialize_default(void *config_)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    config->opts_calculate_size = &ocp_nlp_sqp_opts_calculate_size;
    config->opts_assign = &ocp_nlp_sqp_opts_assign;
    config->opts_initialize_default = &ocp_nlp_sqp_opts_initialize_default;
    config->opts_update = &ocp_nlp_sqp_opts_update;
    config->opts_set = &ocp_nlp_sqp_opts_set;
    config->opts_set_at_stage = &ocp_nlp_sqp_opts_set_at_stage;
    config->memory_calculate_size = &ocp_nlp_sqp_memory_calculate_size;
    config->memory_assign = &ocp_nlp_sqp_memory_assign;
    config->workspace_calculate_size = &ocp_nlp_sqp_workspace_calculate_size;
    config->evaluate = &ocp_nlp_sqp;
    config->eval_param_sens = &ocp_nlp_sqp_eval_param_sens;
    config->config_initialize_default = &ocp_nlp_sqp_config_initialize_default;
    config->precompute = &ocp_nlp_sqp_precompute;
    config->get = &ocp_nlp_sqp_get;

    return;
}
amplici.c
/** * @file amplici.c * @author Xiyu Peng * * AmpliCI: Method to identify true sequences (haplotypes) in a sample of * Illumina amplicon data (from FASTQ file). * * [TODO] * 1. check reads with unequal lengths and report error * Note the data structure actually supports reads with unequal length * but ampliCI model does not. * 2. format your code nicely! Inspiration?: * https://www.kernel.org/doc/html/v4.10/process/coding-style.html * 3. Assigning reads to haplotypes (given or estimated) and output the abundance table * 4. Correct the comments and output in the code. We actually filter on maximum * conditional log likelihood (maximum posterior assignment probability), * not the reads log likelihood. * 5. use number of observed quality scores, not maxQ- minQ */ #include <string.h> #include <stdlib.h> #include <float.h> #include <stdio.h> #include <math.h> #define MATHLIB_STANDALONE 1 //#define DEBUG 0 //#define ADJUST_PVALUE //#define STORE_FP #include <Rmath.h> #include "amplici.h" #include "ampliclust.h" #include "initialize.h" #include "statistics.h" #include "lmath.h" #include "io.h" #include "hash.h" #include "align.h" #include "error.h" #include "error_est.h" /* amplici */ int amplici_malloc(options *opt, data *dat, initializer *ini, unsigned int **array_fp, double **fp_abun, double **fp_trans, unsigned char **** nw_result, size_t **nw_alen,unsigned int K_space, unsigned n_candidate); int amplici_realloc(options *opt, initializer *ini, model *mod, unsigned int **array_fp, double **fp_abun, double **fp_trans, unsigned int preK, unsigned int K, unsigned int pre_nfp, unsigned int nfp, size_t sample_size, unsigned int hash_length,unsigned int max_read_length); void amplici_free(unsigned int *array_fp, double *fp_abun, double *fp_trans, unsigned char *** nw_result, size_t *nw_alen, unsigned int nw_size); /* estimated scaled true abundance */ int expected_TrueAbundance(options *opt, data *dat, double *H_abun, double *e_trans, double *self_trans,double *abun_true, 
size_t *idx, unsigned int count_i,unsigned int select, unsigned int i, int conve, double low_bound); double iterate_expected_true_abundance(unsigned int sample_size, size_t *idx_array, double *e_trans, double *self_trans, double *H_abun, unsigned int select, unsigned int obs_abun, double true_abun); /* check false positives */ int evaluate_haplotype(options *opt, data *dat, model *mod, initializer *ini, unsigned int K, double low_bound, double * error_profile, unsigned int ord, unsigned int n_candidates, int *fp,int final); int check_fp_with_indels(options *opt, data *dat, model *mod, initializer *ini,unsigned int select, double low_bound, double * error_profile, int *fp); int Est_pi(initializer *ini, double *pi, size_t sample_size, unsigned int K, int reassign); int abun_pvalue(options *opt, initializer *ini, size_t *idx_array, double *e_trans, unsigned int count, unsigned int select, unsigned int threshold, double *p, size_t sample_size, int partial); /* transition prob with or without alignment */ double trans_nw(options *opt,unsigned char **align, size_t alen, unsigned int mismatch, unsigned int ngap, double *error_profile,int err_encoding, unsigned char *rqmat, unsigned char n_quality, double adj, unsigned int rlen, double *error_prob); int Expected_SelfTrans(options *opt, data *dat, double *self_trans,double *error_profile, int err_encoding, double adj); int ExpTrans_nogap(data *dat, options *opt, initializer *ini,unsigned int H_id, unsigned int select, double *error_profile, int err_encoding); int ExpTrans_nwalign(data *dat, options *opt, initializer *ini, unsigned char ***nw_result, size_t *nw_alen,unsigned int select, double *error_profile, int err_encoding,unsigned int H_id, double adj); /** * Cluster amplicon sequences. 
 *
 * @param ini	pointer to initializer object
 * @param dat	pointer to data object
 * @param opt	pointer to options object
 * @param mod	pointer to model object
 * @param ri	pointer to run_info object
 *
 * @return error status
 **/
int ampliCI(options * opt, data * dat, model *mod, initializer *ini, run_info *ri)
{
	int err = NO_ERROR;
	int output_hap = 1;	/* output haplotype FASTA file */
	int use_size = 0;	/* output cluster sizes for abundance */

	/* K to be determined in the function below, opt->K will be changed */
	if ((err = haplotype_selection(opt, dat, mod, ini, opt->K_max)))
		return err;

	if ((err = realloc_run_info(ri, dat->sample_size, opt->K + 1, 0, 1)))
		return err;

	/* assign cluster based on ll */
	assign_clusters(mod->eik, opt->K, dat->sample_size,
		ri->optimal_cluster_size, ri->optimal_cluster_id, 1);

	/* remove reads with too-small log likelihood (Note: ll are alignment-free) */
	/* [KSD] For some reason you do not actually use read log likelihood.
	 * Instead, you use the maximum conditional log likelihood. */
	/* Since it cannot use posterior probabilities. they sum to 1 across all clusters */
	/* [KSD] I was just proposing that this function do as advertised. */
	likelihood_filter(opt->K, opt->ll_cutoff, NULL, mod->pi, ini->e_trans,
		dat->sample_size, ri);

	char *outfile_hap = NULL;
	char *outfile = NULL;

	/* write the detailed information file (default: <outfile_base>.out) */
	if (opt->outfile_base || opt->outfile_info) {

		FILE *fp = NULL;

		if (!opt->outfile_info) {
			/* +5 bytes: ".out" plus NUL terminator */
			outfile = malloc((strlen(opt->outfile_base) + 5)
							* sizeof (char));
			if (!outfile)
				return mmessage(ERROR_MSG, MEMORY_ALLOCATION,
							"output file");
			strcpy(outfile, opt->outfile_base);
			strcat(outfile, ".out");
			opt->outfile_info = outfile;
		}

		fp = fopen(opt->outfile_info, "w");
		if (!fp)
			return mmessage(ERROR_MSG, FILE_OPEN_ERROR,
							opt->outfile_info);

		fprintf(fp, "K: %i\n", opt->K);

		fprintf(fp, "assignments: ");
		fprint_assignment(fp, ri->optimal_cluster_id, dat->sample_size,
							opt->K, 2, 1);
		fprintf(fp, "cluster sizes: ");
		fprint_uints(fp, ri->optimal_cluster_size, opt->K, 3, 1);

		fprintf(fp,"pi: ");
		/* NOTE: mod->pi held log values up to here; converted in place */
		for(unsigned int k = 0; k < opt->K; ++k)
			mod->pi[k] = exp(mod->pi[k]);
		fprint_doubles(fp, mod->pi, opt->K ,6,1);

		fprintf(fp,"reads ll: ");
		fprint_doubles(fp, ri->optimal_cluster_ll, dat->sample_size, 3, 1);

		//[TODO] output ini->seeds directly
		fprint_fasta(fp, ini->seeds[0], opt->K, dat->max_read_length,
						ini->seed_lengths, "H");

		fprintf(fp,"ee: ");	// mean expected number of errors
		fprint_doubles(fp, ini->H_ee, opt->K ,3,1);

		fprintf(fp, "uniq seq id: ");
		fprint_uints(fp, ini->H, opt->K, 3, 1);

		fprintf(fp, "scaled true abun: ");
		fprint_doubles(fp, ini->H_abun, opt->K, 3, 1);

		fprintf(fp, "obser abun: ");
		for (unsigned k = 0; k < opt->K; k++)
			fprintf(fp, " %*u", 3, ini->uniq_seq_count[ini->H[k]]);
		fprintf(fp, "\n");

#ifdef ABUN_INTERVAL
		fprintf(fp, "p value: ");
		/* small p-values in scientific notation for readability */
		for (unsigned k = 0; k < opt->K; k++){
			double pvalue = exp(ini->H_pvalue[k]);
			if (pvalue < 1e-3)
				fprintf(fp, " %8.2e", pvalue);
			else
				fprintf(fp, " %.3f", pvalue);
		}
		fprintf(fp, "\n");
#endif

		if (opt->JC69_model) {
			fprintf(fp, "Estimated common ancestor: \n");
			fprint_fasta(fp, mod->est_ancestor, 1,
				dat->max_read_length, &dat->max_read_length,
				"Ancestor");
			fprintf(fp, "Evolution_rate: ");
			fprint_doubles(fp, mod->distance, opt->K, 3, 1);
			fprintf(fp, "log likelihood from JC69 model:%f\n",
								mod->JC_ll);
		}

		fprintf(fp, "log likelihood: %f\n", mod->best_ll);
		fprintf(fp, "Diagnostic Probability threshold: %8.2e\n",
							opt->p_threshold);
		fprintf(fp, "aic: %f\n", mod->aic);
		fprintf(fp, "bic: %f\n", mod->bic);

		fclose(fp);

		mmessage(INFO_MSG, NO_ERROR, "Output the final result file: "
					"%s \n", opt->outfile_info);
	}

	if(outfile) free(outfile);

	/* format output fasta file for UCHIME */
	if (output_hap) {

		FILE *fp2 = NULL;

		if (!opt->outfile_fasta) {
			if (!opt->outfile_base)
				return mmessage(ERROR_MSG, INTERNAL_ERROR,
						"invalid output filenames");
			/* +5 bytes: ".fa" plus NUL (one spare) */
			outfile_hap = malloc((strlen(opt->outfile_base) + 5)
							* sizeof(char));
			if (!outfile_hap)
				return mmessage(ERROR_MSG, MEMORY_ALLOCATION,
							"output file");
			strcpy(outfile_hap, opt->outfile_base);
			strcat(outfile_hap, ".fa");
			opt->outfile_fasta = outfile_hap;
		}

		fp2 = fopen(opt->outfile_fasta, "w");
		if (!fp2)
			return mmessage(ERROR_MSG, FILE_OPEN_ERROR,
						opt->outfile_fasta);

		if (use_size)
			fprint_haplotypes_size(fp2, ini->seeds, opt->K,
				ini->seed_lengths, opt->p_threshold, "H",
				ini->H_pvalue, ri->optimal_cluster_size,
				ini->H_ee);
		else
			fprint_haplotypes_abun(fp2,ini->seeds, opt->K,
				ini->seed_lengths, opt->p_threshold, "H",
				ini->H_pvalue, ini->H_abun, ini->H_ee);

		fclose(fp2);

		mmessage(INFO_MSG, NO_ERROR, "Output the final haplotype fasta "
					"file: %s \n", opt->outfile_fasta);
	}

	if(outfile_hap) free(outfile_hap);

	return err;
}/* ampliCI */

/**
 * Select no more than K_max real haplotype sequences
 *
 * @param ini	initializer object
 * @param dat	data object
 * @param opt	options object
 * @param mod	model object
 * @param K_max	maximum number of haplotypes selected
 *
 * @return error status
 **/
int haplotype_selection(options * opt, data * dat, model *mod,
				initializer *ini, unsigned int K_max)
{
	int err = NO_ERROR;
	int fxn_debug = opt->info;//DEBUG_I;

	/* ------------------------------------------------------------------ */
	/* Variable Declaration */
	double
	low_bound;	/* minimum estimated true abundance to keep a candidate */
	int false_positive;
	double *error_profile = NULL;

	if (opt->use_error_profile && mod->error_profile) {
		error_profile = mod->error_profile;
		debug_msg(DEBUG_II, fxn_debug, "Use error profile. \n");
	}

	if (opt->low_bound > 1) {
		low_bound = opt->low_bound;
	} else {
		low_bound = 2.0;	// avoid singletons
		mmessage(WARNING_MSG, INVALID_USER_INPUT, "User low bound on "
			"abundance set <= 1: resetting to %f.\n", low_bound);
		if (opt->contamination_threshold != 1) {
			opt->contamination_threshold = 1;
			mmessage(WARNING_MSG, INVALID_USER_INPUT,
				"resetting contamination threshold %u.",
				opt->contamination_threshold);
		}
	}

	/* malloc space only when we check false positive */
	unsigned int *array_fp = NULL;
	double *fp_abun = NULL;		// just for test
	double *fp_trans = NULL;	// store the trans prob

	unsigned int select = 0;	// num of hap selected
	unsigned int num_fp = 0;	// num of false positive removed
	unsigned int ord;		// index of current select haplotypes
	unsigned int K_space = opt->K;	// current space for K clusters
	unsigned int ini_K = opt->K;	// record the initial K space
	unsigned int n_candidate = dat->hash_length;

	/* store result from nw alignment between each unique sequences and
	 * newly identified sequence */
	unsigned char *** nw_result = NULL;
	size_t *nw_alen = NULL;

	/* Determine number of candidates here, just based on the observed
	 * abundance: keep the leading unique sequences whose observed count
	 * reaches low_bound (assumes hash sorted by decreasing abundance —
	 * the first sequence below the bound truncates the candidate list) */
	//double adj_low_bound = low_bound - 0.001; // try to solve a BUG here
	for (unsigned int i =0; i< dat->hash_length; i++){
		if (low_bound - ini->uniq_seq_count[i] > DBL_EPSILON
			* fmax(ini->uniq_seq_count[i], low_bound)) {	// AVOID A NUMERICAL BUG
			n_candidate = i;
			break;
		}
	}

	if(!n_candidate)
		return mmessage(ERROR_MSG, INTERNAL_ERROR, "Abundance of all unique sequences are under %f\n",opt->low_bound);

	/* determine the threshold of diagnostic test:
	 * Bonferroni-style correction when testing per candidate */
	if (opt->per_candidate)
		opt->p_threshold = opt->alpha / n_candidate;
	else
		opt->p_threshold = opt->alpha;

	/* Space allocation */
	if (((err= amplici_malloc(opt, dat, ini, &array_fp, &fp_abun,
			&fp_trans, &nw_result, &nw_alen, K_space, n_candidate))))
		return err;

	/* --------------------------------------------------------------------------- */
	/* Initialization: choose the most abundant unique sequences */
	select = 0;
	num_fp = 0;
	ord = 0;

	/* initialize the abun_true table with observed count */
	for (unsigned int i = 0; i < n_candidate; i++)
		ini->abun_true[i] = ini->uniq_seq_count[i];

	/* calculate the self transition probability */
	Expected_SelfTrans(opt, dat, ini->self_trans, error_profile,
					mod->err_encoding, mod->adj_trunpois);

	/* select the first haplotype with the highest abundance */
	update_seeds(dat, ini, select, ord);

	debug_msg(DEBUG_I, fxn_debug, "Selecting %d with estimated true"
			" abundance %.3f\n", ord, ini->H_abun[select]);

	/* [TODO] parallelize */
	if (opt->nw_align == ALIGNMENT_UNIQ_SEQ)
		ExpTrans_nwalign(dat, opt, ini, nw_result, nw_alen, select,
			error_profile, mod->err_encoding, ord, mod->adj_trunpois);
	else
		ExpTrans_nogap(dat, opt, ini, ord, select, error_profile,
							mod->err_encoding);

	/* use the code only when the haplotype chosen without doubt */
	select++;
	ini->abun_true[ord] = 1.0;	// to avoid being selected against

	/* mean expected number of errors for the first haplotype */
	if ((err = mean_exp_errors(dat, ini->uniq_seq_idx[ord],
				ini->uniq_seq_count[ord], &ini->H_ee[0])))
		return err;

	ini->H_pvalue[0] = 0.;	// arbitrarily set to 0

	/* Initialize aic and bic */
	mod->aic = INFINITY;
	mod->bic = INFINITY;

	/* seems to work now */
	evaluate_haplotype(opt, dat, mod, ini, 1, low_bound, error_profile, 0,
					n_candidate, &false_positive, 1);

	/* ------------------------------------------------------------- */
	/* choose other haplotypes: greedy selection until no candidate's
	 * estimated true abundance reaches low_bound, or K_max reached */
	while (K_max > 1) {

		false_positive = 0;

		/* If we need more space, we need to realloc the space */
		/* For increasing K */
		if (select == K_space) {
			debug_msg(DEBUG_III, fxn_debug, "begin reallocation");
			K_space = K_space + ini_K;
			if ((err = amplici_realloc(opt, ini, mod, &array_fp,
				&fp_abun, &fp_trans, select, K_space, num_fp,
				num_fp, dat->sample_size, dat->hash_length,
				dat->max_read_length)))
				return err;
			debug_msg(DEBUG_III, fxn_debug, "Finish reallocation\n");
		}

		/* For increasing num_fp */
#ifdef STORE_FP
		if (opt->check_false_positive && num_fp == fp_space) {
			fp_space = fp_space * 2;
			if ((err = amplici_realloc(opt, ini, mod, &array_fp,
				&fp_abun, &fp_trans, K_space, K_space, num_fp,
				fp_space, dat->sample_size, dat->hash_length,
				dat->max_read_length)))
				return err;
		}
#endif

		/* update relative true abundance and
		 * choose a haplotype with the highest relative true abundance */
		double max = 0.;

		/* [TODO] parallelize */
		//#pragma omp parallel for
		for (unsigned int i = 0; i < n_candidate; i++) {
			if (ini->abun_true[i] >= low_bound) {
				/* non-convergence is only a warning; the last
				 * iterate is kept in ini->abun_true[i] */
				if ((err = expected_TrueAbundance(opt, dat,
					ini->H_abun, ini->e_trans,
					ini->self_trans, ini->abun_true,
					ini->uniq_seq_idx,
					ini->uniq_seq_count[i], select, i,
					opt->convergence_amplici, low_bound)))
					mmessage(WARNING_MSG, INTERNAL_ERROR,
						"warning about no convergence "
						"generated when updating true "
						"abundance of %i in %ith step \n",
						i, select + 1);
				debug_msg(DEBUG_III, fxn_debug, "Estimated "
					"abundance of %d: %.3f\n", i,
					ini->abun_true[i]);
			}
		}

		for (unsigned int i =0; i < n_candidate; i++)
			if (ini->abun_true[i] > max) {
				ord = i;
				max = ini->abun_true[i];
			}

		/* stop when even the best remaining candidate is too rare */
		if (ini->abun_true[ord] < low_bound)
			break;

		/* select the haplotype temporarily */
		update_seeds(dat, ini, select, ord);

		debug_msg(DEBUG_I, fxn_debug, "Selecting %d with estimated true"
			" abundance %.3f\n", ord, ini->H_abun[select]);

		/* Transition prob without alignment free strategy */
		if (opt->nw_align == ALIGNMENT_UNIQ_SEQ)
			ExpTrans_nwalign(dat, opt, ini, nw_result, nw_alen,
				select, error_profile, mod->err_encoding, ord,
				mod->adj_trunpois);
		else if (opt->nw_align == NO_ALIGNMENT)
			ExpTrans_nogap(dat, opt, ini, ord, select,
					error_profile, mod->err_encoding);
		/* computed in evaluate_haplotype() for opt->nw_align ==
		 * ALIGNMENT_HAPLOTYPES */

		/* evaluate the newly selected haplotype
		 * check if it is a false positive */
		if (opt->check_false_positive) {
			if ((err = evaluate_haplotype(opt, dat, mod, ini,
				select + 1, low_bound, error_profile, ord,
				n_candidate, &false_positive, 0)))
				return err;
#if DEBUG
			debug_msg(DEBUG_I, fxn_debug, "check the %d th unique sequence\n", ord);
			for (size_t j = 0; j < dat->max_read_length; ++j)
				fprintf(stderr, "%c", xy_to_char[ini->seeds[select][j]]);
			fprintf(stderr, "\n");
#endif

			if (false_positive) {
#ifdef STORE_FP
				array_fp[num_fp] = ord;
				fp_abun[num_fp] = ini->abun_true[ord];
				double * des_sta = &fp_trans[num_fp * dat->sample_size];
				double * ori_sta = &ini->e_trans[select * dat->sample_size];
				memcpy(des_sta, ori_sta, dat->sample_size * sizeof *des_sta);
#endif
				num_fp ++;
				ini->abun_true[ord] = 1.0;	// To avoid being selected again
				debug_msg(DEBUG_I, fxn_debug, "remove %d since "
					"it is a false positive\n", ord);
				continue;
			}
		}

		select ++;
		ini->abun_true[ord] = 1.0;

		if (select == K_max)
			break;
	}

	opt->K = select;

	// update E matrix, mod->ll, mod->param, mod->bic (modified) , mod->aic (modified)
	debug_msg(DEBUG_I, fxn_debug, "Final Statistics: \n");
	evaluate_haplotype(opt, dat, mod, ini, opt->K, low_bound,
			error_profile, 0, n_candidate, &false_positive, 1);

	for (unsigned int k = 0; k < opt->K; k++) {
		debug_msg(SILENT, fxn_debug, "True abundance of the %ith "
			"haplotype (%ith unique seq id): %.3f\n", k, ini->H[k],
			ini->H_abun[k]);
		debug_msg(SILENT, fxn_debug, "observed abundance of the %ith "
			"haplotype (%i th unique seq id): %i\n", k, ini->H[k],
			ini->uniq_seq_count[ini->H[k]]);
		debug_msg(DEBUG_I, fxn_debug, "The mean exp errors of the %ith "
			"haplotype (%i th unique seq id): %.3f\n", k, ini->H[k],
			ini->H_ee[k]);
	}

	mod->best_ll = mod->ll;

	amplici_free(array_fp, fp_abun, fp_trans, nw_result, nw_alen,
							dat->hash_length);

	opt->K_space = K_space;

	return err;
}/* haplotype_selection */

/**
 * Free space if we store arrays for false positives or use ALIGNMENT_UNIQ_SEQ
 *
 * @param array_fp	sequences of
false positives * @param fp_abun estiamted abundance of false positives * @param fp_trans transition prob of false positive * @param nw_result alignment result * @param nw_alen alignment length * @param nw_size num. of alignment result * **/ void amplici_free(unsigned int *array_fp, double *fp_abun, double *fp_trans, unsigned char *** nw_result, size_t *nw_alen, unsigned int nw_size){ if(array_fp)free(array_fp); if(fp_abun)free(fp_abun); if(fp_trans)free(fp_trans); if (nw_result){ free_nw_result(nw_result, nw_size); free(nw_result); } if(nw_alen)free(nw_alen); }/* amplici_free */ /** * Free space used for NW alignments. * * @param nw_result the memory with alignments * @param space number of alignments * **/ void free_nw_result(unsigned char ***nw_result, unsigned int space) { for (unsigned int i = 0; i < space; i++) { if (nw_result[i]) { for (unsigned int j = 0; j < 2; j++) if (nw_result[i][j]) free(nw_result[i][j]); free(nw_result[i]); } nw_result[i] = NULL; } //free(nw_result); }/* free_nw_result */ /** * Obtain Needleman-Wunsch alignment between a selected haplotype and all other * unique sequences. Stores alignment for each unique sequence in * \par nw_result and alignment length in \par nw_alen. Also stores number * of mismatches in initializer::nw_mismatch and number of indels in * initializer::nw_indels. * * [KSD] This code is largely duplicated with check_fp_with_indels(). Consider * [KSD] putting the duplicated stuff in one location. Also, it is not * [KSD] clear why the results are stored in the initializer. In fact, it is * [KSD] not clear why there is an initializer object at all. Put in the data * [KSD] object? 
* * @param opt pointer to options object * @param dat pointer to data object * @param ini pointer to initializer object * @param nw_result nw alignment result (to be set) * @param nw_alen length of nw alignemnt (to be set) * @param size num of unique sequences * @param select current selected haplotype * * @return err error status **/ int nwalign_matrix(options *opt, data *dat, initializer *ini, unsigned char ***nw_result, size_t *nw_alen, unsigned int size, unsigned int select) { int err = NO_ERROR; //unsigned int size; size = dat->hash_length; int ends_free = 1; // should be ends_free alignment. No panalty for gaps in the end unsigned char *ref_seq = ini->seeds[select]; unsigned int ref_len = ini->seed_lengths[select]; /* align each unique sequence to current haplotype */ for (unsigned int i = 0; i < size; ++i) { size_t alen = dat->max_read_length; unsigned char **aln = NULL; unsigned int rlen = dat->lengths[i]; /* only when sequences are different, we need alignment */ aln = nwalign(ref_seq, dat->dmat[ini->uniq_seq_idx[i]], (size_t) ref_len, (size_t) dat->lengths[ini->uniq_seq_idx[i]], opt->score, opt->gap_p, opt->band, ends_free, NULL, &err, &alen); #if DEBUG fprintf(stderr, "alignment"); for (size_t j = 0; j < alen; ++j) { fprintf(stderr, "%c", aln[0][j] == '-' ? '-' : xy_to_char[(int) aln[0][j]]); } fprintf(stderr, "\n"); for (size_t j = 0; j < alen; ++j) { fprintf(stderr, "%c", aln[1][j] == '-' ? 
'-' : xy_to_char[(int) aln[1][j]]); } #endif nw_result[i] = aln; nw_alen[i] = alen; /* calculate number of indels and mismatch based on alignment */ ana_alignment(aln, alen, rlen, &ini->nw_indels[select*size+i], &ini->nw_mismatch[select*size+i], opt->info); } return err; }/* nwalign_matrix */ /** * malloc additional space specific for amplici * * @param opt pointer to options object * @param dat pointer to data object * @param ini pointer to initializer object * @param array_fp sequences of false positives * @param fp_abun estiamted abundance of false positives * @param fp_trans transition prob of false positive * @param nw_result nw alignment result * @param nw_alen length of nw alignemnt * @param K_space space for K clusters * @param n_candidate total number of candidates * * @return err error status **/ int amplici_malloc(options *opt, data *dat, initializer *ini, unsigned int **array_fp, double **fp_abun, double **fp_trans, unsigned char ****nw_result, size_t **nw_alen, unsigned int K_space, unsigned n_candidate) { UNUSED(array_fp); UNUSED(fp_abun); UNUSED(fp_trans); /* true abundance table */ if (!ini->abun_true) ini->abun_true = malloc(n_candidate * sizeof * ini->abun_true); if (!ini->abun_true) return mmessage(ERROR_MSG, MEMORY_ALLOCATION, "amplici.abun_true"); /* haplotypes idx in unique sequence table * need reallocation when K increases */ if (!ini->H) ini->H = malloc(K_space * sizeof * ini->H); if (!ini->H) return mmessage(ERROR_MSG, MEMORY_ALLOCATION, "amplici.H"); /* Haplotypes abundance */ if (!ini->H_abun) ini->H_abun = malloc(K_space * sizeof * ini->H_abun); if (!ini->H_abun) return mmessage(ERROR_MSG, MEMORY_ALLOCATION, "amplici.H_abun"); /* haplotypes expected number of errors */ if (!ini->H_ee) ini->H_ee = malloc(K_space * sizeof * ini->H_ee); if (!ini->H_ee) return mmessage(ERROR_MSG, MEMORY_ALLOCATION, "amplici.H_ee"); /* p-value of haplotypes */ if (!ini->H_pvalue) ini->H_pvalue = malloc(K_space * sizeof *ini->H_pvalue); if (!ini->H_pvalue) 
return mmessage(ERROR_MSG,MEMORY_ALLOCATION,"amplici.H_pvalue"); /* false positive table may need reallocation */ #ifdef STORE_FP if (opt->check_false_positive) { //if (!*array_fp) *array_fp = malloc(K_space * sizeof **array_fp); //if (!*fp_abun) *fp_abun = malloc(K_space * sizeof **fp_abun); //if (!*fp_trans) *fp_trans = malloc(K_space * dat->sample_size * sizeof **fp_trans); if (!*fp_abun || !*array_fp || !*fp_trans ) return mmessage(ERROR_MSG, MEMORY_ALLOCATION, "amplici.fp"); } #endif /* malloc transition matrix between reads and haplotypes (log value) * need reallocation when K increases */ if (!ini->e_trans) ini->e_trans = malloc(dat->sample_size * K_space * sizeof * ini->e_trans); if (!ini->e_trans) return mmessage(ERROR_MSG, MEMORY_ALLOCATION, "amplici.e_trans"); /* self transition matrix */ if (!ini->self_trans) ini->self_trans = malloc(dat->sample_size * sizeof * ini->self_trans); if (!ini->self_trans) return mmessage(ERROR_MSG, MEMORY_ALLOCATION, "amplici.self_trans"); /* simplified results of nw alignment, (record it initializer struct ) need reallocation when K increases. could also be used as a bound to update true abundance. */ if (opt->nw_align == ALIGNMENT_UNIQ_SEQ) { if (!ini->nw_mismatch) ini->nw_mismatch = calloc(dat->hash_length * K_space, sizeof * ini->nw_mismatch); if (!ini->nw_mismatch) return mmessage(ERROR_MSG, MEMORY_ALLOCATION, "amplici.nw_mismatch"); if (!ini->nw_indels) ini->nw_indels = calloc(dat->hash_length * K_space, sizeof * ini->nw_indels); if (!ini->nw_indels) return mmessage(ERROR_MSG, MEMORY_ALLOCATION, "amplici.nw_indels"); /* results of nw alignment */ /* should think about a more effcient way to do this. 
nw alignment is really time consuming */ /* Just store the result for each haplotypes (memory efficient) */ //if (!*nw_result) *nw_result = malloc(dat->hash_length * sizeof(**nw_result)); //if (!*nw_alen) *nw_alen = malloc(dat->hash_length * sizeof ** nw_alen); if (!*nw_result || !*nw_alen) return mmessage(ERROR_MSG, MEMORY_ALLOCATION, "amplici.nw"); } return NO_ERROR; }/* amplici_malloc */ /** * realloc space when num of clusters or num of false positives increases * * @param opt pointer to options object * @param mod pointer to model object * @param ini pointer to initializer object * @param array_fp sequences of false positives * @param fp_abun estimated abundance of false positives * @param fp_trans transition prob of false positive * @param preK current space for preK clusters * @param K realloc space for K clusters * @param pre_nfp current space for pre_nfp false positives * @param nfp realloc space for nfp false positives * @param sample_size number of total reads * @param hash_length total number of candidates * @param max_read_length length of reads * * @return error status * * need to realloc space for pi, haplotype, distance, eik in model object * seeds, seed_idx, seed_lengths in initializer object * */ int amplici_realloc(options *opt, initializer *ini, model *mod, unsigned int **array_fp, double **fp_abun, double **fp_trans, unsigned int preK, unsigned int K, unsigned int pre_nfp, unsigned int nfp, size_t sample_size, unsigned int hash_length, unsigned int max_read_length) { int err = NO_ERROR; #ifdef STORE_FP unsigned int fp_change = nfp - pre_nfp; if (fp_change) { unsigned int *array_fp_copy = realloc(*array_fp, nfp * sizeof **array_fp); double * fp_abun_copy = realloc(*fp_abun, nfp * sizeof **fp_abun); double *fp_trans_copy = realloc(*fp_trans,nfp * sample_size * sizeof **fp_trans); if (!array_fp_copy || !fp_abun_copy || !fp_trans_copy) { if (array_fp_copy) free(array_fp_copy); if (fp_abun_copy) free(fp_abun_copy); if (fp_trans_copy) 
				free(fp_trans_copy);
			return mmessage(ERROR_MSG, MEMORY_ALLOCATION,
						"amplici.realloc.fp");
		}
		*array_fp = array_fp_copy;
		*fp_abun = fp_abun_copy;
		*fp_trans = fp_trans_copy;
	}
#else
	UNUSED(array_fp);
	UNUSED(fp_abun);
	UNUSED(fp_trans);
	UNUSED(nfp);
	UNUSED(pre_nfp);
#endif

	unsigned int K_change = K - preK;

	if (K_change > 0) {

		/* H and H_abun: realloc through temporaries; on partial failure
		 * the surviving new blocks are freed and an error returned
		 * (NOTE(review): the untouched originals remain owned by ini) */
		unsigned int *H = realloc(ini->H, K * sizeof * ini->H);
		double *H_abun = realloc(ini->H_abun, K * sizeof * ini->H_abun);
		double *H_ee = realloc(ini->H_ee, K * sizeof * ini->H_ee);
		double *H_pvalue = realloc(ini->H_pvalue,K * sizeof * ini->H_pvalue);

		if (!H || !H_abun || !H_ee || !H_pvalue) {
			if (H) free(H);
			if (H_abun) free(H_abun);
			if (H_ee) free(H_ee);
			if (H_pvalue) free(H_pvalue);
			return mmessage(ERROR_MSG, MEMORY_ALLOCATION,
						"amplici.realloc.H");
		}

		ini->H = H;
		ini->H_abun = H_abun;
		ini->H_ee = H_ee;
		ini->H_pvalue = H_pvalue;

		/* e_trans */
		double *e_trans = realloc(ini->e_trans,
				sample_size * K * sizeof *ini->e_trans);
		if (!e_trans)
			return mmessage(ERROR_MSG, MEMORY_ALLOCATION,
						"amplici.realloc.e_trans");
		ini->e_trans = e_trans;

		/* nw mismatch and mw_indels */
		if (opt->nw_align == ALIGNMENT_UNIQ_SEQ) {
			unsigned int *nw_mismatch = realloc(ini->nw_mismatch,
				hash_length * K * sizeof *ini->nw_mismatch);
			unsigned int *nw_indels = realloc(ini->nw_indels,
				hash_length * K * sizeof * ini->nw_indels);
			if (!nw_mismatch || !nw_indels) {
				if (nw_mismatch) free(nw_mismatch);
				if (nw_indels) free(nw_indels);
				return mmessage(ERROR_MSG, MEMORY_ALLOCATION,
							"amplici.realloc.nw");
			}
			ini->nw_mismatch = nw_mismatch;
			ini->nw_indels = nw_indels;
		}

		/* haplotype, pi, eik */
		double *pi = realloc(mod->pi, K * sizeof *mod->pi);
		//unsigned char *haplotypes = realloc(mod->haplotypes,
		//	max_read_length * K * sizeof *mod->haplotypes);
		double *eik = realloc(mod->eik, sample_size * K * sizeof *mod->eik);
		double *distance = realloc(mod->distance, K * sizeof *mod->distance);
		double *JC_ll_K = realloc(mod->JC_ll_K, K * sizeof *mod->JC_ll_K);
		unsigned int *cluster_size = realloc(ini->cluster_size,
					K * sizeof *ini->cluster_size);

		if (!pi || !eik || !distance || !cluster_size || !JC_ll_K) {
			if (pi) free(pi);
			//if (haplotypes) free(haplotypes);
			if (eik) free(eik);
			if (distance) free(distance);
			if (cluster_size) free(cluster_size);
			if (JC_ll_K) free(JC_ll_K);
			return mmessage(ERROR_MSG, MEMORY_ALLOCATION,
						"amplici.realloc.mod");
		}

		mod->pi = pi;
		//mod->haplotypes = haplotypes;
		mod->eik = eik;
		mod->distance = distance;
		mod->JC_ll_K = JC_ll_K;
		ini->cluster_size = cluster_size;

		/* seed storage is grown by a dedicated helper */
		if((err = realloc_seeds(ini, max_read_length, preK, K)))
			return err;

		/* seeds, seeds_length, seed_idx */
		//size_t *seed_idx = realloc(ini->seed_idx, K * sizeof *ini->seed_idx);
		/*
		unsigned int *seed_lengths = realloc(ini->seed_lengths, K * sizeof *ini->seed_lengths);
		data_t **seeds = realloc(ini->seeds, K * sizeof *ini->seeds);

		if ( !seeds || !seed_lengths) {
			//if (seed_idx) free(seed_idx);
			if (seeds) free(seeds);
			if (seed_lengths) free(seed_lengths);
			return mmessage(ERROR_MSG, MEMORY_ALLOCATION,
						"amplici.realloc.seed");
		}

		// ini->seed_idx = seed_idx;
		ini->seed_lengths = seed_lengths;  // Uninitialized
		ini->seeds = seeds;

		data_t *dptr = realloc(ini->seeds[0],
				max_read_length * K * sizeof **ini->seeds);
		if (!dptr)
			return mmessage(ERROR_MSG, MEMORY_ALLOCATION,
						"reallloc.initializer.seeds");

		size_t s = 0;
		if (ini->seeds[0] == dptr)
			s = preK;

		for (size_t k = s; k < K; k++) {
			ini->seeds[k] = dptr;
			dptr += max_read_length;
		}
		*/

		/*
		for (unsigned int k = preK; k < K; k++) {
			seeds[k] = NULL;
			seeds[k] = calloc(max_read_length, sizeof **ini->seeds);
			if (!seeds[k]) {
				//free(seed_idx);
				free(seeds);
				free(seed_lengths);
				for (unsigned int i = preK; i < k; ++i)
					free(seeds[i]);
				return mmessage(ERROR_MSG, MEMORY_ALLOCATION,
						"amplici.realloc.seeds");
			}
		}
		ini->seeds = seeds;
		*/
	}

	return err;
}/* amplici_realloc */

/**
 * Update estimated scaled true abundance for each uniq seq
 *
 * @param opt	pointer to opt object
 * @param dat	pointer to data object
 * @param H_abun	abundance
of haplotypes
 * @param self_trans	log probability of misread to each read from itself
 * @param abun_true	estimated true abundances of all uniq seq
 * @param e_trans	log probability of misread to each read from existing
 *			haplotypes
 * @param idx		idx of the unique seq in dmat
 * @param i		idx of current unique sequence
 * @param select	total number of selected haplotypes
 * @param count_i	observed abundance of unique sequence
 * @param conve		convergence or not when updating abundance
 * @param low_bound	lower bound of haplotype abundance
 *
 * @return		error status
 */
int expected_TrueAbundance(options *opt, data *dat, double *H_abun,
	double *e_trans, double *self_trans, double *abun_true, size_t *idx,
	unsigned int count_i, unsigned int select, unsigned int i, int conve,
	double low_bound)
{
	int err = NO_ERROR;
	double delta;
	unsigned int iter = 0;

	/* find the read idx in the sample set of this unique sequence */
	//hash *unit = NULL;
	unsigned char *seq = dat->dmat[idx[i]];
	unsigned int length = dat->lengths[idx[i]];
	//HASH_FIND(hh, dat->seq_count, seq, length * sizeof *seq, unit);
	size_t *idx_array = NULL;

	if (((err = find_index(dat->seq_count, seq, length, &idx_array))))
		return err;

	/* one fixed-point update x <- f(x) of the scaled true abundance of
	 * unique sequence i */
	double true_abun_i = iterate_expected_true_abundance(dat->sample_size,
		idx_array, e_trans, self_trans, H_abun, select, count_i,
		abun_true[i]);
	iter++;

	if (conve) {	/* iterate the fixed-point update to convergence */
		do {
			/* relative change in the abundance estimate */
			delta = (abun_true[i] - true_abun_i) / abun_true[i];
			//delta = abun_true[i]-true_abun_i;
			/* updated abundance should be smaller than current abundance */
			// previous if((delta < 0) || (true_abun_i < 0))
			// have fixed the bug in numerical calculations.
			abun_true[i] = true_abun_i;

			/* converged, or abundance dropped below the
			 * haplotype lower bound: stop early */
			if (fabs(delta) < opt->epsilon
						|| true_abun_i < low_bound)
				return NO_ERROR;

			/* abundance is expected to decrease monotonically;
			 * an increase signals a numerical problem */
			if (delta < 0)
				return mmessage(WARNING_MSG, INTERNAL_ERROR,
					"true abundance increase.\n");

			true_abun_i = iterate_expected_true_abundance(
				dat->sample_size, idx_array, e_trans,
				self_trans, H_abun, select, count_i,
				abun_true[i]);

			if (true_abun_i < 0)
				return mmessage(WARNING_MSG, INTERNAL_ERROR,
					"True abundance under 0.\n");

			iter ++;

			if (iter > opt->n_iter_amplici)
				return mmessage(WARNING_MSG, INTERNAL_ERROR,
					"exceed max interations,%i\n", iter);
		} while(1);
	} else {	/* single update requested */
		abun_true[i] = true_abun_i;
	}

	return NO_ERROR;
}/* expected_TrueAbundance */

/**
 * One iterate of the fixed point iteration to compute expected abundance of
 * a unique sequence.
 *
 * Starting from the observed abundance, subtract for each matching read the
 * posterior probability that it was generated as a misread of one of the
 * currently selected haplotypes:  f(x) = n - sum_r P(r from H) where
 * P(r from H) = sum_k e_kr * H_abun[k] / (sum_k e_kr * H_abun[k] + s_r * x).
 *
 * @param sample_size	number of total reads
 * @param idx_array	idx of all reads of the uniq seq in dmat
 * @param e_trans	log probability of misread to each read from existing
 *			haplotypes
 * @param self_trans	log probability of misread to each read from itself
 * @param H_abun	abundance of haplotypes
 * @param select	total number of selected haplotypes
 * @param obs_abun	observed abundance of unique sequence
 * @param true_abun	estimated expected true abundance of unique sequence, x
 *
 * @return		new estimated expected true abundance, f(x)
 */
double iterate_expected_true_abundance(unsigned int sample_size,
	size_t *idx_array, double *e_trans, double *self_trans, double *H_abun,
	unsigned int select, unsigned int obs_abun, double true_abun){

	double sum_pro_H;
	double true_abun_new = obs_abun;

	for (unsigned int r = 0; r < obs_abun; r++) {
		/* prob rmi <- H */
		sum_pro_H = 0.;
		for (unsigned int k =0 ; k < select; k++)
			sum_pro_H += exp(e_trans[k * sample_size
						+ idx_array[r]]) * H_abun[k];
		true_abun_new -= sum_pro_H / (sum_pro_H
				+ exp(self_trans[idx_array[r]]) * true_abun);
	}

	return true_abun_new;
} /* iterate_expected_true_abundance */

/**
 * Compute transition probability between all reads and current haplotype
 * based on nw alignment result.
* * @param dat pointer to data object * @param opt pointer to options object * @param ini pointer to initializer object * @param nw_result nw alignment result * @param nw_alen length of nw alignment * @param select current number of haplotypes * @param error_profile input error profile * @param error_encoding nucleotide encoding of error profile * @param H_id idx of the current haplotype candidate * @param adj Pr(#{indels} < read_length) * * @return err error status **/ int ExpTrans_nwalign(data *dat, options *opt, initializer *ini, unsigned char ***nw_result, size_t *nw_alen, unsigned int select, double *error_profile, int err_encoding, unsigned int H_id, double adj) { int err = NO_ERROR; /* align every unique sequence to the candidate haplotype */ if ((err = nwalign_matrix(opt, dat, ini, nw_result, nw_alen, dat->hash_length, select))) return err; unsigned int n = dat->sample_size; double *e_trans = ini->e_trans; if (!e_trans) return mmessage(ERROR_MSG, INTERNAL_ERROR, "cal_e_trans_nw.e_trans"); for (unsigned int r = 0; r < n; r++) { unsigned int id = ini->reads_uniq_id[r]; // id in unique sequenes table unsigned int idx = n * select + r; /* transition prob between same sequences with different * quality score; no need for alignment in this case */ if (id == H_id) { e_trans[idx] = ini->self_trans[r]; // equal to self transition probability } else { unsigned char **align = nw_result[id]; size_t alen = nw_alen[id]; e_trans[idx] = trans_nw(opt, align, alen, ini->nw_mismatch[dat->hash_length*select+id], ini->nw_indels[dat->hash_length*select+id], error_profile, err_encoding, dat->qmat[r], dat->n_quality, adj, dat->lengths[r], dat->error_prob); } } /* free the alignments */ // realloc_nw_result(nw_result, dat->hash_length); free_nw_result(nw_result, dat->hash_length); // [KSD] recommends return err; }/* ExpTrans_nwalign */ /** * Self transition probability * * @param opt pointer to options object * @param dat pointer to data object * @param self_trans self transition 
prob array * @param error_profile input error profile * @param error_encoding nucleotide encodings of error profile * @param adj Pr(#{indels} <= read_length) * @return err error status **/ int Expected_SelfTrans(options *opt, data *dat, double *self_trans, double *error_profile, int err_encoding, double adj) { double log_epsilon = opt->epsilon_aln; /* [TODO] parallelize */ //#pragma omp parallel for for (unsigned int r = 0; r < dat->sample_size; r++) { self_trans[r] = 0; if (opt->indel_model == INDEL_PER_READ && opt->nw_align == ALIGNMENT_UNIQ_SEQ) self_trans[r] += dpois(0, opt->indel_error * dat->lengths[r], 1) - adj; for (unsigned int j = 0; j < dat->lengths[r]; j++) { /* to accelerate? */ // if( self_trans[r] < log_epsilon ) { // self_trans[r] = log_epsilon; // break; // } if (error_profile) { if (err_encoding == STD_ENCODING) self_trans[r] += translate_error_STD_to_XY(error_profile, dat->n_quality, dat->dmat[r][j], dat->dmat[r][j], dat->qmat[r][j]); else if (err_encoding == XY_ENCODING) self_trans[r] += error_profile[(NUM_NUCLEOTIDES * dat->dmat[r][j] + dat->dmat[r][j]) * dat->n_quality+dat->qmat[r][j]]; } else { //double ep = adj * error_prob(dat->fdata, dat->qmat[r][j]); double ep = dat->error_prob[dat->qmat[r][j]]; self_trans[r] += log(1 - ep); } } } return NO_ERROR; }/* Expected_SelfTrans */ /** * Transition probability without nw alignment. 
* * @param opt pointer to options object * @param dat pointer to data object * @param ini pointer to initializer object * @param select total number of selected haplotypes * @param H_id idx of the current haplotype candidate * @param error_profile input error profile * @param error_encoding nucleotide encodings of error profile * * @return err error status **/ int ExpTrans_nogap(data *dat, options *opt, initializer *ini, unsigned int H_id, unsigned int select, double *error_profile, int err_encoding) { // double log_epsilon = opt->epsilon_aln; unsigned char *seq = ini->seeds[select]; unsigned int n = dat->sample_size; unsigned int idx; double l1third = log(1./3); for (unsigned int r = 0; r < dat->sample_size; r++) { unsigned int id = ini->reads_uniq_id[r]; idx = n * select + r; if (id == H_id) { ini->e_trans[idx] = ini->self_trans[r]; } else { ini->e_trans[idx] = 0.; for (unsigned int j = 0; j < dat->lengths[r]; j++) { /* to accelerate? */ // if (ini->e_trans[idx] < log_epsilon) { // ini->e_trans[idx] = log_epsilon; // break; // } if (error_profile) { if (err_encoding == STD_ENCODING) ini->e_trans[idx] += translate_error_STD_to_XY( error_profile, dat->n_quality, seq[j], dat->dmat[r][j], dat->qmat[r][j]); else if (err_encoding == XY_ENCODING) ini->e_trans[idx] += error_profile[(NUM_NUCLEOTIDES * seq[j] + dat->dmat[r][j]) * dat->n_quality + dat->qmat[r][j]]; } else { //double ep = adj * error_prob(dat->fdata, dat->qmat[r][j]); double ep = dat->error_prob[dat->qmat[r][j]]; if (dat->dmat[r][j] == seq[j]) ini->e_trans[idx] += log(1 - ep); else ini->e_trans[idx] += log(ep) + l1third; } } } } return NO_ERROR; }/* ExpTrans_nogap */ /** * Transition probability (logged) between one read and current haplotype * based on nw alignment result. * * @param opt pointer to options object * @param aln nw alignment result * @param alen alignment length * @param mismatch num of mismatches ([KSD] Why mismatch?) 
* @param ngap num of gaps * @param error_profile input error profile * @param error_encoding nucleotide encodings of error profile * @param rqmat read quality score sequence * @param adj Pr(#{indels} <= read_length) * @param rlen read length * @param n_quality the number of possible quality scores * @param error_prob error prob indicated by quality score * * @return e_trans log transition prob **/ double trans_nw(options *opt, unsigned char **aln, size_t alen, unsigned int mismatch, unsigned int ngap, double *error_profile, int err_encoding, unsigned char *rqmat, unsigned char n_quality, double adj, unsigned int rlen, double *error_prob) { int fxn_debug = ABSOLUTE_SILENCE; double log_epsilon = opt->epsilon_aln; double e_trans = 0; /* prob of an indel event */ if (opt->indel_model == INDEL_PER_READ) e_trans += dpois(ngap, opt->indel_error * rlen, 1) - adj; /* may be used later */ unsigned int pre_nmismatch = mismatch; unsigned int pre_ngap = ngap; double l1third = log(1./3); unsigned int nins = 0; unsigned int ndel = 0; unsigned int nindel = 0; unsigned int nmatch = 0; unsigned int nmismat = 0; if (aln) { for (size_t j = 0; j < alen; j++) { // commented Nov 25 // if (e_trans < log_epsilon) { // for acceleration // e_trans = log_epsilon; // break; // } unsigned int j1 = j - nins; // pos idx of hap unsigned int j2 = j - ndel; // pos idx of read /* gaps in the end */ if (j2 >= rlen) // No data break; if (j1 >= rlen) { //assume there is no error for the missing data in the end if (opt->indel_model != INDEL_PER_SITE1 && opt->indel_model != INDEL_PER_SITE2) continue; /* [KSD] The gory details of computing a * [KSD] read error should not be * [KSD] exposed here and other places. * [KSD] Consider an (inline) function. 
*/ /* for per-site indel models */ if (error_profile) { if (err_encoding == STD_ENCODING) e_trans += translate_error_STD_to_XY( error_profile, n_quality, aln[1][j], aln[1][j], rqmat[j2]); else if (err_encoding == XY_ENCODING) e_trans += error_profile[ (NUM_NUCLEOTIDES * aln[1][j] + aln[1][j]) * n_quality + rqmat[j2]]; } else { double ep = error_prob[rqmat[j2]]; e_trans += log(1 - ep); } continue; } /* insertion */ if (aln[0][j] == '-') { nins ++; if (j == 0) nindel++; else if (aln[0][j-1] != '-') nindel++; if (opt->indel_model == INDEL_PER_SITE1) e_trans += log(opt->insertion_error); /* [KSD] precompute */ else if (opt->indel_model == INDEL_PER_SITE2) e_trans += log(1./4); /* [KSD] precompute */ continue; } /* deletion */ if (aln[1][j] == '-') { ndel ++; if (j == 0) nindel++; else if (aln[1][j-1] != '-') nindel++; if (opt->indel_model == INDEL_PER_SITE1) e_trans += log(opt->deletion_error); continue; } /* match and mismatch */ if (error_profile) { if (err_encoding == STD_ENCODING) e_trans += translate_error_STD_to_XY( error_profile, n_quality, aln[0][j], aln[1][j], rqmat[j2]); else if (err_encoding == XY_ENCODING) e_trans += error_profile[( NUM_NUCLEOTIDES * aln[0][j] + aln[1][j]) * n_quality + rqmat[j2]]; if (aln[0][j] == aln[1][j]) nmatch++; else nmismat++; } else { double ep = error_prob[rqmat[j2]]; if (aln[0][j] == aln[1][j]) { nmatch++; e_trans += log(1 - ep); } else { nmismat++; e_trans += log(ep) + l1third; } } } /* [KSD] This is when banded alignment fails? 
YES */ } else { e_trans = log_epsilon; } /* codes for debugging */ if (fxn_debug & pre_ngap & pre_nmismatch) { if (nindel != pre_ngap || nmismat != pre_nmismatch) { mmessage(INFO_MSG, NO_ERROR, "alen: %d\n", alen); mmessage(INFO_MSG, NO_ERROR, "pre_ngap: %d; pre_mismatch:%d \n", pre_ngap,pre_nmismatch); mmessage(INFO_MSG, NO_ERROR, "ngap: %d; mismatch: %d \n", nindel,nmismat); mmessage(ERROR_MSG, INTERNAL_ERROR, "result not match, should have bug here\n"); for (size_t j = 0; j < alen; ++j) fprintf(stderr, "%c", aln[0][j] == '-' ? '-' : xy_to_char[(int) aln[0][j]]); fprintf(stderr, "\n"); for (size_t j = 0; j < alen; ++j) fprintf(stderr, "%c", aln[1][j] == '-' ? '-' : xy_to_char[(int) aln[1][j]]); } } return e_trans; }/* trans_nw */ /** * Decide whether a candidate haplotype should be included in the haplotype set. * - If \par options::nw_align == ALIGNMENT_HAPLOTYPES, then align the candidate * to existing haplotypes and * Check false positives or compute model log likelihoood and bic (final) * * @param opt pointer to options object * @param dat pointer to data object * @param mod pointer to model object * @param ini pointer to initializer object * @param K number of selected haplotypes, including candidate * @param low_bound lower bound of haplotype abundance * @param error_profile input error profile * @param ord idx of the current haplotype candidate * @param n_candidates number of total candidates * @param fp false positive or not (return) * @param final final step ? * * @return error status * * * [TODO] it is too complex. 
 Simplify it
 **/
int evaluate_haplotype(options *opt, data *dat, model *mod, initializer *ini,
	unsigned int K, double low_bound, double *error_profile,
	unsigned int ord, unsigned int n_candidates, int *fp, int final)
{
	/* NOTE(review): parameter n_candidates is currently unused here --
	 * confirm whether it is kept for interface stability */
	int fxn_debug = opt->info;
	int err = NO_ERROR;
	double new_bic = INFINITY, pre_bic = mod->bic;
	double new_aic = INFINITY, pre_aic = mod->aic;
	double JC_ll = 0;
	int false_positive;
	int diagnose = 0;	/* turn on diagnostic mode: report what the
				 * filters WOULD do without rejecting */
	unsigned int curr_K = K - 1;	/* index of the candidate haplotype */
	unsigned int n_param = K * dat->max_read_length + curr_K // haplotypes and pi
		+ (opt->use_error_profile?dat->n_quality*12:0);	// Previous is dat->n_quality * 16

	mod->n_param = n_param;
	*fp = 1;	/* default: candidate is a false positive */

	if (!final) {
		/* check for indel errors */
		if (opt->nw_align == ALIGNMENT_HAPLOTYPES) {
			if ((err = check_fp_with_indels(opt, dat, mod, ini,
				curr_K, low_bound, error_profile,
				&false_positive)))
				return err;
			debug_msg(DEBUG_I, fxn_debug, "indel errors?: %i.\n",
				false_positive);
			if (false_positive)
				return NO_ERROR;	// *fp = 1
		}

		/* contamination diagnostic */
		unsigned int rlen = ini->seed_lengths[curr_K];
		unsigned char *rseq = ini->seeds[curr_K];
		size_t *idx_array = NULL;
		unsigned int count = ini->uniq_seq_count[ini->H[curr_K]];

		if ((err = find_index(dat->seq_count, rseq, rlen, &idx_array)))
			return err;

		if ((err = abun_pvalue(opt, ini, idx_array, ini->e_trans,
			count, curr_K, opt->contamination_threshold,
			&ini->H_pvalue[curr_K], dat->sample_size, 0)))
			return err;

		/* control the type I error here */
		double adjp = opt->p_threshold;

		/* large p-value: abundance explainable by contamination plus
		 * misreads of existing haplotypes; reject candidate */
		if (ini->H_pvalue[curr_K] > adjp) {
			debug_msg(DEBUG_I, fxn_debug, "remove false positive "
				"with Diagnostic Probability %8.2e with threshold %8.2e \n",
				ini->H_pvalue[curr_K], adjp);
			if (!diagnose)
				return NO_ERROR;	// *fp = 1
		}

		/* mean expected number of errors among reads matching the
		 * candidate (stored for later reporting) */
		if ((err = mean_exp_errors(dat, ini->uniq_seq_idx[ord],
			ini->uniq_seq_count[ord], &ini->H_ee[curr_K])))
			return err;

		debug_msg(DEBUG_II, fxn_debug, "The mean exp. n.err of %d is "
			"%.3f\n", ord, ini->H_ee[curr_K]);
	}

	/* recalculate transition probabilities here (currently only for
	 * ALIGNMENT_HAPLOTYPES) with new candidate haplotype */
	/* [XP] I change to here from the main function haplotype_selection()
	 * [XP] because those transition probabilities are needed for bic
	 * [XP] check. Since we may not get here, it can save time if it is a
	 * [XP] false positive by removed indel or contamination test. */
	/* [XP] Other settings of options::nw_align are handled elsewhere;
	 * [XP] need more thought. */
	/* Transition prob with alignment free strategy */
	if (opt->nw_align == ALIGNMENT_HAPLOTYPES && !final)
		ExpTrans_nogap(dat, opt, ini, ord, curr_K, error_profile,
			mod->err_encoding);

	/* update haplotypes */
	/* [TODO] modify the function and not use mod->haplotypes (just for Acceleration) */
	//for (unsigned int k = 0; k < K; k++) /* [KSD] Haven't the first K-1 already been copied? */
	//memcpy(&mod->haplotypes[curr_K * dat->max_read_length],
	//	ini->seeds[curr_K], dat->max_read_length
	//	* sizeof *mod->haplotypes);

	/* update pi by hard assignment of reads (reassign = 1) */
	Est_pi(ini, mod->pi, dat->sample_size, K, 1);

	/* calculate ll */
	mod->ll = Simple_Estep(mod, dat->sample_size, ini->e_trans, K);

	/* calculate aic and bic (optionally under the JC69 haplotype prior) */
	if (opt->JC69_model) {
		modified_ic(ini->seeds[0], mod->est_ancestor, mod->distance,
			mod->ll, K, &JC_ll, &new_aic, &new_bic, n_param,
			dat->max_read_length, dat->sample_size);
	} else {
		new_aic = aic(mod->ll, n_param);
		new_bic = bic(mod->ll, n_param, dat->sample_size);
		JC_ll = 0;
	}

	mod->JC_ll = JC_ll;

	// if (final)
	//	debug_msg(DEBUG_I, fxn_debug, "Final ll: %f; Final JC_ll: %f.\n",
	//		mod->ll, mod->JC_ll);
	// else
	debug_msg(DEBUG_I, fxn_debug, "ll: %f; JC_ll: %f.\n",
		mod->ll, mod->JC_ll);

	/* better bic means the chosen seed is not a false positive */
	if (!diagnose && (opt->use_aic ? new_aic : new_bic)
				< (opt->use_aic ? pre_aic : pre_bic)) {
		*fp = 0;
		mod->bic = new_bic;
		mod->aic = new_aic;
	} else if (diagnose && (opt->use_aic ? new_aic : new_bic)
				< (opt->use_aic ? pre_aic : pre_bic)) {
		/* diagnostic mode: report only, keep *fp unchanged */
		debug_msg(DEBUG_I, fxn_debug,
			"Would remove false positive by delta(BIC) = %f.\n",
			new_bic - pre_bic);
	}

	//if (!final) {
	debug_msg(DEBUG_I, fxn_debug, "pre_bic : %f. new_bic: %f (%f)\n",
		pre_bic, new_bic, new_bic - pre_bic);
	debug_msg(DEBUG_I, fxn_debug, "pre_aic : %f. new_aic: %f (%f)\n",
		pre_aic, new_aic, new_aic - pre_aic);
	//} else {
	//	debug_msg(DEBUG_I, fxn_debug, "Final bic: %f\n", new_bic);
	//	debug_msg(DEBUG_I, fxn_debug, "Final aic: %f\n", new_aic);
	//}

	return err;
}/* evaluate_haplotype */

/**
 * Estimate pi by:
 * (1) hard-clustering of reads assigned by maximum transition probability from
 * each of K current haplotypes if \par reassign, or
 * (2) normalization of current estimated scaled true abundances if not
 * \par reassign.
 *
 * @param ini	pointer to initializer object
 * @param pi	cluster relative abundance (to be computed, in log scale)
 * @param sample_size	total num of reads
 * @param K	current number of clusters
 * @param reassign	use hard-clustering by transition probability
 *
 * @return	error status
 **/
int Est_pi(initializer *ini, double *pi, size_t sample_size, unsigned int K,
	int reassign)
{
	double max;
	unsigned int class;

	if (reassign) {
		/* hard-assign each read to its most probable haplotype */
		for (unsigned int k = 0; k < K; k++)
			ini->cluster_size[k] = 0;

		for (unsigned int i = 0; i < sample_size; i++) {
			max = -INFINITY;
			class = 0;
			for (unsigned int k = 0; k < K; ++k) {
				if (max < ini->e_trans[k * sample_size + i]) {
					max = ini->e_trans[k * sample_size + i];
					class = k;
				}
			}
			ini->cluster_size[class]++;
			ini->cluster_id[i] = class;
		}

		/* NOTE(review): an empty cluster yields pi[k] = log(0) = -inf
		 * -- confirm downstream code tolerates this */
		for (unsigned int k = 0; k < K; ++k){
			pi[k] = (double) ini->cluster_size[k] / sample_size;
			pi[k] = log(pi[k]);
		}
	} else {
		/* normalize current scaled true abundances */
		double abun_sum = 0;
		for (unsigned int k = 0; k < K; k++)
			abun_sum += ini->H_abun[k];
		for (unsigned int k = 0; k < K; k++){
			pi[k] = ini->H_abun[k] / abun_sum;
			pi[k] = log(pi[k]);
		}
	}

	return NO_ERROR;
}/* Est_pi */

/**
 * update seeds table in initializer when select a new haplotype
 *
 * @param ini	pointer to
initializer object * @param dat pointer to data object * @param select num of preselected haplotypes * @param ord idx of the new haplotype * * @return err error status * **/ int update_seeds(data *dat, initializer *ini, unsigned int select, unsigned int ord){ //ini->seed_idx[select] = ini->uniq_seq_idx[ord]; // idx in dmat and qmat ini->seed_lengths[select] = dat->lengths[ini->uniq_seq_idx[ord]]; memcpy(ini->seeds[select], dat->dmat[ini->uniq_seq_idx[ord]], dat->max_read_length * sizeof **ini->seeds); ini->H[select] = ord ; // idx in unique sequence table ini->H_abun[select] = ini->abun_true[ord]; // estimated true abundance of haplotypes return NO_ERROR; }/* update_seeds */ /** * Check candidate haplotype for possible indel misread of existing haplotypes. * Specifically, align the candidate haplotype to all previous haplotypes and * reestimate the scaled true abundance. If it drops below the threshold, * the candidate haplotype is considered a "false positive". * * @param opt pointer to options object * @param dat pointer to data object * @param mod pointer to model object * @param ini pointer to initializer object * @param low_bound lower bound of haplotype abundance * @param error_profile input error profile * @param select number of haplotypes, excluding candidate * @param fp detect as false positive (computed) * * @return err error status * * * [TODO] it is too complex. 
Simplify it **/ int check_fp_with_indels(options *opt, data *dat, model *mod, initializer *ini, unsigned int select, double low_bound, double *error_profile, int *fp) { int err = NO_ERROR; int fxn_debug = ABSOLUTE_SILENCE; /* nw alignment for the candidate sequence and existing haplotypes */ unsigned char ***nw_result = NULL; size_t *nw_alen = NULL; unsigned int *nw_mismatch = NULL; unsigned int *nw_indels = NULL; *fp = 0; nw_result = malloc(select * sizeof *nw_result); nw_alen = malloc(select * sizeof *nw_alen); nw_mismatch = malloc(select * sizeof *nw_mismatch); nw_indels = malloc(select * sizeof *nw_indels); if (!nw_result || !nw_alen || !nw_mismatch || !nw_indels) { err = mmessage(ERROR_MSG, MEMORY_ALLOCATION, "checkfp.nw"); goto EXIT_CHECK_FP_WITH_INDELS; } int ends_free = 1; // ends-free alignment: no penalty for terminal gaps /* haplotype candidate */ unsigned int rlen = ini->seed_lengths[select]; unsigned char *rseq = ini->seeds[select]; /* align candidate haplotype to each existing haplotype */ /* [KSD] duplicative code with nwalign_matrix() */ for (unsigned int k = 0; k < select; k++) { size_t alen; unsigned int ref_len = ini->seed_lengths[k]; unsigned char *ref_seq = ini->seeds[k]; unsigned char **aln = nwalign(ref_seq, rseq, (size_t) ref_len, (size_t) rlen, opt->score, opt->gap_p, opt->band, ends_free, NULL, &err, &alen); #if DEBUG //fprintf(stderr, "alignment"); for (size_t j = 0; j < alen; ++j) { fprintf(stderr, "%c", aln[0][j] == '-' ? '-' : xy_to_char[(int) aln[0][j]]); } fprintf(stderr, "\n"); for (size_t j = 0; j < alen; ++j) { fprintf(stderr, "%c", aln[1][j] == '-' ? 
'-' : xy_to_char[(int) aln[1][j]]); } fprintf(stderr, "\n"); #endif nw_result[k] = aln; nw_alen[k] = alen; /* calculate number of indels and mismatch based on alignment */ ana_alignment(aln, alen, rlen, &nw_indels[k], &nw_mismatch[k], opt->info); /* diagnostic output: */ //debug_msg(DEBUG_I, DEBUG_I, "haplotype %u alignment length %zu; #indels %u; #mismatches %u\n", k, alen, nw_indels[k], nw_mismatch[k]); } /*------------------------------------------------------------------- */ /* recalculate transition prob for each read with same sequence as * candidate haplotype now using NW alignment; need to allocate memory */ double *e_trans = NULL; size_t *idx_array = NULL; unsigned int count = ini->uniq_seq_count[ini->H[select]]; if ((err = find_index(dat->seq_count, rseq, rlen, &idx_array))) goto EXIT_CHECK_FP_WITH_INDELS; e_trans = malloc(select * count * sizeof *e_trans); if (!e_trans) { err = mmessage(ERROR_MSG, MEMORY_ALLOCATION, "checkfp.e_trans"); goto EXIT_CHECK_FP_WITH_INDELS; } for (unsigned int k = 0; k < select; k++) { double tmp = 0; for (unsigned int r = 0; r < count; r++) { e_trans[k*count + r] = trans_nw(opt, nw_result[k], nw_alen[k], nw_mismatch[k], nw_indels[k], error_profile, mod->err_encoding, dat->qmat[idx_array[r]], dat->n_quality, mod->adj_trunpois, rlen, dat->error_prob); tmp += e_trans[k*count + r]; #if DEBUG if (nw_indels[k] == 1) { //mmessage(INFO_MSG, NO_ERROR, "indel rate: %f\n", temp); mmessage(INFO_MSG, NO_ERROR, "the trans prob of the %i th read to hap %i is %f \n",r, k, e_trans[k*count + r]); mmessage(INFO_MSG, NO_ERROR, "The self trans is %f\n", ini->self_trans[idx_array[r]]); } #endif } } /* ------------------------------------------------------------------ */ /* reestimate scaled true abundance of candidate haplotype */ /* [KSD] duplicative code with other locations; merge to avoid * [KSD] introducing differences in, e.g., convergence criteria. 
*/ double true_abun = count; double sum_pro_H, true_abun_new; unsigned int iter = 0; double delta; do { true_abun_new = count; //true_abun_var = 0; for (unsigned int r = 0; r < count; r++) { double self_ts = ini->self_trans[idx_array[r]]; /* modify self trans rate if we consider indels */ if (opt->indel_model == INDEL_PER_READ) self_ts += dpois(0, opt->indel_error * rlen, 1) - mod->adj_trunpois; sum_pro_H = 0.; for (unsigned int k =0 ; k < select; k++) sum_pro_H += exp(e_trans[k * count + r]) * ini->H_abun[k]; true_abun_new -= sum_pro_H / (sum_pro_H + exp(self_ts) * true_abun); } delta = (true_abun - true_abun_new) / true_abun; if (delta < 0 || true_abun_new < 0) return mmessage(WARNING_MSG, INTERNAL_ERROR, "True abundnace increase or under 0\n"); if (delta < opt->epsilon || true_abun_new < low_bound) break; true_abun = true_abun_new; iter ++; if (iter > opt->n_iter_amplici) return mmessage(WARNING_MSG, INTERNAL_ERROR, "exceed max interations,%i\n", iter); } while (1); /* just a simple check for indel errors */ if (true_abun_new < low_bound) *fp = 1; debug_msg(DEBUG_I, fxn_debug,"observe abundance: %i; previous " "abundance: %f; new_abundance: %f \n", count, ini->H_abun[select], true_abun_new); if (!(*fp)) /* use true_abun_new as the current abundance */ ini->H_abun[select] = true_abun_new; EXIT_CHECK_FP_WITH_INDELS: if (nw_alen) free(nw_alen); if (nw_result){ free_nw_result(nw_result,select); free(nw_result); } if (nw_mismatch) free(nw_mismatch); if (nw_indels) free(nw_indels); if (e_trans) free(e_trans); return err; }/* check_fp_with_indels */ /** * Contamination diagnosis. We perform a simple disgnostic test for * contamination, where the contamination process is assumed to produce a fixed * number of reads of every candidate. If the number of observed reads is * considerably higher than this threshold and not explained by misreads from * the current haplotype set, then we believe the candidate haplotype is real. 
* * @param opt pointer to options object * @param ini pointer to initializer object * @param idx_array indices of reads matching candidate haplotype * @param count abserved abundance of candidate haplotype * @param e_trans transition probability matrix * @param select index of candidate haplotype * @param threshold threshold for contamination * @param error_profile input error profile * @param p pointer to p-value * @param sample_size total number of reads * @param partial is e_trans for all data or just the reads matching * candidate haplotype * * @return err error status * **/ int abun_pvalue(options *opt, initializer *ini, size_t *idx_array, double *e_trans, unsigned int count, unsigned int select, unsigned int threshold, double *p, size_t sample_size, int partial) { int fxn_debug = opt->info; double true_abun_var = 0.; double abun_null = 0.; // estimated number of count from others under null hypothesis double gamma = 0.; unsigned int rlen = ini->seed_lengths[select]; double true_abun = ini->H_abun[select]; // note the abundance of the haplotype is fixed when generate p value double *perr = NULL; /* compute exact p value if n is small enough */ if (count < MAX_N_EXACT_P) { perr = malloc(count * sizeof *perr); if (!perr) return mmessage(ERROR_MSG, MEMORY_ALLOCATION, "perr"); } for (unsigned int r = 0; r < count; r++) { double self_ts = ini->self_trans[idx_array[r]]; /* modify self trans rate if we consider indels (no longer use) */ //if (opt->indel_model == INDEL_PER_READ) // self_ts += dpois(0, opt->indel_error * rlen, 1); double Ey = exp(self_ts) * true_abun; //previous double Ey = exp(self_ts)*true_abun double Ex = 0.; for (unsigned int k =0 ; k < select; k++) { if (partial) Ex += exp(e_trans[k * count + r]) * ini->H_abun[k]; else Ex += exp(e_trans[k * sample_size + idx_array[r]]) * ini->H_abun[k]; } //mmessage(INFO_MSG,NO_ERROR,"Ey:%f; Ex: %f \n", Ey, Ex); double p = Ex / (Ex + Ey); double Varf = p * (1 - p); if (perr) perr[r] = p; 
//mmessage(INFO_MSG,NO_ERROR,"%f \n",p); true_abun_var += Varf; abun_null += p; gamma += p * (1 - p) * (1 - 2 * p); } debug_msg(DEBUG_II, fxn_debug, "variance under null: %f \n", true_abun_var); debug_msg(DEBUG_II, fxn_debug, "N_H->sm under null: %f \n", abun_null); int bound = count - (threshold+1); debug_msg(DEBUG_I, fxn_debug, "bound=%i;\n", bound); debug_msg(DEBUG_I, fxn_debug, "count=%i;\n", count); if (perr){ *p = ppoisbin(bound, count, perr, 1); // P(S > bound) }else{ /* approximate p value */ // *p = ppois(lower_bound, true_abun, 1, 0); /* [KSD] What is this? */ /* avoid numeric problem here */ double sigma = sqrt(true_abun_var); /*** double x; if (fabs(sigma-0.) < DBL_EPSILON) { *p = 0; goto EXITNOW; gamma = INFINITY; x = INFINITY; } else { gamma = gamma / (sigma * sigma * sigma); x = (bound + 0.5 - abun_null) / sigma; } double phi_x = dnorm(x,0,1,0); //double cdf_x = pnorm(x,0,1,0,0); debug_msg(DEBUG_II, fxn_debug, "gamma: %f,x: %f, phi_x:%f \n", gamma, x, phi_x); if (fabs(phi_x - 0.) < DBL_EPSILON) *p = pnorm(x,0,1,1,0); else *p = pnorm(x,0,1,1,0) + phi_x*gamma*(1.0-x*x)/6; *p = 1.0 - *p; // 1- F(x) if(*p < 0) // avoid bug *p = 0.; //refined normal approximation ***/ *p = pnorm(bound, abun_null, sigma, 0, 0); // normal approximation //*p = ppois(bound,abun_null,0,0); //poisson approximation } debug_msg(DEBUG_I, fxn_debug, "Diagnostic Probability=%8.2e;\n", *p); if (*p < 0 || *p >1) return mmessage(ERROR_MSG, INTERNAL_ERROR, "Diagnostic Probability is not in [0,1] "); EXITNOW: if (perr) free(perr); return NO_ERROR; } /* abun_pvalue */ /** * Compute model log likelihood given estimates of pi and computed * transition probabilities from each existing haplotype. Also * computes posterior probabilities of assignment for each read * using a underflow-safe calculation. 
* * @param mod pointer to model object * @param sample_size total number of reads * @param e_trans transition probability matrix * @param K number of haplotypes * * @return ll model log likelihood **/ double Simple_Estep(model *mod, size_t sample_size, double *e_trans, unsigned int K) { double ll = 0; double max, sum; unsigned int idx; for (unsigned int i = 0; i < sample_size; i++) { max = -INFINITY; for (unsigned int k = 0; k < K; k++) { idx = k * sample_size + i; mod->eik[idx] = mod->pi[k] + e_trans[idx]; if (max < mod->eik[idx]) max = mod->eik[idx]; } sum = 0.; for (unsigned int k = 0; k < K; ++k) { idx = k * sample_size + i; mod->eik[idx] = exp(mod->eik[idx] - max); sum += mod->eik[idx]; } ll += log(sum) + max; /* actually these posterior probabilities are not used */ for (unsigned k = 0; k < K; ++k) mod->eik[k * sample_size + i] /= sum; } return ll; }/* Simple_Estep */ /* print reads assignment */ void fprint_assignment(FILE *fp, unsigned int *v, size_t n, unsigned int max, int width, int newline){ size_t i; for (i = 0; i < n; ++i){ if(v[i]< max){ if (width) fprintf(fp, " %*u", width, v[i]); else fprintf(fp, " %u", v[i]); }else fprintf(fp, " NA"); } if (newline) fprintf(fp, "\n"); } /* fprint_assignment */ /** * Expected number of errors for one read. * * @param qual quality score seq * @param length length of the seq * @param error_prob error prob indicated by quality score * **/ double exp_errors(unsigned char *qual, unsigned int length, double *error_prob) { double E_err = 0.; for (unsigned int j = 0; j < length; j++) E_err += error_prob[qual[j]]; return E_err; }/* exp_errors */ /** * Compute mean expected number of errors for each uniq seq. [KSD] Why? 
* * @param dat pointer to data object * @param idx idx of the uniq seq in dmat * @param count_i observed abundance of the uniq seq * @param mean_exp_err pointer to mean_exp_err to set * @return error status **/ int mean_exp_errors(data *dat, unsigned int idx, unsigned int count_i, double *mean_exp_err) { int err; unsigned char *seq = dat->dmat[idx]; unsigned int length = dat->lengths[idx]; size_t *idx_array = NULL; if (((err = find_index(dat->seq_count, seq, length, &idx_array)))) return err; double temp; double sum = 0.; for (unsigned int i = 0; i < count_i; i++) { temp = exp_errors(dat->qmat[idx_array[i]], length, dat->error_prob); sum += temp; } *mean_exp_err = sum / count_i; return err; }/* mean_exp_errors */ /* output the format fasta file for UCHIME (use size) */ void fprint_haplotypes_size(FILE *fp, data_t **data, size_t n, unsigned int* len, double pthres, char const * const prefix, double *pvalue, unsigned int *size, double *ee) { for (size_t i = 0; i < n; ++i) { if(pvalue && pvalue[i]>=pthres) continue; fprintf(fp, ">%s%lu;", prefix, i); if(size) fprintf(fp,"size=%u;",size[i]); if(pvalue) fprintf(fp,"DiagP=%8.2e;",pvalue[i]); if(ee) fprintf(fp, "ee=%.3f;",ee[i]); fprintf(fp, "\n"); for (unsigned int j = 0; j < len[i]; ++j) fprintf(fp, "%c", xy_to_char[(int)data[i][j]]); fprintf(fp, "\n"); } } /* fprint_haplotypes_size */ /* output the format fasta file for UCHIME (use relative true abundance */ void fprint_haplotypes_abun(FILE *fp, data_t **data, size_t n, unsigned int* len, double pthres, char const * const prefix, double *pvalue, double *abun, double *ee) { for (size_t i = 0; i < n; ++i) { if(pvalue && pvalue[i]>=pthres) continue; fprintf(fp, ">%s%lu;", prefix, i); if(abun) fprintf(fp,"size=%.3f;",abun[i]); if(pvalue) fprintf(fp,"DiagP=%8.2e;",pvalue[i]); if(ee) fprintf(fp, "ee=%.3f;",ee[i]); fprintf(fp, "\n"); for (unsigned int j = 0; j < len[i]; ++j) fprintf(fp, "%c", xy_to_char[(int)data[i][j]]); fprintf(fp, "\n"); } } /* fprint_haplotypes_abun */ 
/** * Assign reads to clusters while filtering on log likelihood or posterior * probability. If a read is filtered out, it gets assigned to cluster with * index K, aka NA. * * @param K number of clusters * @param ll_cutoff cutoff of log likelihood to assign read * @param eik maximum posterior assignment probability (log) * if eik ignore pi and e_trans * @param pi relative abundance for each clusters (log) * @param e_trans transition prob matrix * @param sample_size total number fo reads * @param ri pointer to run_info object * * @return err err status **/ int likelihood_filter(unsigned int K, double ll_cutoff, double *eik, double *pi, double *e_trans, size_t sample_size, run_info *ri) { unsigned int bound = K + 1; for (unsigned int k = 0; k < bound; k++) ri->optimal_cluster_size[k] = 0; /* compute & store maximum conditional log likelihood */ for (unsigned int i = 0 ; i < sample_size; i++) { if (eik) ri->optimal_cluster_ll[i] = eik[ri->optimal_cluster_id[i] * sample_size + i]; else ri->optimal_cluster_ll[i] = pi[ri->optimal_cluster_id[i]] + e_trans[ri->optimal_cluster_id[i] * sample_size + i]; //ri->optimal_cluster_ll[i] = eik[ri->optimal_cluster_id[i] * sample_size + i]; if (ri->optimal_cluster_ll[i] < ll_cutoff) ri->optimal_cluster_id[i] = K; // treat those outliers as a new cluster ri->optimal_cluster_size[ri->optimal_cluster_id[i]]++; } return NO_ERROR; }/* likelihood_filter */ /** * get MMEs of all parameters in the JC69 model. * * @param hap pointer to haplotypes * @param anc ancestor haplotype, to be calculated (tbc) * @param dist expected no. 
changes/site b/w ancestor & haplotype, tbc * @param K number of haplotypes * @param len length of reads * @return err **/ int m_JC69(unsigned char * hap, unsigned char * anc, double *dist, unsigned int K, unsigned int len) { int err = NO_ERROR; unsigned int count[NUM_NUCLEOTIDES]; unsigned int max_count; /* most common nucleotide across haplotypes is the estimated ancestor */ for (unsigned int j = 0; j < len; j ++){ for (unsigned char n = 0; n < NUM_NUCLEOTIDES; n++) count[n] = 0; for (unsigned int k = 0; k < K; k++) count[hap[k * len + j]]++; max_count = 0; for (unsigned char n = 0; n < NUM_NUCLEOTIDES; ++n){ if (count[n] > max_count) { max_count = count[n]; anc[j] = n; } } } /* estimate the expected number of changes per site of all haplotypes */ for (unsigned int k = 0; k < K; k++) { double tmp = (double) hamming_char_dis( (char *) &hap[k*len], (char *) anc, (size_t) len) / len; /* Previous there is a bug for the estimated distance out of range */ if(tmp >= 0.75) dist[k] = INFINITY; else dist[k] = -0.75 * log(1 - tmp / 0.75); } return err; }/* m_JC69 */ /** * Calculate the log likelihood of all K haplotypes under JC69 model. * * @param hap haplotype sequences * @param anc ancestor sequence * @param dis expected no. changes/site for each haplotype * @param K number of haplotypes * @param len length of the haplotypes * @return err status **/ double e_JC69(unsigned char * hap, unsigned char * anc, double *dist, unsigned int K, unsigned int len) { double ll = 0; for (unsigned int k = 0; k < K; k++) for (unsigned int j = 0; j < len; j++) if (anc[j] == hap[k * len + j]) ll += log(0.25 + 0.75 * exp(-dist[k] / 0.75)); else ll += log(0.25 - 0.25 * exp(-dist[k] / 0.75)); return ll; }/* e_JC69 */ /** * Calculate aic and bic modified by approximate JC69 hierarchical model on * haplotypes. 
* * @param hap haplotypes * @param est_anc ancester sequence * @param distance distance from haplotypes to ancestor sequence * @param best_ll current log likelihood from data * @param K number of haplotypes * @param JC_ll log likelihood from JC69 model * @param n_aic pointer to aic, value updated * @param n_bic pointer to bic, value updated * @param n_param number of parameters in current model * @param max_read_length length of haplotypes * @param sample_size sample size * * return err status **/ int modified_ic(unsigned char *hap, unsigned char *est_anc, double *distance, double best_ll, unsigned int K, double *JC_ll, double *n_aic, double *n_bic, unsigned int n_param, unsigned int max_read_length, size_t sample_size) { int param_change = 0; m_JC69(hap, est_anc, distance, K, max_read_length); *JC_ll = e_JC69(hap, est_anc, distance, K, max_read_length); /* K branch lengths, ancestral haplotype, but no haplotypes estimated */ param_change = K - max_read_length * (K - 1); *n_aic = aic(best_ll + *JC_ll, n_param + param_change); *n_bic = bic(best_ll + *JC_ll, n_param + param_change, sample_size); return NO_ERROR; }/* modified_ic */ /** * Reads assignment with given haplotype set * * @param ini pointer to initializer object * @param dat pointer data object * @param opt pointer to options object * @param mod pointer to model object * @param ri pointer to run_info object * * return err status **/ int reads_assignment(options * opt, data * dat, model *mod, initializer *ini, run_info *ri) { int err = NO_ERROR; int fxn_debug = opt->info; double l1third = 1./3; /* maybe use error profile */ double *error_profile = NULL; if (opt->use_error_profile && mod->error_profile) { error_profile = mod->error_profile; debug_msg(DEBUG_II, fxn_debug, "Use error profile. 
\n"); } /* go through with each unique sequences */ for(unsigned int u = 0; u <dat->hash_length; ++u ){ unsigned char *read = dat->dmat[ini->uniq_seq_idx[u]]; unsigned int rlen = dat->lengths[ini->uniq_seq_idx[u]]; unsigned int count = ini->uniq_seq_count[u]; // num. of reads wih unique seq size_t *idx_array; // idx of reads if ((err = find_index(dat->seq_count, read, rlen, &idx_array))) return mmessage(ERROR_MSG, INTERNAL_ERROR, "Cannot find in the hash table !"); /* align to haplotypes */ for (unsigned int h = 0; h < opt->K; ++h){ unsigned char *hap_seq = ini->seeds[h]; if(opt->nw_align == NO_ALIGNMENT){ for(unsigned int r = 0; r < count; ++r){ double eik = 0.; size_t id = idx_array[r]; for (unsigned int j = 0; j < dat->lengths[r]; j++) { if (error_profile) { if (opt->err_encoding == STD_ENCODING) eik += translate_error_STD_to_XY( error_profile, dat->n_quality, hap_seq[j], dat->dmat[id][j], dat->qmat[id][j]); else if (opt->err_encoding == XY_ENCODING) eik += error_profile[(NUM_NUCLEOTIDES * hap_seq[j] + dat->dmat[id][j]) * dat->n_quality + dat->qmat[id][j]]; } else { //double ep = adj * error_prob(dat->fdata, dat->qmat[r][j]); double ep = dat->error_prob[dat->qmat[id][j]]; if (dat->dmat[id][j] == hap_seq[j] ) eik += log(1 - ep); else eik += log(ep) + l1third; } } mod->eik[h*dat->sample_size+ id] = eik; } }else{ size_t alen = dat->max_read_length; unsigned int nindels = 0; unsigned int nmismatch = 0; unsigned char **aln = nwalign(hap_seq, read, (size_t) ini->seed_lengths[h], (size_t) rlen, opt->score, opt->gap_p, opt->band, 1, NULL, &err, &alen); /* count for number of indels */ ana_alignment(aln, alen, rlen, &nindels, &nmismatch, opt->info); for(unsigned int r = 0; r<count;++r){ mod->eik[h*dat->sample_size+ idx_array[r]] = trans_nw(opt, aln, alen, nmismatch, nindels, error_profile, mod->err_encoding, dat->qmat[idx_array[r]], dat->n_quality, mod->adj_trunpois, rlen, dat->error_prob); debug_msg(DEBUG_III, fxn_debug, "num of indels: %i; num of " "mismatch: %i\n", 
nindels, nmismatch); } /* free */ if(aln){ free(aln[0]); free(aln[1]); free(aln); aln = NULL; } } } } if(opt->trans_matrix){ FILE *fp = fopen(opt->trans_matrix, "w"); //fprint_vectorized_matrix(fp,mod->eik,dat->sample_size,opt->K,0); not work for (size_t i = 0; i < dat->sample_size; ++i) { fprintf(fp, "%3lu", i); for (unsigned int j = 0; j < opt->K; ++j) { fprintf(fp, " %8.2e", mod->eik[j*dat->sample_size + i]); } fprintf(fp, "\n"); } fclose(fp); } /* simply update mod->pi */ assign_clusters(mod->eik, opt->K, dat->sample_size, ri->optimal_cluster_size, ri->optimal_cluster_id, 1); for (unsigned int k = 0; k < opt->K; ++k) { mod->pi[k] = (double) ri->optimal_cluster_size[k] / dat->sample_size; if (!mod->pi[k]) mod->pi[k] = 1.0 / dat->sample_size; // possible if given haplotypes mod->pi[k] = log(mod->pi[k]); } /* update mod->eik with new estimated mod->pi */ for (unsigned int r = 0; r<dat->sample_size; ++r) for (unsigned int k = 0; k < opt->K; ++k) mod->eik[k*dat->sample_size+r] += mod->pi[k]; /* reassign reads with updated mod->eik (unnormalized ) */ assign_clusters(mod->eik, opt->K, dat->sample_size, ri->optimal_cluster_size, ri->optimal_cluster_id, 1); /* filter with unnormalized mod->eik (pi* e_trans) */ likelihood_filter(opt->K, opt->ll_cutoff, mod->eik, NULL, NULL, dat->sample_size, ri); /* output the reads assignment */ FILE *fp = NULL; opt->outfile_info = opt->outfile_base; fp = fopen(opt->outfile_info, "w"); if (!fp) return mmessage(ERROR_MSG, FILE_OPEN_ERROR, opt->outfile_info); fprintf(fp, "assignments: "); fprint_assignment(fp, ri->optimal_cluster_id, dat->sample_size, opt->K, 2, 1); fprintf(fp, "cluster sizes: "); fprint_uints(fp, ri->optimal_cluster_size, opt->K, 3, 1); fclose(fp); mmessage(INFO_MSG, NO_ERROR, "Output the assignment" "file: %s \n", opt->outfile_info); return err; } /* reads_assignment */