source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
OOCFile64_old.h | #ifndef COMMON_OOCFILE64_H
#define COMMON_OOCFILE64_H
// debugging mode, tests for errors on functions calls and
// has some asserts to test correctness in debug build
//#define OOCFILE_DEBUG
// measure approximate cache hit rate
//#define OOCFILE_PROFILE
#include "LogManager.h"
#include "common.h"
#ifdef USE_OOC_FILE_LRU
#include "VDTActiveList.h"
#endif
// 64 bit addressing structure
typedef ULARGE_INTEGER OOCSize64;
// Out-of-core array of T backed by a read-only memory-mapped file.
// operator[] transparently maps fixed-size blocks of the file on demand and
// caches the mappings, either direct-mapped (USE_OOC_FILE_MOD) or with an
// LRU replacement policy (USE_OOC_FILE_LRU).
template <class T>
class OOCFile64 {
public:
// pFileName: file to map read-only; maxAllowedMem: cache budget in bytes;
// blockSize: bytes per mapped block (power of two, multiple of sizeof(T))
OOCFile64(const char * pFileName, int maxAllowedMem, int blockSize);
// returns a reference into the mapped block holding element i; the
// reference is only valid until a later access evicts that block
FORCEINLINE const T &operator[](unsigned int i);
~OOCFile64();
protected:
// unmap the view held in cache slot tablePos
void FORCEINLINE unload(unsigned int tablePos);
// map the block recorded in m_CacheTable[tablePos].fileStartOffset
void FORCEINLINE load(unsigned int tablePos);
// print the text for GetLastError() to stdout
void outputWindowsErrorMessage() {
DWORD ErrorCode = GetLastError();
LPVOID lpMsgBuf;
FormatMessage ( FORMAT_MESSAGE_ALLOCATE_BUFFER |
FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, 0, ErrorCode, 0, // Default language
(LPTSTR) &lpMsgBuf, 0, NULL );
cout << (char *)lpMsgBuf << endl;
// NOTE(review): lpMsgBuf is allocated by FormatMessage and is never
// LocalFree()d here -- confirm whether the leak on this error path matters.
}
// log access/miss counters and the hit rate in percent
void dumpCacheStats() {
LogManager *log = LogManager::getSingletonPtr();
char output[200];
// hit rate = 100 * (1 - misses/accesses). The previous code computed
// 100 - misses/accesses, i.e. subtracted the raw miss *fraction* from 100,
// which was off by a factor of 100. Also guard against zero accesses.
float hitRate = (cacheAccesses > 0) ?
100.0f * (1.0f - (float)cacheMisses/(float)cacheAccesses) : 0.0f;
sprintf(output, "Cache: %u access, %u misses. Cache hit rate: %f\n", cacheAccesses, cacheMisses, hitRate);
log->logMessage(LOG_INFO, output);
}
// handles for memory-mapped file
#ifdef WIN32
HANDLE m_hFile, m_hMapping;
#else
int m_FD;
#endif
OOCSize64 m_fileSize;
// size of individual mappings:
unsigned int m_BlockSize;
// same, as power of two
unsigned int m_BlockSizePowerTwo;
// (2^m_BlockSizePowerTwo - 1) for use as AND mask
unsigned int m_BlockMaskToOffset;
// mapping granularity from OS
unsigned int m_OSMappingGranularity;
// num cache entries:
unsigned int m_NumCacheEntries;
unsigned int m_NumCacheEntriesPowerTwo;
unsigned int m_CacheNumMask;
// maximally used memory (in bytes):
unsigned int m_MaxAllowedMem;
int m_MaxNumPage; // maximum different Pages in the original data
#ifdef USE_OOC_FILE_LRU
// node of the doubly-linked LRU list; most recently used pages are kept
// at the front of the list
class LRUEntry {
public:
unsigned int tablePos; // cache slot this page occupies
unsigned int pageID;   // file block number held in that slot
LRUEntry* m_pNext, *m_pPrev;
LRUEntry()
{
tablePos = 0;
m_pNext = m_pPrev = NULL;
}
};
#endif
// cache entry in table:
typedef struct CacheEntry_t {
char *address;             // base address of the mapped view (NULL if empty)
OOCSize64 fileStartOffset; // block number currently mapped in this slot
#ifdef USE_OOC_FILE_LRU
LRUEntry *entryLRU;        // back-pointer into the LRU list
#endif
} CacheEntry;
// main table of cache entries
CacheEntry *m_CacheTable;
// for profiling
unsigned int cacheAccesses;
unsigned int cacheMisses;
#ifdef USE_OOC_FILE_LRU
// for LRU management
int m_NumCachedPage; // maximum cached Pages in the manager
int m_CurNumCachedPage; // current # of cached Pages
int m_LastAccessedPage[NUM_THREADS]; // per-thread last slot, avoids redundant LRU updates
CActiveList <LRUEntry *> m_LRUList;
int * m_Loaded; // indicate idx of cached Page if loaded
int UNLOADED;   // sentinel value stored in m_Loaded for non-resident pages
#endif
#ifdef _USE_OPENMP
omp_lock_t *lck; // one lock per file page, serializes loading of that page
#endif
};
#ifdef WORKING_SET_COMPUTATION
extern CGPUCacheSim g_kdTreeSim;
extern CGPUCacheSim g_kdTreeSimL2;
extern CGPUCacheSim g_kdIdxSim;
extern CGPUCacheSim g_TriSim;
extern CGPUCacheSim g_VerSim;
extern CGPUCacheSim g_LODSim;
#endif
// constructor: opens pFileName read-only, creates the file mapping object
// and initializes the cache bookkeeping.
// maxAllowedMem: cache budget in bytes (rounded down to a block multiple)
// blockSize:     bytes per mapping block; must be a power of two and a
//                multiple of sizeof(T)
template <class T>
OOCFile64<T>::OOCFile64(const char * pFileName, int maxAllowedMem, int blockSize) {
SYSTEM_INFO systemInfo;
BY_HANDLE_FILE_INFORMATION fileInfo;
LogManager *log = LogManager::getSingletonPtr();
char output[200];
m_MaxAllowedMem = maxAllowedMem;
// get windows allocation granularity:
GetSystemInfo(&systemInfo);
m_OSMappingGranularity = systemInfo.dwAllocationGranularity;
// open file. CreateFile reports failure as INVALID_HANDLE_VALUE, not NULL,
// so the old "!(m_hFile = CreateFile(...))" test could never fire.
m_hFile = CreateFile(pFileName, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL | FILE_FLAG_RANDOM_ACCESS, NULL);
if (m_hFile == INVALID_HANDLE_VALUE) {
cerr << "OOCFile64: Cannot open file: " << pFileName << endl;
outputWindowsErrorMessage();
exit (-1);
}
// get file size:
GetFileInformationByHandle(m_hFile, &fileInfo);
m_fileSize.LowPart = fileInfo.nFileSizeLow;
m_fileSize.HighPart = fileInfo.nFileSizeHigh;
// CreateFileMapping does return NULL on failure, so this test is correct:
if (!(m_hMapping = CreateFileMapping(m_hFile, NULL, PAGE_READONLY,
m_fileSize.HighPart, m_fileSize.LowPart, NULL))) {
cerr << "OOCFile64<" << sizeof(T) << ">: CreateFileMapping() failed" << endl;
outputWindowsErrorMessage();
exit (-1);
}
//
// determine block size:
//
m_BlockSize = blockSize;
if (m_BlockSize % sizeof(T) != 0) {
// cast: %d expects int, sizeof() yields size_t (8 bytes on Win64)
sprintf(output, "OOCFile64: cache block size need to be multiple of structure size! sizeof(T) = %d, block size = %d", (int)sizeof(T), (int)m_BlockSize);
log->logMessage(LOG_ERROR, output);
}
// log2 of the block size, via log10 ratios; assumes m_BlockSize is a power of two
m_BlockSizePowerTwo = (unsigned int)(log10((double)m_BlockSize) / log10(2.0));
m_BlockMaskToOffset = (1<<m_BlockSizePowerTwo) - 1;
//
// determine number of blocks:
//
// make max mem a multiple of block size:
m_MaxAllowedMem = (m_MaxAllowedMem >> m_BlockSizePowerTwo) << m_BlockSizePowerTwo;
m_NumCacheEntries = m_MaxAllowedMem / m_BlockSize;
sprintf(output, "OOCFile64: total:%u bytes (%u KB) in %d entries of %d KB\n", m_MaxAllowedMem, m_MaxAllowedMem/1024, m_NumCacheEntries, m_BlockSize / 1024);
log->logMessage(LOG_DEBUG, output);
m_NumCacheEntriesPowerTwo = (unsigned int)(log10((double)m_NumCacheEntries) / log10(2.0));
m_CacheNumMask = m_NumCacheEntries - 1;
//
// init cache table:
//
m_CacheTable = new CacheEntry[m_NumCacheEntries];
memset(m_CacheTable, 0, sizeof(CacheEntry) * m_NumCacheEntries);
cacheAccesses = 0;
cacheMisses = 0;
#ifdef USE_OOC_FILE_LRU
m_CurNumCachedPage = -1;
// The "claim a fresh slot" path in operator[] increments m_CurNumCachedPage
// while it is < m_NumCachedPage and then uses it as a table index, so the
// largest index handed out is m_NumCachedPage itself. It therefore must be
// m_NumCacheEntries - 1; the previous value (m_NumCacheEntries) allowed a
// write one past the end of m_CacheTable.
m_NumCachedPage = m_NumCacheEntries - 1;
int i;
for(i=0;i<NUM_THREADS;i++)
m_LastAccessedPage[i] = -1;
// init LRU list
LRUEntry *pStartHead = new LRUEntry;
LRUEntry *pEndHead = new LRUEntry;
m_LRUList.InitList (pStartHead, pEndHead);
// number of distinct file pages. The division happens in integer math
// before the cast, so this is floor(size/block) + 1, which is enough to
// index every page (including a trailing partial page).
m_MaxNumPage = int (ceil (float(m_fileSize.QuadPart / m_BlockSize))) + 1;
m_Loaded = new int [m_MaxNumPage];
UNLOADED = -1;
for (i = 0;i < m_MaxNumPage;i++)
{
m_Loaded [i] = UNLOADED;
}
#endif
#ifdef _USE_OPENMP
// one lock per file page. NOTE(review): m_MaxNumPage is only initialized in
// the USE_OOC_FILE_LRU branch above; the OpenMP path depends on it.
// (Own loop variable: the old code reused `i`, which is undeclared when
// USE_OOC_FILE_LRU is off.)
lck = new omp_lock_t[m_MaxNumPage];
for(int p=0;p<m_MaxNumPage;p++)
{
omp_init_lock(&lck[p]);
}
#endif
}
// destructor: unmaps all live cache entries, closes the OS handles and frees
// the LRU/lock bookkeeping allocated by the constructor
template <class T>
OOCFile64<T>::~OOCFile64() {
// unmap all existing cache entries
for (unsigned int i = 0; i < m_NumCacheEntries; i++) {
if (m_CacheTable[i].address != NULL)
unload(i);
}
// close file mapping and file:
CloseHandle (m_hMapping);
CloseHandle (m_hFile);
#ifdef OOCFILE_PROFILE
dumpCacheStats();
#endif
#ifdef USE_OOC_FILE_LRU
if (m_Loaded) {
delete [] m_Loaded;
m_Loaded = NULL;
}
#endif
#ifdef _USE_OPENMP
int i;
for(i=0;i<m_MaxNumPage;i++)
{
omp_destroy_lock(&lck[i]);
}
delete[] lck;
#endif
// NOTE(review): m_CacheTable and the LRUEntry objects it points to are
// never freed here -- confirm whether that leak is intentional.
}
/*
// main access operator, i is array offset (i.e. depends on sizeof(T))!
template <class T>
FORCEINLINE const T& OOCFile64<T>::operator[](unsigned int i) {
OOCSize64 j, startOffset;
// find pos in table for this i
j.QuadPart = (__int64)i * (__int64)sizeof(T);
startOffset.QuadPart = (j.QuadPart >> (__int64)m_BlockSizePowerTwo);
unsigned int tablePos = startOffset.LowPart & m_CacheNumMask;
#ifdef OOCFILE_DEBUG
assert(j.QuadPart < m_fileSize.QuadPart);
assert(j.QuadPart % sizeof(T) == 0);
//cout << "access(" << i << "): tablePos=" << tablePos << ", startOffset=" << startOffset << " in Table: " << m_CacheTable[tablePos].fileStartOffset << endl;
#endif
#ifdef OOCFILE_PROFILE
cacheAccesses++;
#endif
// check if cache entry valid for this i:
if (m_CacheTable[tablePos].address != NULL && m_CacheTable[tablePos].fileStartOffset.QuadPart == startOffset.QuadPart) {
// yes: return pointer
return *((T *)(m_CacheTable[tablePos].address + (j.LowPart & m_BlockMaskToOffset)));
}
else {
#ifdef OOCFILE_PROFILE
cacheMisses++;
#endif
// no: unmap and map new
if (m_CacheTable[tablePos].address)
unload(tablePos);
m_CacheTable[tablePos].fileStartOffset.QuadPart = startOffset.QuadPart;
load(tablePos);
return *((T *)(m_CacheTable[tablePos].address + (j.LowPart & m_BlockMaskToOffset)));
}
}
*/
// main access operator, i is array offset (i.e. depends on sizeof(T))!
// Returns a reference into the mapped block that holds element i; the
// reference stays valid only until a later access unmaps that block.
FORCEINLINE const Triangle& OOCFile64<Triangle>::operator[](unsigned int i) {
OOCSize64 j, startOffset;
// find pos in table for this i
// j = absolute byte offset of element i; startOffset = file block number
j.QuadPart = (__int64)i * (__int64)sizeof(Triangle);
startOffset.QuadPart = (j.QuadPart >> (__int64)m_BlockSizePowerTwo);
// direct-mapped slot (used by the MOD policy; recomputed for LRU below)
unsigned int tablePos = startOffset.LowPart & m_CacheNumMask;
#ifdef WORKING_SET_COMPUTATION
unsigned int Idx = j.QuadPart / sizeof (Triangle);
g_TriSim.Access (Idx);
#endif
#ifdef OOCFILE_DEBUG
assert(j.QuadPart < m_fileSize.QuadPart);
assert(j.QuadPart % sizeof(Triangle) == 0);
//cout << "access(" << i << "): tablePos=" << tablePos << ", startOffset=" << startOffset << " in Table: " << m_CacheTable[tablePos].fileStartOffset << endl;
#endif
#ifdef OOCFILE_PROFILE
cacheAccesses++;
#endif
#ifdef USE_OOC_FILE_MOD
// direct-mapped policy: slot is fully determined by the block number
// check if cache entry valid for this i:
if (m_CacheTable[tablePos].address != NULL && m_CacheTable[tablePos].fileStartOffset.QuadPart == startOffset.QuadPart) {
// yes: return pointer
return *((Triangle *)(m_CacheTable[tablePos].address + (j.LowPart & m_BlockMaskToOffset)));
}
else {
#ifdef OOCFILE_PROFILE
cacheMisses++;
#endif
// no: unmap and map new
if (m_CacheTable[tablePos].address)
unload(tablePos);
m_CacheTable[tablePos].fileStartOffset.QuadPart = startOffset.QuadPart;
load(tablePos);
return *((Triangle *)(m_CacheTable[tablePos].address + (j.LowPart & m_BlockMaskToOffset)));
}
#endif
#ifdef USE_OOC_FILE_LRU
// LRU policy: m_Loaded maps file page id -> cache slot (UNLOADED if absent)
int PageID = startOffset.QuadPart;
tablePos = m_Loaded [PageID];
if (m_Loaded [PageID] == UNLOADED)
{
#ifdef _USE_OPENMP
// per-page lock: only threads faulting the same page serialize here
omp_set_lock(&lck[PageID]);
#endif
// re-check under the lock: another thread may have loaded the page meanwhile
if (m_Loaded [PageID] == UNLOADED)
{
if (m_CurNumCachedPage < m_NumCachedPage)
{
// cache not full yet: claim the next unused slot.
// NOTE(review): the slot index reaches m_NumCachedPage, so this is only
// in bounds if m_NumCachedPage < m_NumCacheEntries -- verify the
// constructor's initialization of m_NumCachedPage.
m_CurNumCachedPage++;
tablePos = m_CurNumCachedPage;
LRUEntry *newLRUEntry = new LRUEntry;
m_CacheTable[tablePos].entryLRU = newLRUEntry;
newLRUEntry->tablePos = tablePos;
newLRUEntry->pageID = PageID;
m_CacheTable[tablePos].fileStartOffset.QuadPart = startOffset.QuadPart;
m_LRUList.ForceAdd(newLRUEntry);
load(tablePos);
m_Loaded [PageID] = tablePos;
}
else
{
// cache full: evict the least recently used page and reuse its slot
LRUEntry *pLeastUsed;
#ifdef _USE_OPENMP
#pragma omp critical
#endif
{
pLeastUsed = m_LRUList.m_pEnd->m_pPrev;
tablePos = pLeastUsed->tablePos;
unload(tablePos);
m_Loaded [pLeastUsed->pageID] = UNLOADED;
}
m_CacheTable[tablePos].fileStartOffset.QuadPart = startOffset.QuadPart;
pLeastUsed->pageID = PageID;
m_LRUList.ForceAdd(pLeastUsed);
load(tablePos);
m_Loaded [PageID] = tablePos;
}
}
#ifdef _USE_OPENMP
omp_unset_lock(&lck[PageID]);
#endif
}
#ifdef _USE_OPENMP
int thread_num = omp_get_thread_num();
#else
int thread_num = 0;
#endif
// touch the page in the LRU list; skipped when this thread hit the same
// slot as on its previous access
if (tablePos != m_LastAccessedPage[thread_num]) {
// manage LRU list, already loaded. So put it front.
m_LRUList.ForceAdd (m_CacheTable[tablePos].entryLRU);
m_LastAccessedPage[thread_num] = tablePos;
}
return *((Triangle *)(m_CacheTable[tablePos].address + (j.LowPart & m_BlockMaskToOffset)));
#endif
}
// main access operator, i is array offset (i.e. depends on sizeof(T))!
// Specialization for unsigned int; identical structure to the Triangle
// specialization, with the byte offset computed as i*4 via a shift.
FORCEINLINE const unsigned int & OOCFile64<unsigned int >::operator[](unsigned int i) {
OOCSize64 j, startOffset;
// find pos in table for this i
//j.QuadPart = (__int64)i * (__int64)sizeof(unsigned int );
// i << 2 == i * sizeof(unsigned int): absolute byte offset in the file
j.QuadPart = (__int64)i << 2;
startOffset.QuadPart = (j.QuadPart >> (__int64)m_BlockSizePowerTwo);
// direct-mapped slot (used by the MOD policy; recomputed for LRU below)
unsigned int tablePos = startOffset.LowPart & m_CacheNumMask;
#ifdef WORKING_SET_COMPUTATION
unsigned int Idx = j.QuadPart / sizeof (unsigned int);
g_kdIdxSim.Access (Idx);
#endif
#ifdef OOCFILE_DEBUG
assert(j.QuadPart < m_fileSize.QuadPart);
assert(j.QuadPart % sizeof(unsigned int) == 0);
//cout << "access(" << i << "): tablePos=" << tablePos << ", startOffset=" << startOffset << " in Table: " << m_CacheTable[tablePos].fileStartOffset << endl;
#endif
#ifdef OOCFILE_PROFILE
cacheAccesses++;
#endif
#ifdef USE_OOC_FILE_MOD
// direct-mapped policy
// check if cache entry valid for this i:
if (m_CacheTable[tablePos].address != NULL && m_CacheTable[tablePos].fileStartOffset.QuadPart == startOffset.QuadPart) {
// yes: return pointer
return *((unsigned int *)(m_CacheTable[tablePos].address + (j.LowPart & m_BlockMaskToOffset)));
}
else {
#ifdef OOCFILE_PROFILE
cacheMisses++;
#endif
// no: unmap and map new
if (m_CacheTable[tablePos].address)
unload(tablePos);
m_CacheTable[tablePos].fileStartOffset.QuadPart = startOffset.QuadPart;
load(tablePos);
return *((unsigned int *)(m_CacheTable[tablePos].address + (j.LowPart & m_BlockMaskToOffset)));
}
#endif
#ifdef USE_OOC_FILE_LRU
// LRU policy: m_Loaded maps file page id -> cache slot (UNLOADED if absent)
int PageID = startOffset.QuadPart;
tablePos = m_Loaded [PageID];
if (m_Loaded [PageID] == UNLOADED)
{
#ifdef _USE_OPENMP
// per-page lock: only threads faulting the same page serialize here
omp_set_lock(&lck[PageID]);
#endif
// re-check under the lock: another thread may have loaded the page meanwhile
if (m_Loaded [PageID] == UNLOADED)
{
if (m_CurNumCachedPage < m_NumCachedPage)
{
// cache not full yet: claim the next unused slot.
// NOTE(review): in bounds only if m_NumCachedPage < m_NumCacheEntries --
// verify the constructor's initialization of m_NumCachedPage.
m_CurNumCachedPage++;
tablePos = m_CurNumCachedPage;
LRUEntry *newLRUEntry = new LRUEntry;
m_CacheTable[tablePos].entryLRU = newLRUEntry;
newLRUEntry->tablePos = tablePos;
newLRUEntry->pageID = PageID;
m_CacheTable[tablePos].fileStartOffset.QuadPart = startOffset.QuadPart;
m_LRUList.ForceAdd(newLRUEntry);
load(tablePos);
m_Loaded [PageID] = tablePos;
}
else
{
// cache full: evict the least recently used page and reuse its slot
LRUEntry *pLeastUsed;
#ifdef _USE_OPENMP
#pragma omp critical
#endif
{
pLeastUsed = m_LRUList.m_pEnd->m_pPrev;
tablePos = pLeastUsed->tablePos;
unload(tablePos);
m_Loaded [pLeastUsed->pageID] = UNLOADED;
}
m_CacheTable[tablePos].fileStartOffset.QuadPart = startOffset.QuadPart;
pLeastUsed->pageID = PageID;
m_LRUList.ForceAdd(pLeastUsed);
load(tablePos);
m_Loaded [PageID] = tablePos;
}
}
#ifdef _USE_OPENMP
omp_unset_lock(&lck[PageID]);
#endif
}
#ifdef _USE_OPENMP
int thread_num = omp_get_thread_num();
#else
int thread_num = 0;
#endif
// touch the page in the LRU list; skipped when this thread hit the same
// slot as on its previous access
if (tablePos != m_LastAccessedPage[thread_num]) {
// manage LRU list, already loaded. So put it front.
m_LRUList.ForceAdd (m_CacheTable[tablePos].entryLRU);
m_LastAccessedPage[thread_num] = tablePos;
}
return *((unsigned int *)(m_CacheTable[tablePos].address + (j.LowPart & m_BlockMaskToOffset)));
#endif
}
// Access operator specialization for BSPArrayTreeNode. The index-to-byte
// shift below matches the node addressing scheme of each build configuration
// (BVH / kd-tree node layouts of differing sizes).
FORCEINLINE const BSPArrayTreeNode& OOCFile64<BSPArrayTreeNode>::operator[](unsigned int i) {
OOCSize64 j, startOffset;
// find pos in table for this i
#if HIERARCHY_TYPE == TYPE_BVH
j.QuadPart = (__int64)i << (__int64)3;
#else
#ifdef KDTREENODE_16BYTES
#ifdef FOUR_BYTE_FOR_KD_NODE
j.QuadPart = (__int64)i << (__int64)4;
#else
j.QuadPart = (__int64)i << (__int64)3;
#endif
#else
j.QuadPart = (__int64)i << (__int64)2;
#endif
#endif
// startOffset = file block number; tablePos = direct-mapped slot
startOffset.QuadPart = j.QuadPart >> m_BlockSizePowerTwo;
unsigned int tablePos = startOffset.LowPart & m_CacheNumMask;
#ifdef WORKING_SET_COMPUTATION
unsigned int Idx = j.QuadPart / sizeof (BSPArrayTreeNode);
g_kdTreeSim.Access (Idx);
g_kdTreeSimL2.Access (Idx);
#endif
#ifdef OOCFILE_DEBUG
assert(j.QuadPart < m_fileSize.QuadPart);
// fixed: this is a non-template specialization, so sizeof(T) (copied from
// the template version) did not compile here -- use the concrete type
assert(j.QuadPart % sizeof(BSPArrayTreeNode) == 0);
cout << "access(" << i << " | " << j.QuadPart << "): tablePos=" << tablePos << ", startOffset=" << startOffset.HighPart << " " << startOffset.LowPart << "(=" << startOffset.QuadPart << ") in Table: " << m_CacheTable[tablePos].fileStartOffset.HighPart << " " << m_CacheTable[tablePos].fileStartOffset.LowPart << endl;
#endif
#ifdef OOCFILE_PROFILE
cacheAccesses++;
#endif
#ifdef USE_OOC_FILE_MOD
// direct-mapped policy
// check if cache entry valid for this i:
if (m_CacheTable[tablePos].address != NULL && m_CacheTable[tablePos].fileStartOffset.QuadPart == startOffset.QuadPart) {
// yes: return pointer
return *((BSPArrayTreeNode *)(m_CacheTable[tablePos].address + (j.LowPart & m_BlockMaskToOffset)));
}
else {
#ifdef OOCFILE_PROFILE
cacheMisses++;
#endif
// no: unmap and map new
if (m_CacheTable[tablePos].address)
unload(tablePos);
m_CacheTable[tablePos].fileStartOffset.QuadPart = startOffset.QuadPart;
load(tablePos);
return *((BSPArrayTreeNode *)(m_CacheTable[tablePos].address + (j.LowPart & m_BlockMaskToOffset)));
}
#endif
#ifdef USE_OOC_FILE_LRU
// LRU policy: m_Loaded maps file page id -> cache slot (UNLOADED if absent)
int PageID = startOffset.QuadPart;
tablePos = m_Loaded [PageID];
if (m_Loaded [PageID] == UNLOADED)
{
#ifdef _USE_OPENMP
// per-page lock: only threads faulting the same page serialize here
omp_set_lock(&lck[PageID]);
#endif
// re-check under the lock: another thread may have loaded the page meanwhile
if (m_Loaded [PageID] == UNLOADED)
{
if (m_CurNumCachedPage < m_NumCachedPage)
{
// cache not full yet: claim the next unused slot
m_CurNumCachedPage++;
tablePos = m_CurNumCachedPage;
LRUEntry *newLRUEntry = new LRUEntry;
m_CacheTable[tablePos].entryLRU = newLRUEntry;
newLRUEntry->tablePos = tablePos;
newLRUEntry->pageID = PageID;
m_CacheTable[tablePos].fileStartOffset.QuadPart = startOffset.QuadPart;
m_LRUList.ForceAdd(newLRUEntry);
load(tablePos);
m_Loaded [PageID] = tablePos;
}
else
{
// cache full: evict the least recently used page and reuse its slot
LRUEntry *pLeastUsed;
#ifdef _USE_OPENMP
#pragma omp critical
#endif
{
pLeastUsed = m_LRUList.m_pEnd->m_pPrev;
tablePos = pLeastUsed->tablePos;
unload(tablePos);
m_Loaded [pLeastUsed->pageID] = UNLOADED;
}
m_CacheTable[tablePos].fileStartOffset.QuadPart = startOffset.QuadPart;
pLeastUsed->pageID = PageID;
m_LRUList.ForceAdd(pLeastUsed);
load(tablePos);
m_Loaded [PageID] = tablePos;
}
}
#ifdef _USE_OPENMP
omp_unset_lock(&lck[PageID]);
#endif
}
#ifdef _USE_OPENMP
int thread_num = omp_get_thread_num();
#else
int thread_num = 0;
#endif
// touch the page in the LRU list; skipped when this thread hit the same
// slot as on its previous access
if (tablePos != m_LastAccessedPage[thread_num]) {
// manage LRU list, already loaded. So put it front.
m_LRUList.ForceAdd (m_CacheTable[tablePos].entryLRU);
m_LastAccessedPage[thread_num] = tablePos;
}
return *((BSPArrayTreeNode *)(m_CacheTable[tablePos].address + (j.LowPart & m_BlockMaskToOffset)));
#endif
}
// Release the view mapped in cache slot tablePos.
template <class T>
void FORCEINLINE OOCFile64<T>::unload(unsigned int tablePos) {
char *mapped = m_CacheTable[tablePos].address;
#ifdef OOCFILE_DEBUG
// debug build: surface unmap failures instead of silently ignoring them
if (UnmapViewOfFile(mapped) == 0) {
cerr << "UnmapViewOfFile(" << (unsigned int)mapped << ") failed:" << endl;
outputWindowsErrorMessage();
}
#else
UnmapViewOfFile(mapped);
#endif
}
// load the specified cache entry into mapped memory
// Maps the file block recorded in m_CacheTable[tablePos].fileStartOffset and
// stores the view's base address back into the table slot.
template <class T>
void FORCEINLINE OOCFile64<T>::load(unsigned int tablePos) {
unsigned int blockSize;
OOCSize64 startAddr;
// byte offset of the block within the file
startAddr.QuadPart = m_CacheTable[tablePos].fileStartOffset.QuadPart << m_BlockSizePowerTwo;
// for the trailing, partial block request length 0, which MapViewOfFile
// interprets as "map from the offset to the end of the file"
if (startAddr.QuadPart + m_BlockSize > m_fileSize.QuadPart)
blockSize = 0;
else
blockSize = m_BlockSize;
#ifdef OOCFILE_DEBUG
if (!(m_CacheTable[tablePos].address = (char *)MapViewOfFile(m_hMapping, FILE_MAP_READ, startAddr.HighPart, startAddr.LowPart, blockSize))) {
cerr << "MapViewOfFile(" << (m_CacheTable[tablePos].fileStartOffset.QuadPart << m_BlockSizePowerTwo) << ", " << blockSize << ") failed:" << endl;
outputWindowsErrorMessage();
}
cout << "MapViewOfFile(" << (m_CacheTable[tablePos].fileStartOffset.QuadPart << m_BlockSizePowerTwo) << ", " << blockSize << ") = " << (unsigned int)m_CacheTable[tablePos].address << endl;
#else
// release build: no error check -- a failed mapping leaves address == NULL
m_CacheTable[tablePos].address = (char *)MapViewOfFile(m_hMapping, FILE_MAP_READ, startAddr.HighPart, startAddr.LowPart, blockSize);
#endif
}
#undef OOCFILE_PROFILE
#undef OOCFILE_DEBUG
#endif |
ordering_op-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file ordering_op-inl.h
* \brief Function definition of matrix related operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#include <mxnet/operator_util.h>
#include <dmlc/optional.h>
#include <mshadow/tensor.h>
#include <algorithm>
#include <vector>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "./sort_op.h"
#include "./indexing_op.h"
namespace mshadow {
// Reinterpret a contiguous tensor's underlying buffer under a different
// shape without copying any data; the returned tensor aliases `src`.
template<typename xpu, int src_dim, typename DType, int dst_dim>
inline Tensor<xpu, dst_dim, DType> inplace_reshape(Tensor<xpu, src_dim, DType> src,
                                                   Shape<dst_dim> target_shape) {
  // An in-place reshape is only meaningful for contiguous memory.
  CHECK_EQ(src.CheckContiguous(), true);
  Tensor<xpu, dst_dim, DType> reshaped(src.dptr_, target_shape, src.stream_);
  return reshaped;
}
};
namespace mxnet {
namespace op {
// These enums are only visible within this header
namespace topk_enum {
// How the top-k operator returns its result: the top-k values themselves,
// their indices, a 0/1 mask over the input, or both values and indices.
enum TopKReturnType {kReturnValue, kReturnIndices, kReturnMask, kReturnBoth};
} // topk_enum
// Parameters of the top-k operator.
struct TopKParam : public dmlc::Parameter<TopKParam> {
// axis to select along; negative values count from the last dimension,
// an empty optional means the flattened array is used
dmlc::optional<int> axis;
// number of elements to keep; k < 1 requests a full sort
int k;
// one of topk_enum::TopKReturnType
int ret_typ;
// true: keep the k smallest elements; false (default): the k largest
bool is_ascend;
DMLC_DECLARE_PARAMETER(TopKParam) {
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
.describe("Axis along which to choose the top k indices."
" If not given, the flattened array is used. Default is -1.")
DMLC_DECLARE_FIELD(k).set_default(1)
.describe("Number of top elements to select,"
" should be always smaller than or equal to the element number in the given axis."
" A global sort is performed if set k < 1.");
DMLC_DECLARE_FIELD(ret_typ).set_default(topk_enum::kReturnIndices)
.add_enum("value", topk_enum::kReturnValue)
.add_enum("indices", topk_enum::kReturnIndices)
.add_enum("mask", topk_enum::kReturnMask)
.add_enum("both", topk_enum::kReturnBoth)
.describe("The return type.\n"
" \"value\" means to return the top k values,"
" \"indices\" means to return the indices of the top k values,"
" \"mask\" means to return a mask array containing 0 and 1. 1 means the top k values."
" \"both\" means to return a list of both values and indices of top k elements.");
DMLC_DECLARE_FIELD(is_ascend).set_default(false)
.describe("Whether to choose k largest or k smallest elements."
" Top K largest elements will be chosen if set to false.");
}
};
// Parameters of the sort operator.
struct SortParam : public dmlc::Parameter<SortParam> {
// axis to sort along; an empty optional means the flattened array is used
dmlc::optional<int> axis;
// true (default): ascending order; false: descending
bool is_ascend;
DMLC_DECLARE_PARAMETER(SortParam) {
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
.describe("Axis along which to choose sort the input tensor."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(is_ascend).set_default(true)
.describe("Whether to sort in ascending or descending order.");
}
};
// Parameters of the argsort operator.
struct ArgSortParam : public dmlc::Parameter<ArgSortParam> {
// axis to sort along; an empty optional means the flattened array is used
dmlc::optional<int> axis;
// true (default): ascending order; false: descending
bool is_ascend;
DMLC_DECLARE_PARAMETER(ArgSortParam) {
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
.describe("Axis along which to sort the input tensor."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(is_ascend).set_default(true)
.describe("Whether to sort in ascending or descending order.");
}
};
/*!
 * \brief Resolve the effective top-k configuration from the input shape and
 *        the user-supplied parameters.
 * \param src_shape shape of the source blob
 * \param param top-k parameters as given by the user
 * \param target_shape (out) shape of the returned values/indices blobs
 * \param batch_size (out) number of independent batches to sort
 * \param element_num (out) number of elements along the sort axis
 * \param axis (out) normalized non-negative sort axis; 0 when flattened
 * \param k (out) effective k; param.k <= 0 selects a full sort (k = element_num)
 * \param do_transpose (out) true when the sort axis must be moved to the last dim
 * \param is_ascend (out) whether the k smallest (true) or largest (false) are kept
 */
inline void ParseTopKParam(const TShape& src_shape, const TopKParam& param, TShape *target_shape,
int *batch_size, int *element_num, int *axis, int *k,
bool *do_transpose, bool *is_ascend) {
*do_transpose = false;
*k = param.k;
*is_ascend = param.is_ascend;
// get batch_size, axis and element_num
if (!static_cast<bool>(param.axis)) { // No axis given
*axis = 0;
*batch_size = 1;
*element_num = src_shape.Size();
} else {
*axis = param.axis.value();
if (*axis < 0) {
// negative axes count from the last dimension
*axis += src_shape.ndim();
}
CHECK(*axis >= 0 && *axis < static_cast<int>(src_shape.ndim()))
<< "Invalid axis! axis should be between 0 and "
<< src_shape.ndim() << ", found axis=" << *axis;
*batch_size = src_shape.Size() / src_shape[*axis];
*element_num = src_shape[*axis];
// sorting is performed along the innermost dimension; any other axis
// requires transposing the data first
if (*axis != static_cast<int>(src_shape.ndim()) - 1) {
*do_transpose = true;
}
}
// get k
if (param.k <= 0) {
*k = *element_num;
}
// get target_shape
if (!static_cast<bool>(param.axis)) {
if (param.ret_typ != topk_enum::kReturnMask) {
*target_shape = mshadow::Shape1(*k);
} else {
*target_shape = src_shape;
}
} else {
*target_shape = src_shape;
if (param.ret_typ != topk_enum::kReturnMask) {
(*target_shape)[*axis] = *k;
}
}
// the condition allows k == element_num, so the old "k must be smaller
// than" message was misleading for that legal boundary case
CHECK(*k >= 1 && *k <= *element_num) << "k must be between 1 and "
<< *element_num << ", get k = " << *k;
}
using namespace mshadow;
/*!
 * \brief Per-batch top-K sort primitive, specialized per device below.
 *        `dat`/`ind` hold M batches of N elements each (flattened,
 *        batch-major); after the call the first K positions of each batch
 *        contain the top-K values with their source indices in `ind`.
 *        `work` is scratch space (see the per-device specializations for its
 *        exact layout).
 */
template<typename xpu>
void TopKSort(const Tensor<xpu, 1, real_t>& dat,
const Tensor<xpu, 1, int>& ind,
const Tensor<xpu, 1, char>& work,
int K, int N, bool is_ascend,
Stream<xpu> *s);
// CPU implementation: each batch row is sorted independently, rows are
// distributed over OpenMP threads.
template<>
MSHADOW_FORCE_INLINE void TopKSort<cpu>(const Tensor<cpu, 1, real_t>& dat,
const Tensor<cpu, 1, int>& ind,
const Tensor<cpu, 1, char>& work,
int K, int N, bool is_ascend,
Stream<cpu> *s) {
// Use full sort when K is relatively large.
const bool full_sort(K*8 > N);
// Batch size.
const int M(work.size(0)/(sizeof(real_t)*N));
const int omp_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount());
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < M; ++i) {
// Tensor `work` stores the flattened source data, while `dat` stores the sorted result.
// `indices` holds offsets into the *whole* flattened buffer, which is why
// `vals` is not advanced by i*N here.
real_t *vals = reinterpret_cast<real_t*>(work.dptr_);
real_t *sorted_vals = dat.dptr_+i*N;
int *indices = ind.dptr_+i*N;
if (is_ascend) {
if (full_sort) {
std::sort(indices, indices+N,
[&](const int& i1, const int& i2){ return vals[i1] < vals[i2]; });
} else {
// only the first K positions need to be ordered
std::partial_sort(indices, indices+K, indices+N,
[&](const int& i1, const int& i2){ return vals[i1] < vals[i2]; });
}
} else {
if (full_sort) {
std::sort(indices, indices+N,
[&](const int& i1, const int& i2){ return vals[i1] > vals[i2]; });
} else {
std::partial_sort(indices, indices+K, indices+N,
[&](const int& i1, const int& i2){ return vals[i1] > vals[i2]; });
}
}
// gather the values belonging to the selected indices
for (int j = 0; j < K; ++j) {
sorted_vals[j] = vals[indices[j]];
}
}
}
#ifdef __CUDACC__
// Returns true when entry (val1, ind1) ranks ahead of (val2, ind2) for the
// requested sort direction.
template<typename DType>
MSHADOW_XINLINE bool TopKCompare(DType val1, int ind1, DType val2, int ind2, bool is_ascend) {
  // Negative indices denote undefined values which are considered arbitrary small resp. large.
  if (ind2 < 0) return true;   // anything beats an undefined entry
  if (ind1 < 0) return false;  // an undefined entry beats nothing
  return is_ascend ? (val1 < val2) : (val1 > val2);
}
// Merge two sorted top-K lists (val2/ind2 into val1/ind1) keeping the best K.
template<typename DType>
MSHADOW_XINLINE void MergeTopK(int K, DType *val1, int *ind1, DType *val2, int *ind2,
bool is_ascend) {
// In-place merge of two sorted top-K lists into val1/ind1. First determine the intervals
// [0,..,i1], [0,..i2] of the two lists that will be part of the merged list.
// Each of the K iterations discards the worse of the two current tail
// elements, leaving exactly K survivors split as (i1+1) + (i2+1).
int i1(K-1), i2(K-1);
for (int i = 0; i < K; ++i) {
if (TopKCompare(val1[i1], ind1[i1], val2[i2], ind2[i2], is_ascend)) {
--i2;
} else {
--i1;
}
}
// Now merge the lists from back to front.
for (int i = K; i--;) {
// note: && binds tighter than ||, so this reads
// (i2 < 0) || ((i1 >= 0) && TopKCompare(...))
if (i2 < 0 || i1 >= 0 && TopKCompare(val2[i2], ind2[i2], val1[i1], ind1[i1], is_ascend)) {
val1[i] = val1[i1];
ind1[i] = ind1[i1];
--i1;
} else {
val1[i] = val2[i2];
ind1[i] = ind2[i2];
--i2;
}
}
}
// One thread block per batch item: every thread collects a private sorted
// top-K list in shared memory, then the lists are pairwise-merged down to one.
template<typename DType>
__global__ void PartialSortSmallK(int K, int N, DType *val, int *ind, bool is_ascend) {
// Buffer for pairwise reduction.
// Layout: blockDim.x*K ints (indices) followed by blockDim.x*K DTypes (values).
extern __shared__ int buff[];
// Start of buffer sections associated with this thread.
const int offset(threadIdx.x*K);
int *ind_buff = &buff[offset];
DType *val_buff = reinterpret_cast<DType*>(&buff[blockDim.x*K])+offset;
// Initialize top-K values for this thread.
// index -1 marks an empty slot (see TopKCompare)
for (int i = 0; i < K; ++i) {
ind_buff[i] = -1;
}
// Range of values this thread cares about. Each thread block processes
// a different batch item (i.e. a different set of ind/val where we
// have to select the top-K elements). All threads within the same
// block work on the same batch item.
const int first(blockIdx.x*N+threadIdx.x), last((blockIdx.x+1)*N);
// Select top-K from this range and store it sorted in the buffer.
// We assume a small K, so linear insertion is o.k.
for (int i = first; i < last; i += blockDim.x) {
DType cur_val(val[i]);
int cur_ind(ind[i]);
// shift worse entries down while the new element outranks them
for (int j = K; j-- && TopKCompare(cur_val, cur_ind, val_buff[j], ind_buff[j], is_ascend); ) {
if (j+1 < K) {
val_buff[j+1] = val_buff[j];
ind_buff[j+1] = ind_buff[j];
}
val_buff[j] = cur_val;
ind_buff[j] = cur_ind;
}
}
// Recursive merge of sorted lists for this thread block. Note that blockDim.x is not
// necessary a power of two, therefore the additional checks for last_s.
for (unsigned int s = (blockDim.x+1)/2, last_s = blockDim.x;
last_s > 1; last_s = s, s = (s+1)/2) {
__syncthreads();
if (threadIdx.x < s && threadIdx.x+s < last_s) {
MergeTopK(K, val_buff, ind_buff, val_buff+s*K, ind_buff+s*K, is_ascend);
}
}
// Final updates on master thread.
// thread 0 writes the merged top-K back to the head of this batch's range
if (threadIdx.x == 0) {
for (int i = 0; i < K; ++i) {
ind[blockIdx.x*N+i] = ind_buff[i];
val[blockIdx.x*N+i] = val_buff[i];
}
}
}
// GPU implementation: full SortByKey-based sort for larger K, shared-memory
// partial-sort kernel (PartialSortSmallK) for very small K.
template<>
MSHADOW_FORCE_INLINE void TopKSort<gpu>(const Tensor<gpu, 1, real_t>& dat,
const Tensor<gpu, 1, int>& ind,
const Tensor<gpu, 1, char>& work,
int K, int N, bool is_ascend,
Stream<gpu> *s) {
// Use full sort for all but very small K for which we
// can do a partial sort entirely within shared memory.
const bool full_sort(K > 5);
// Batch size.
const int M(dat.size(0)/N);
if (full_sort) {
// Divide workspace into two parts. The first one is needed to store batch ids.
const int id_size(sizeof(int)*ind.size(0));
Tensor<gpu, 1, int> batch_id(reinterpret_cast<int*>(work.dptr_), Shape1(ind.size(0)), s);
Tensor<gpu, 1, char> sort_work(work.dptr_+id_size, Shape1(work.size(0)-id_size), s);
// global sort of all batches at once by value
mxnet::op::SortByKey(dat, ind, is_ascend, &sort_work);
if (M > 1) {
// Back to back sorting. Note that mxnet::op::SortByKey is a stable sort.
// Re-sorting by batch id restores the per-batch grouping while the
// stable sort preserves the value order within each batch.
batch_id = ind / N;
mxnet::op::SortByKey(batch_id, dat, true, &sort_work);
// `ind` was permuted by the previous sort, so recompute the batch ids
batch_id = ind / N;
mxnet::op::SortByKey(batch_id, ind, true, &sort_work);
}
} else {
// one block per batch; dynamic shared memory holds per-thread top-K lists
const int nthreads(mshadow::cuda::kBaseThreadNum);
PartialSortSmallK<<<M, nthreads, nthreads*K*(sizeof(int)+sizeof(real_t)),
mshadow::Stream<gpu>::GetStream(s)>>>
(K, N, dat.dptr_, ind.dptr_, is_ascend);
}
}
#endif
/*!
* \brief Implementation of the TopK operation
*
*
* \param ctx the running context
* \param resource temporary resource handler
* \param src the Source blob
* \param ret the destination blobs
 * \param param the topk parameters (param.k gives the number of elements to keep)
* \tparam xpu the device type.
*/
template<typename xpu>
void TopKImpl(RunContext ctx,
Resource resource,
const TBlob& src,
const std::vector<TBlob>& ret,
const TopKParam& param) {
using namespace mshadow;
using namespace mshadow::expr;
for (auto ret_ele : ret) {
CHECK_EQ(ret_ele.type_flag_, src.type_flag_);
}
// 1. Parse and initialize information
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 1, char> workspace;
Tensor<xpu, 1, char> temp_workspace;
Tensor<xpu, 1, real_t> sorted_dat;
Tensor<xpu, 1, int> indices, sel_indices;
Tensor<xpu, 2, real_t> mask_val;
int batch_size, element_num; // number of batches + the size of each batch
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
int k = 0;
TShape target_shape;
ParseTopKParam(src.shape_, param,
&target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
Tensor<xpu, 3, real_t> dat = src.FlatTo3D<xpu, real_t>(axis, axis, s);
size_t temp_size = 0;
// Temp space needed by the gpu-based full sorts.
temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<int, int, xpu>(src.Size()));
temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<int, real_t, xpu>(src.Size()));
temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<real_t, int, xpu>(src.Size()));
// Additional temp space for gpu full sorts for batch ids.
temp_size += sizeof(int) * src.Size();
// Temp space for cpu sorts.
temp_size = std::max(temp_size, sizeof(real_t) * src.Size());
size_t workspace_size = temp_size + sizeof(real_t) * src.Size() + sizeof(int) * src.Size();
if (param.ret_typ == topk_enum::kReturnMask) {
workspace_size += sizeof(int) * batch_size * k + sizeof(real_t) * batch_size * k;
}
workspace = resource.get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
char* workspace_curr_ptr = workspace.dptr_;
sorted_dat = Tensor<xpu, 1, real_t>(reinterpret_cast<real_t*>(workspace_curr_ptr),
Shape1(src.Size()), s); // contain sorted dat
workspace_curr_ptr += sizeof(real_t) * src.Size();
indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
Shape1(src.Size()), s); // indices in the original matrix
workspace_curr_ptr += sizeof(int) * src.Size();
if (param.ret_typ == topk_enum::kReturnMask) {
sel_indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
Shape1(batch_size * k), s);
workspace_curr_ptr += sizeof(int) * batch_size * k;
mask_val = Tensor<xpu, 2, real_t>(reinterpret_cast<real_t*>(workspace_curr_ptr),
Shape2(batch_size * k, 1), s);
workspace_curr_ptr += sizeof(real_t) * batch_size * k;
mask_val = scalar<real_t>(1);
CHECK_EQ(sel_indices.CheckContiguous(), true);
CHECK_EQ(mask_val.CheckContiguous(), true);
}
if (std::is_same<xpu, cpu>::value) {
Tensor<xpu, 1, real_t> flattened_data;
if (do_transpose) {
flattened_data = Tensor<xpu, 1, real_t>(reinterpret_cast<real_t*>(workspace_curr_ptr),
Shape1(src.Size()), s);
workspace_curr_ptr += sizeof(real_t) * src.Size();
flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
CHECK_EQ(flattened_data.CheckContiguous(), true);
} else {
flattened_data = src.FlatTo1D<xpu, real_t>(s);
}
// `temp_workspace` stores the flattened data
temp_workspace = Tensor<xpu, 1, char>(reinterpret_cast<char*>(flattened_data.dptr_),
Shape1(sizeof(real_t)*src.Size()), s);
CHECK_EQ(temp_workspace.CheckContiguous(), true);
} else {
if (do_transpose) {
sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
} else {
sorted_dat = reshape(dat, Shape1(src.Size()));
}
CHECK_EQ(sorted_dat.CheckContiguous(), true);
temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s); // temp space
workspace_curr_ptr += temp_size;
}
mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1, 0, 1,
kWriteTo, indices.dptr_);
CHECK_EQ(indices.CheckContiguous(), true);
// 2. Perform inplace batch sort.
// After sorting, each batch in `sorted_dat` will be sorted in the corresponding order
// up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat`
// `temp_workspace` is used to store the flattend source data for CPU device, and it's used as
// a temporal buffer for GPU device.
TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s);
// 3. Assign results to the ret blob
// When returning indices, only update(modulo) required elements instead of full elements
// to avoid redundant calculation.
// Cast `ret_indices` from int to real_t could introduce conversion error when the element_num
// is large enough.
if (param.ret_typ == topk_enum::kReturnMask) {
Tensor<xpu, 2, real_t> ret_mask =
ret[0].get_with_shape<xpu, 2, real_t>(Shape2(ret[0].Size(), 1), s);
ret_mask = scalar<real_t>(0);
sel_indices = reshape(slice<1>(
inplace_reshape(indices,
Shape2(batch_size,
element_num)), 0, k),
Shape1(batch_size * k));
if (do_transpose) {
TShape src_shape = src.shape_.FlatTo3D(axis);
CHECK_EQ(sel_indices.CheckContiguous(), true);
sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
Shape3(0, 2, 1));
}
IndexFill(ret_mask, sel_indices, mask_val);
} else if (param.ret_typ == topk_enum::kReturnIndices) {
if (do_transpose) {
Tensor<xpu, 3, real_t> ret_indices = ret[0].FlatTo3D<xpu, real_t>(axis, axis, s);
ret_indices = tcast<real_t>(F<mshadow_op::mod>(
transpose(slice<2>(inplace_reshape(indices,
Shape3(ret_indices.shape_[0],
ret_indices.shape_[2],
element_num)),
0, k),
Shape3(0, 2, 1)),
element_num));
} else {
Tensor<xpu, 2, real_t> ret_indices =
ret[0].get_with_shape<xpu, 2, real_t>(Shape2(batch_size, k), s);
ret_indices = tcast<real_t>(F<mshadow_op::mod>(
slice<1>(inplace_reshape(indices, Shape2(batch_size, element_num)),
0, k),
element_num));
}
} else {
if (do_transpose) {
Tensor<xpu, 3, real_t> ret_value = ret[0].FlatTo3D<xpu, real_t>(axis, axis, s);
Tensor<xpu, 3, real_t> ret_indices = ret[1].FlatTo3D<xpu, real_t>(axis, axis, s);
ret_value = transpose(
slice<2>(inplace_reshape(sorted_dat,
Shape3(ret_value.shape_[0], ret_value.shape_[2], element_num)),
0, k),
Shape3(0, 2, 1));
ret_indices = tcast<real_t>(F<mshadow_op::mod>(
transpose(slice<2>(inplace_reshape(indices,
Shape3(ret_indices.shape_[0],
ret_indices.shape_[2],
element_num)),
0, k),
Shape3(0, 2, 1)),
element_num));
} else {
Tensor<xpu, 2, real_t> ret_value =
ret[0].get_with_shape<xpu, 2, real_t>(Shape2(batch_size, k), s);
Tensor<xpu, 2, real_t> ret_indices =
ret[1].get_with_shape<xpu, 2, real_t>(Shape2(batch_size, k), s);
ret_value = slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k);
ret_indices = tcast<real_t>(F<mshadow_op::mod>(
slice<1>(inplace_reshape(indices, Shape2(batch_size, element_num)),
0, k),
element_num));
}
}
}
/*!
 * \brief Forward entry point of the TopK operator.
 *        Parses the operator attributes and forwards to TopKImpl.
 * \param attrs operator attributes holding the parsed TopKParam
 * \param ctx execution context (stream + requested workspace)
 * \param inputs inputs[0] is the source blob
 * \param req write request types; only kWriteTo is supported
 * \param outputs destination blobs filled by TopKImpl
 */
template<typename xpu>
void TopK(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  const TopKParam& topk_param = nnvm::get<TopKParam>(attrs.parsed);
  // TODO(sxjscience) We can support inplace in the future
  CHECK_EQ(req[0], kWriteTo) << "TopK does not support inplace";
  TopKImpl<xpu>(ctx.run_ctx, ctx.requested[0], inputs[0], outputs, topk_param);
}
/*!
 * \brief Forward entry point of the Sort operator.
 *        Sort is expressed as a full top-k: k == 0 selects every element and
 *        kReturnValue asks for the sorted values.
 */
template<typename xpu>
void Sort(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
  CHECK_EQ(req[0], kWriteTo) << "Sort does not support inplace";
  // Build a proxy TopKParam that makes TopKImpl behave like a full sort.
  TopKParam proxy;
  proxy.k = 0;                              // k == 0 means "keep all elements"
  proxy.ret_typ = topk_enum::kReturnValue;  // emit sorted values
  proxy.axis = param.axis;
  proxy.is_ascend = param.is_ascend;
  TopKImpl<xpu>(ctx.run_ctx, ctx.requested[0], inputs[0], outputs, proxy);
}
/*!
 * \brief Forward entry point of the ArgSort operator.
 *        ArgSort is a full top-k (k == 0) that returns element indices
 *        instead of values.
 */
template<typename xpu>
void ArgSort(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<TBlob>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<TBlob>& outputs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  CHECK_EQ(req[0], kWriteTo) << "ArgSort does not support inplace";
  // Build a proxy TopKParam that makes TopKImpl emit sorted indices.
  TopKParam proxy;
  proxy.k = 0;                                // k == 0 means "keep all elements"
  proxy.ret_typ = topk_enum::kReturnIndices;  // emit indices, not values
  proxy.axis = param.axis;
  proxy.is_ascend = param.is_ascend;
  TopKImpl<xpu>(ctx.run_ctx, ctx.requested[0], inputs[0], outputs, proxy);
}
/*!
 * \brief Backward pass of TopK for the differentiable variants
 *        (kReturnValue / kReturnBoth): scatter the incoming output gradient
 *        back to the input positions that were selected in the forward pass.
 *        inputs[0] = out_grad, inputs[2] = forward indices (as real_t),
 *        outputs[0] = in_grad.
 */
template<typename xpu>
void TopKBackward_(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_NE(req[0], kWriteInplace);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.run_ctx.get_stream<xpu>();
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
// Only the value-returning variants have a gradient defined here.
CHECK(param.ret_typ == topk_enum::kReturnValue || param.ret_typ == topk_enum::kReturnBoth);
int batch_size, element_num; // number of batches + the size of each batch
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
int k = 0;
TShape target_shape;
ParseTopKParam(outputs[0].shape_, param,
&target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
// Workspace layout: [sel_indices (batch_size*k) | batch_shift (batch_size) |
// dummy_index (batch_size*k)], hence batch_size*k*2 + batch_size elements.
Tensor<xpu, 1, real_t> workspace =
ctx.requested[0].get_space_typed<xpu, 1, real_t>(Shape1(batch_size * k * 2 + batch_size), s);
Tensor<xpu, 1, real_t> sel_indices =
Tensor<xpu, 1, real_t>(workspace.dptr_, Shape1(batch_size * k), s);
Tensor<xpu, 1, real_t> batch_shift =
Tensor<xpu, 1, real_t>(workspace.dptr_ + batch_size * k, Shape1(batch_size), s);
Tensor<xpu, 1, real_t> dummy_index =
Tensor<xpu, 1, real_t>(workspace.dptr_ + batch_size * k + batch_size,
Shape1(batch_size * k), s);
// View gradients as flat column vectors so IndexFill can address them with
// linearised indices.
Tensor<xpu, 2, real_t> out_grad =
inputs[0].get_with_shape<xpu, 2, real_t>(Shape2(inputs[0].shape_.Size(), 1), s);
Tensor<xpu, 2, real_t> in_grad =
outputs[0].get_with_shape<xpu, 2, real_t>(Shape2(outputs[0].shape_.Size(), 1), s);
// batch_shift = [0, element_num, 2*element_num, ...]: offset of each batch
// in the flattened gradient.
mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size, 1, 0.0f,
static_cast<real_t>(element_num), kWriteTo, batch_shift.dptr_);
if (do_transpose) {
// Axis was not the last dim in the forward pass: rebuild linear indices in
// the transposed layout, then map them back to the original layout.
Tensor<xpu, 1, real_t> indices = inputs[2].FlatTo1D<xpu, real_t>(s);
TShape src_shape = outputs[0].shape_.FlatTo3D(axis);
sel_indices = reshape(transpose(
broadcast_to(inplace_reshape(batch_shift,
Shape3(src_shape[0], src_shape[2], 1)),
TShape(Shape3(src_shape[0], src_shape[2], k))),
Shape3(0, 2, 1)),
Shape1(batch_size * k));
sel_indices += indices;
sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
Shape3(0, 2, 1));
} else {
// Simple case: linear index = per-batch index + batch offset.
Tensor<xpu, 2, real_t> indices =
inputs[2].get_with_shape<xpu, 2, real_t>(Shape2(batch_size, k), s);
sel_indices = reshape(indices +
broadcast_to(inplace_reshape(batch_shift, Shape2(batch_size, 1)),
TShape(Shape2(batch_size, k))),
Shape1(batch_size * k));
}
CHECK_EQ(sel_indices.CheckContiguous(), true);
if (kWriteTo == req[0]) {
// Overwrite: zero the gradient, then scatter out_grad into the selected slots.
in_grad = scalar<real_t>(0);
IndexFill(in_grad, sel_indices, out_grad);
} else if (kAddTo == req[0]) {
// TODO(sxjscience) We can use AddTakeGrad in the future.
// However, the current implementation of AddTakeGrad is not so efficient.
mxnet_op::Kernel<range_fwd, xpu>::Launch(s, sel_indices.shape_.Size(), 1, 0.0f,
1.0f, kWriteTo, dummy_index.dptr_);
mxnet::op::AddTakeGradLargeBatch(in_grad, sel_indices, dummy_index, out_grad);
} else if (kNullOp == req[0]) {
return;
} else {
LOG(FATAL) << "Not Implemented!";
}
}
/*!
 * \brief Total number of output blobs of TopK.
 *        Index-only and mask variants emit a single blob; the value-returning
 *        variants emit values plus indices.
 */
inline uint32_t TopKNumOutputs(const NodeAttrs& attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  const bool single_output = param.ret_typ == topk_enum::kReturnIndices ||
                             param.ret_typ == topk_enum::kReturnMask;
  return single_output ? 1U : 2U;
}
/*!
 * \brief Number of outputs exposed to the user.
 *        Only kReturnBoth shows both values and indices; every other variant
 *        surfaces a single output (extra blobs stay internal for backward).
 */
inline uint32_t TopKNumVisibleOutputs(const NodeAttrs& attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  return (param.ret_typ == topk_enum::kReturnBoth) ? 2U : 1U;
}
// Dtype inference for the top-k family: all inputs and outputs share one
// dtype, propagated in both directions (-1 = no default dtype).
inline bool TopKType(const nnvm::NodeAttrs& attrs,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
return ElemwiseAttr<int, type_is_none, type_assign, true, type_string>(
attrs, in_attrs, out_attrs, -1);
}
/*!
 * \brief Shared shape-inference routine for TopK/Sort/ArgSort.
 *        Validates the output count for the requested return type, parses the
 *        top-k configuration against the input shape, and assigns the computed
 *        target shape to every output.
 * \return always true (shape mismatches abort via the CHECK macros)
 */
inline bool TopKShapeImpl(const TopKParam& param,
                          std::vector<TShape> *in_attrs,
                          std::vector<TShape> *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  // Index-only and mask variants produce one output; value variants produce two.
  const bool single_output = param.ret_typ == topk_enum::kReturnIndices ||
                             param.ret_typ == topk_enum::kReturnMask;
  CHECK_EQ(out_attrs->size(), single_output ? 1U : 2U);
  TShape& in_shape = (*in_attrs)[0];
  int batch_size, element_num;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  int k = 0;
  TShape target_shape;
  ParseTopKParam(in_shape, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  // Every output (values and/or indices) has the same target shape.
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
  if (!single_output) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 1, target_shape);
  }
  return true;
}
// Shape inference entry for TopK: fetch the parsed param and delegate.
inline bool TopKShape(const nnvm::NodeAttrs& attrs,
                      std::vector<TShape> *in_attrs,
                      std::vector<TShape> *out_attrs) {
  return TopKShapeImpl(nnvm::get<TopKParam>(attrs.parsed), in_attrs, out_attrs);
}
// Shape inference for Sort: identical to a full top-k (k == 0) that returns
// sorted values, so reuse TopKShapeImpl through a proxy param.
inline bool SortShape(const nnvm::NodeAttrs& attrs,
                      std::vector<TShape> *in_attrs,
                      std::vector<TShape> *out_attrs) {
  const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
  TopKParam proxy;
  proxy.k = 0;                              // k == 0 means "keep all elements"
  proxy.ret_typ = topk_enum::kReturnValue;  // Sort emits sorted values
  proxy.axis = param.axis;
  proxy.is_ascend = param.is_ascend;
  return TopKShapeImpl(proxy, in_attrs, out_attrs);
}
// Shape inference for ArgSort: identical to a full top-k (k == 0) that
// returns indices, so reuse TopKShapeImpl through a proxy param.
inline bool ArgSortShape(const nnvm::NodeAttrs& attrs,
                         std::vector<TShape> *in_attrs,
                         std::vector<TShape> *out_attrs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  TopKParam proxy;
  proxy.k = 0;                                // k == 0 means "keep all elements"
  proxy.ret_typ = topk_enum::kReturnIndices;  // ArgSort emits indices
  proxy.axis = param.axis;
  proxy.is_ascend = param.is_ascend;
  return TopKShapeImpl(proxy, in_attrs, out_attrs);
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
|
d2q9-bgk.c | /*
** Code to implement a d2q9-bgk lattice boltzmann scheme.
** 'd2' inidates a 2-dimensional grid, and
** 'q9' indicates 9 velocities per grid cell.
** 'bgk' refers to the Bhatnagar-Gross-Krook collision step.
**
** The 'speeds' in each cell are numbered as follows:
**
** 6 2 5
** \|/
** 3-0-1
** /|\
** 7 4 8
**
** A 2D grid:
**
** cols
** --- --- ---
** | D | E | F |
** rows --- --- ---
** | A | B | C |
** --- --- ---
**
** 'unwrapped' in row major order to give a 1D array:
**
** --- --- --- --- --- ---
** | A | B | C | D | E | F |
** --- --- --- --- --- ---
**
** Grid indicies are:
**
** ny
** ^ cols(jj)
** | ----- ----- -----
** | | ... | ... | etc |
** | ----- ----- -----
** rows(ii) | | 1,0 | 1,1 | 1,2 |
** | ----- ----- -----
** | | 0,0 | 0,1 | 0,2 |
** | ----- ----- -----
** ----------------------> nx
**
** Note the names of the input parameter and obstacle files
** are passed on the command line, e.g.:
**
** d2q9-bgk.exe input.params obstacles.dat
**
** Be sure to adjust the grid dimensions in the parameter file
** if you choose a different obstacle file.
*/
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#include<sys/time.h>
#include<sys/resource.h>
#include<omp.h>
#include "mpi.h"
//#include<fenv.h>
#define NSPEEDS 9
#define FINALSTATEFILE "final_state.dat"
#define AVVELSFILE "av_vels.dat"
#define BLOCKSIZE 16 //Not used
#define NUMTHREADS 16 //MAX
#define MPI_PROCS 64 //MAX
#define MASTER 0
//#define PAR //Comment this out for no OpenMP
//Vector size
#define VECSIZE 8
MPI_Datatype MPI_ROW_OF_OBSTACLES;
MPI_Datatype MPI_TCELL;
MPI_Datatype MPI_ROW_OF_CELLS;
/* struct to hold the parameter values */
struct __declspec(align(32)) t_param
{
float density; /* density per link */
float accel; /* density redistribution */
float omega; /* relaxation parameter */
float free_cells_inv;
int nx; /* no. of cells in x-direction */
int ny; /* no. of cells in y-direction */
int maxIters; /* no. of iterations */
int reynolds_dim; /* dimension for Reynolds number */
};
typedef struct t_param t_param;
/* struct to hold the 'speed' values: one particle distribution per lattice
** direction for a single cell (direction numbering per the header diagram:
** 0 = rest, 1-4 = axis directions, 5-8 = diagonals) */
typedef struct
{
float speeds[NSPEEDS];
} t_speed;
/*
** function prototypes
*/
/* load params, allocate memory, load obstacles & initialise fluid particle densities */
int initialise(char* paramfile, const char* obstaclefile,
t_param* params, t_speed** cells_ptr, t_speed** tmp_cells_ptr,
int** obstacles_ptr, float** av_vels_ptr, float** av_vels_local_ptr, int rank, int size,
int* ny_local, int* displs);
void preprocess_obstacles(int* obstacles,const t_param params);
/*
** The main calculation methods.
** timestep calls, in order, the functions:
** accelerate_flow(), propagate(), rebound() & collision()
*/
int accelerate_flow(const t_param params, t_speed* restrict cells, int* restrict obstacles,
int rank, int* ny_local);
//int propagate(const t_param params, t_speed** cells_ptr, t_speed** tmp_cells_ptr);
//int rebound(const t_param params, t_speed** cells_ptr, t_speed** tmp_cells_ptr, int* obstacles);
//int collision(const t_param params, t_speed** cells_ptr, t_speed** tmp_cells_ptr, int* obstacles);
float timestep(const t_param params, t_speed* restrict cells, t_speed* restrict tmp_cells,
int* restrict obstacles, int start, int end, MPI_Request* requests);
int write_values(const t_param params, t_speed* cells, int* obstacles, float* av_vels, float* av_vels_local,
int rank, int size, int* ny_local, int* displs);
/* finalise, including freeing up allocated memory */
int finalise(const t_param* params, t_speed** cells_ptr, t_speed** tmp_cells_ptr,
int** obstacles_ptr, float** av_vels_ptr, float** av_vels_local_ptr);
/* Sum all the densities in the grid.
** The total should remain constant from one timestep to the next. */
float total_density(const t_param params, t_speed* cells,
int rank, int size, int* ny_local, int* displs);
/* compute average velocity */
float av_velocity(const t_param params, t_speed* cells, int* obstacles,
int rank, int size, int* ny_local, int* displs);
/* calculate Reynolds number */
float calc_reynolds(const t_param params, t_speed* cells, int* obstacles,
int rank, int size, int* ny_local, int* displs);
/* utility functions */
void die(const char* message, const int line, const char* file);
void usage(const char* exe);
/*
** main program:
** initialise, timestep loop, finalise
*/
int main(int argc, char* argv[])
{
char* paramfile = NULL; /* name of the input parameter file */
char* obstaclefile = NULL; /* name of the input obstacle file */
t_param params; /* struct to hold parameter values */
t_speed* cells = NULL; /* grid containing fluid densities */
//t_speed* cells1 = NULL;
t_speed* tmp_cells = NULL;
//t_speed* tmp_cells1 = NULL; /* scratch space */
int* obstacles = NULL; /* grid indicating which cells are blocked */
//int* obstacles1 = NULL;
float* av_vels = NULL; /* a record of the av. velocity computed for each timestep */
float* av_vels_local = NULL; /* per-rank partial av. velocity sums, reduced into av_vels */
struct timeval timstr; /* structure to hold elapsed time */
struct rusage ru; /* structure to hold CPU time--system and user */
double tic, toc; /* floating point numbers to calculate elapsed wallclock time */
double usrtim; /* floating point number to record elapsed user CPU time */
double systim; /* floating point number to record elapsed system CPU time */
#ifdef PAR
int tsize = omp_get_max_threads();
//int tsize = 15;
#else
int tsize = 1;
#endif
//printf("Threads: %d\n",tsize);
/************** MPI Part ********************/
int size=1, rank=0;
//int required = MPI_THREAD_MULTIPLE;
//int provided;
//MPI_Init_thread( &argc, &argv, required, &provided );
//if(required!=provided) MPI_Abort(MPI_COMM_WORLD, 1);
MPI_Init( &argc, &argv );
MPI_Comm_size( MPI_COMM_WORLD, &size );
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
//printf("Rank:%d, Size:%d\n",rank,size);
int ny_local[MPI_PROCS]; /* rows owned by each rank (excludes halo rows) */
int displs[MPI_PROCS]; /* row displacement of each rank in the global grid */
/* **************************************** */
//feenableexcept(FE_INVALID | FE_OVERFLOW);
/* parse the command line */
if (argc != 3)
{
usage(argv[0]);
}
else
{
paramfile = argv[1];
obstaclefile = argv[2];
}
/* initialise our data structures and load values from file */
initialise(paramfile, obstaclefile, &params, &cells, &tmp_cells, &obstacles, &av_vels, &av_vels_local,
rank, size, ny_local, displs);
/***************************OpenMP*******************************/
/* split this rank's rows across the OpenMP threads */
int omp_ny_local[NUMTHREADS];
int omp_displs[NUMTHREADS];
int omp_orig_ny_local = ny_local[rank]/tsize;
int omp_left = ny_local[rank]%tsize;
int one_for_last_thread = 0;
int one_less_for_second_to_last = 0;
//if it is less than 3 then it is 2 given that the smallest
//size is 128x128 and max rank size is 64.
if(omp_orig_ny_local<3 && omp_left){
omp_left--;
one_for_last_thread = 1;
}
else if(omp_orig_ny_local<3 && !omp_left){
one_for_last_thread = 1;
one_less_for_second_to_last = 1;
}
//we need to make sure that the last thread gets at least 3 rows
//so that accelerate_flow will not affect other rows.
for(int tid=0;tid<tsize;tid++){
if(tid<tsize-2)
omp_ny_local[tid] = omp_orig_ny_local;
else if(tid == tsize-2)
omp_ny_local[tid] = omp_orig_ny_local - one_less_for_second_to_last;
else if(tid == tsize-1)
omp_ny_local[tid] = omp_orig_ny_local + one_for_last_thread;
if(tid<omp_left) omp_ny_local[tid]++;
if(tid == MASTER)
omp_displs[tid] = 1; //start from 1 to accommodate the halo rows
else
omp_displs[tid] = omp_displs[tid-1] + omp_ny_local[tid-1];
}
/* ring decomposition: each rank exchanges halo rows with the rank above
** ('top') and below ('bottom'), wrapping around at the edges */
int tag = 0;
int top = rank-1;
if(top<0) top = size-1;
int bottom = (rank+1)%size;
int haloTopOffset = 0; /* local row 0 = halo received from 'top' */
int haloBottomOffset = params.nx*(ny_local[rank]+1); /* last local row = halo from 'bottom' */
int topRowOffset = params.nx; /* first owned row (sent to 'top') */
int bottomRowOffset = params.nx*ny_local[rank]; /* last owned row (sent to 'bottom') */
MPI_Request requests[4]; /* persistent halo exchange bound to 'cells' */
MPI_Request requests2[4]; /* persistent halo exchange bound to 'tmp_cells' */
MPI_Request* lookup[2];
lookup[0] = requests;
lookup[1] = requests2;
int reqIndex = 0; /* selects the request set matching the current 'cells' buffer */
MPI_Request* curReq;
/* lock used to order the MASTER thread's halo-row work before the last
** thread's (PAR builds only); created locked */
omp_lock_t writelock;
omp_init_lock(&writelock);
omp_set_lock(&writelock);
int flag = 0; /* NOTE(review): unused unless the MPI_Testall line below is re-enabled */
//int bufsize1 = 0;
//int bufsize2 = 0;
//MPI_Pack_size(1, MPI_ROW_OF_CELLS, MPI_COMM_WORLD, &bufsize1);
//MPI_Pack_size(1, MPI_ROW_OF_CELLS, MPI_COMM_WORLD, &bufsize2);
//int bufsize = bufsize1 + bufsize2 + 2*MPI_BSEND_OVERHEAD;
//void* userbuff = malloc(bufsize);
//MPI_Buffer_attach(userbuff, bufsize);
/* ************************************************************** */
/* iterate for maxIters timesteps */
#ifdef PROFILE
MPI_Pcontrol(1,"mainloop");
#endif
gettimeofday(&timstr, NULL);
tic = timstr.tv_sec + (timstr.tv_usec / 1000000.0);
#ifdef PAR
#pragma omp parallel private(curReq,reqIndex) firstprivate(tmp_cells,cells)
#endif
{
#ifdef PAR
int tid = omp_get_thread_num();
int start = omp_displs[tid];
int end = start + omp_ny_local[tid];
//printf("%d: %d -- %d\n",rank,start,end);
/* the first owned row and the last owned row are deferred until the halo
** exchange completes, so trim them from this thread's interior range */
if(start == 1) start++;
if(end == ny_local[rank] + 1) end--;
#else
int start = 2;
int end = ny_local[rank];
#endif
//NOTE(review): in PAR builds every thread executes these *_init calls on
//the shared 'requests'/'requests2' arrays; presumably they race to
//identical values -- verify before re-enabling PAR.
MPI_Recv_init(&cells[haloBottomOffset], 1, MPI_ROW_OF_CELLS, bottom, tag,
MPI_COMM_WORLD, &requests[0]);
MPI_Recv_init(&cells[haloTopOffset], 1, MPI_ROW_OF_CELLS, top, tag,
MPI_COMM_WORLD, &requests[1]);
MPI_Send_init(&cells[topRowOffset], 1, MPI_ROW_OF_CELLS, top, tag,
MPI_COMM_WORLD, &requests[2]);
MPI_Send_init(&cells[bottomRowOffset], 1, MPI_ROW_OF_CELLS, bottom, tag,
MPI_COMM_WORLD, &requests[3]);
MPI_Recv_init(&tmp_cells[haloBottomOffset], 1, MPI_ROW_OF_CELLS, bottom, tag,
MPI_COMM_WORLD, &requests2[0]);
MPI_Recv_init(&tmp_cells[haloTopOffset], 1, MPI_ROW_OF_CELLS, top, tag,
MPI_COMM_WORLD, &requests2[1]);
MPI_Send_init(&tmp_cells[topRowOffset], 1, MPI_ROW_OF_CELLS, top, tag,
MPI_COMM_WORLD, &requests2[2]);
MPI_Send_init(&tmp_cells[bottomRowOffset], 1, MPI_ROW_OF_CELLS, bottom, tag,
MPI_COMM_WORLD, &requests2[3]);
for (unsigned int tt = 0; tt < params.maxIters;tt++)
{
#ifdef PAR
#pragma omp barrier
#endif
/* kick off the halo exchange for the current 'cells' buffer, then
** overlap it with the interior-row computation below */
#ifdef PAR
if(tid == MASTER)
{
#endif
curReq = lookup[reqIndex];
MPI_Startall(4, curReq);
reqIndex ^= 1;
//MPI_Testall(4, curReq, &flag, MPI_STATUSES_IGNORE);
#ifdef PAR
}
#endif
//MPI_Sendrecv(&cells[topRowOffset], 1, MPI_ROW_OF_CELLS, top, tag,
// &cells[haloBottomOffset], 1, MPI_ROW_OF_CELLS, bottom, tag,
// MPI_COMM_WORLD, MPI_STATUS_IGNORE);
//MPI_Sendrecv(&cells[bottomRowOffset], 1, MPI_ROW_OF_CELLS, bottom, tag,
// &cells[haloTopOffset], 1, MPI_ROW_OF_CELLS, top, tag,
// MPI_COMM_WORLD, MPI_STATUS_IGNORE);
//}
/* only the last rank (and, in PAR builds, its last thread) owns the
** accelerated row */
#ifdef PAR
if(tid==tsize-1 && rank==size-1){
#else
if(rank == size-1){
#endif
accelerate_flow(params, cells, obstacles, rank, ny_local);
}
/* interior rows: safe to compute while the halo exchange is in flight */
float local = timestep(params, cells, tmp_cells, obstacles, start, end, requests);
#ifdef PAR
if(tid == MASTER){
MPI_Waitall(4, curReq, MPI_STATUSES_IGNORE);
omp_unset_lock(&writelock);
local += timestep(params, cells, tmp_cells, obstacles, 1, 2, requests);
}
if(tid == tsize-1){
omp_set_lock(&writelock);
local += timestep(params, cells, tmp_cells, obstacles, ny_local[rank], ny_local[rank]+1, requests);
}
#else
/* halo rows arrived: finish the first and last owned rows */
MPI_Waitall(4,curReq, MPI_STATUSES_IGNORE);
local += timestep(params, cells, tmp_cells, obstacles, 1, 2, requests);
local += timestep(params, cells, tmp_cells, obstacles, ny_local[rank], ny_local[rank]+1, requests);
av_vels_local[tt] = local * params.free_cells_inv;
#endif
#ifdef PAR
local *= params.free_cells_inv;
#pragma omp atomic
av_vels_local[tt] += local;
#endif
/* swap grids for the next iteration; reqIndex was already flipped so the
** other persistent request set now matches 'cells' */
t_speed* tmp = cells;
cells = tmp_cells;
tmp_cells = tmp;
/*
#ifdef DEBUG
#ifdef PAR
#pragma omp single nowait
#endif
{
float dens = total_density(params, cells, rank, size, ny_local, displs);
if(rank==MASTER){
printf("==timestep: %d==\n", tt);
printf("av velocity: %.12E\n", av_vels[tt]);
printf("tot density: %.12E\n", dens);
}
}
#endif
*/
}
}
/* combine the per-rank partial sums into the global per-iteration averages */
MPI_Reduce(av_vels_local, av_vels, params.maxIters, MPI_FLOAT, MPI_SUM, MASTER, MPI_COMM_WORLD);
gettimeofday(&timstr, NULL);
toc = timstr.tv_sec + (timstr.tv_usec / 1000000.0);
getrusage(RUSAGE_SELF, &ru);
timstr = ru.ru_utime;
usrtim = timstr.tv_sec + (timstr.tv_usec / 1000000.0);
timstr = ru.ru_stime;
systim = timstr.tv_sec + (timstr.tv_usec / 1000000.0);
#ifdef PROFILE
MPI_Pcontrol(-1,"mainloop");
#endif
/* write final values and free memory */
float reyn = calc_reynolds(params, cells, obstacles, rank, size, ny_local, displs);
if(rank == MASTER)
{
printf("==done==\n");
printf("Reynolds number:\t\t%.12E\n", reyn);
printf("Elapsed time:\t\t\t%.6lf (s)\n", toc - tic);
printf("Elapsed user CPU time:\t\t%.6lf (s)\n", usrtim);
printf("Elapsed system CPU time:\t%.6lf (s)\n", systim);
}
omp_destroy_lock(&writelock);
#ifndef PROFILE
write_values(params, cells, obstacles, av_vels, av_vels_local, rank, size, ny_local, displs);
#endif
finalise(&params, &cells, &tmp_cells, &obstacles, &av_vels, &av_vels_local);
//int dummy;
//MPI_Buffer_detach(&userbuff, &dummy);
//NOTE(review): the persistent requests in 'requests2' and the MPI_TCELL
//datatype are never freed here -- only 'requests' and the two row types.
MPI_Request_free(&requests[0]);
MPI_Request_free(&requests[1]);
MPI_Request_free(&requests[2]);
MPI_Request_free(&requests[3]);
MPI_Type_free(&MPI_ROW_OF_OBSTACLES);
MPI_Type_free(&MPI_ROW_OF_CELLS);
//free(userbuff);
MPI_Finalize();
return EXIT_SUCCESS;
}
/*
** Apply the horizontal body-force to one designated row of this rank's
** sub-grid (local row ny_local[rank]-1): add weight to the east-pointing
** distributions and remove it from the west-pointing ones. Cells that are
** obstacles, or whose west-side densities would be driven non-positive,
** are left untouched. Always returns EXIT_SUCCESS.
*/
inline int accelerate_flow(const t_param params, t_speed* restrict cells, int* restrict obstacles, int rank, int* ny_local)
{
  /* compute weighting factors: w1 = density*accel/9, w2 = density*accel/36 */
  const float w1 = params.density * params.accel * 0.111111111111111111111111f;
  const float w2 = params.density * params.accel * 0.0277777777777777777777778f;
  const int row = ny_local[rank] - 1;
  const int base = row * params.nx;
  #pragma vector aligned
  for (int col = 0; col < params.nx; col++)
  {
    const int idx = base + col;
    t_speed* cell = &cells[idx];
    /* skip obstacles and cells that would end up with negative densities */
    if (!obstacles[idx]
        && cell->speeds[3] - w1 > 0.0f
        && cell->speeds[6] - w2 > 0.0f
        && cell->speeds[7] - w2 > 0.0f)
    {
      /* increase 'east-side' densities */
      cell->speeds[1] += w1;
      cell->speeds[5] += w2;
      cell->speeds[8] += w2;
      /* decrease 'west-side' densities */
      cell->speeds[3] -= w1;
      cell->speeds[6] -= w2;
      cell->speeds[7] -= w2;
    }
  }
  return EXIT_SUCCESS;
}
//float sqrt13(float n)
//{
// float result;
//
// __asm__(
// "fsqrt\n\t"
// : "=t"(result) : "0"(n)
// );
//
// return result;
//}
/*
** Fused propagate + rebound + collision step over rows [start, end) of this
** rank's sub-grid (local row 0 and row ny_local+1 are halo rows, so callers
** pass interior ranges). Reads distributions from 'cells', writes the
** post-collision values into 'tmp_cells', and returns the sum of velocity
** magnitudes of the non-obstacle cells processed (the caller scales by
** free_cells_inv to get the average velocity contribution).
** NOTE(review): 'requests' is unused here; params.nx is presumably a
** multiple of VECSIZE -- the loops have no remainder handling.
*/
inline float timestep(const t_param params, t_speed* restrict cells, t_speed* restrict tmp_cells,
int* restrict obstacles, int start, int end, MPI_Request* requests)
{
//static const float c_sq = 1.0 / 3.0; /* square of speed of sound */
static const float ic_sq = 3.0f; /* inverse of c_sq, the squared speed of sound */
//static const float ic_sq_sq = 9.0;
static const float w0 = 4.0f / 9.0f; /* weighting factor */
static const float w1 = 1.0f / 9.0f; /* weighting factor */
static const float w2 = 1.0f / 36.0f; /* weighting factor */
float tot_u = 0.0f;
/* loop over the cells in the grid
** NB the collision step is called after
** the propagate step and so values of interest
** are in the scratch-space grid */
for (unsigned int ii = start; ii < end; ii++)
{
/* neighbour rows; halo rows guarantee ii +/- 1 stay in bounds, so no
** vertical wrap-around is needed here */
int y_n = ii + 1;
int y_s = ii - 1;
//int y_n = ii + 1;
//if(y_n > params.ny) y_n = 1;
//int y_s = 0;
//if (ii == 1)
// y_s = params.ny;
//else
// y_s = ii - 1;
for(unsigned int jj = 0; jj < params.nx; jj+=VECSIZE){
/* determine indices of axis-direction neighbours
** respecting periodic boundary conditions (wrap around) */
/* gather step (propagate): pull each of the 9 distributions from the
** appropriate neighbour into a direction-major VECSIZE-wide buffer */
float tmp[VECSIZE*NSPEEDS] __attribute__((aligned(32)));
#pragma vector aligned
for(int k=0;k<VECSIZE;k++){
int x = jj+k;
int x_e = x + 1;
if(x_e >= params.nx) x_e -= params.nx;
int x_w = (x == 0) ? (params.nx - 1) : (x-1);
tmp[VECSIZE*0+k] = cells[ii * params.nx + x].speeds[0];
tmp[VECSIZE*1+k] = cells[ii * params.nx + x_w].speeds[1];
tmp[VECSIZE*2+k] = cells[y_s * params.nx + x].speeds[2];
tmp[VECSIZE*3+k] = cells[ii * params.nx + x_e].speeds[3];
tmp[VECSIZE*4+k] = cells[y_n * params.nx + x].speeds[4];
tmp[VECSIZE*5+k] = cells[y_s * params.nx + x_w].speeds[5];
tmp[VECSIZE*6+k] = cells[y_s * params.nx + x_e].speeds[6];
tmp[VECSIZE*7+k] = cells[y_n * params.nx + x_e].speeds[7];
tmp[VECSIZE*8+k] = cells[y_n * params.nx + x_w].speeds[8];
}
/* local density: sum of all 9 distributions per lane */
float densvec[VECSIZE] __attribute__((aligned(32)));
#pragma vector aligned
for(int k=0;k<VECSIZE;k++){
densvec[k] = tmp[VECSIZE*0+k];
densvec[k] += tmp[VECSIZE*1+k];
densvec[k] += tmp[VECSIZE*2+k];
densvec[k] += tmp[VECSIZE*3+k];
densvec[k] += tmp[VECSIZE*4+k];
densvec[k] += tmp[VECSIZE*5+k];
densvec[k] += tmp[VECSIZE*6+k];
densvec[k] += tmp[VECSIZE*7+k];
densvec[k] += tmp[VECSIZE*8+k];
}
float densinv[VECSIZE] __attribute__((aligned(32)));
#pragma vector aligned
for(int k=0;k<VECSIZE;k++)
{
densinv[k] = 1.0f/densvec[k];
}
/* x/y momentum components (density-weighted velocity: the division by
** density is deferred -- see the commented-out *= densinv lines) */
float u_x[VECSIZE] __attribute__((aligned(32)));
float u_y[VECSIZE] __attribute__((aligned(32)));
#pragma vector aligned
for(int k=0;k<VECSIZE;k++)
{
u_x[k] = tmp[VECSIZE*1+k] + tmp[VECSIZE*5+k];
u_x[k] += tmp[VECSIZE*8+k];
u_x[k] -= tmp[VECSIZE*3+k];
u_x[k] -= tmp[VECSIZE*6+k];
u_x[k] -= tmp[VECSIZE*7+k];
//u_x[k] *= densinv[k];
u_y[k] = tmp[VECSIZE*2+k] + tmp[VECSIZE*5+k];
u_y[k] += tmp[VECSIZE*6+k];
u_y[k] -= tmp[VECSIZE*4+k];
u_y[k] -= tmp[VECSIZE*7+k];
u_y[k] -= tmp[VECSIZE*8+k];
//u_y[k] *= densinv[k];
}
float u_sq[VECSIZE] __attribute__((aligned(32)));
#pragma vector aligned
for(int k=0;k<VECSIZE;k++)
{
u_sq[k] = u_x[k]*u_x[k] + u_y[k]*u_y[k];
}
/* projection of the momentum onto each of the 8 moving directions */
float uvec[NSPEEDS*VECSIZE] __attribute__((aligned(32)));
#pragma vector aligned
for(int k=0;k<VECSIZE;k++)
{
uvec[VECSIZE*1+k] = u_x[k];
uvec[VECSIZE*2+k] = u_y[k];
uvec[VECSIZE*3+k] = - u_x[k];
uvec[VECSIZE*4+k] = - u_y[k];
uvec[VECSIZE*5+k] = u_x[k] + u_y[k];
uvec[VECSIZE*6+k] = - u_x[k] + u_y[k];
uvec[VECSIZE*7+k] = - u_x[k] - u_y[k];
uvec[VECSIZE*8+k] = u_x[k] - u_y[k];
}
float ic_sqtimesu[NSPEEDS*VECSIZE] __attribute__((aligned(32)));
#pragma vector aligned
for(int k=0;k<VECSIZE;k++)
{
ic_sqtimesu[VECSIZE*1+k] = uvec[VECSIZE*1+k]*ic_sq;
ic_sqtimesu[VECSIZE*2+k] = uvec[VECSIZE*2+k]*ic_sq;
ic_sqtimesu[VECSIZE*3+k] = uvec[VECSIZE*3+k]*ic_sq;
ic_sqtimesu[VECSIZE*4+k] = uvec[VECSIZE*4+k]*ic_sq;
ic_sqtimesu[VECSIZE*5+k] = uvec[VECSIZE*5+k]*ic_sq;
ic_sqtimesu[VECSIZE*6+k] = uvec[VECSIZE*6+k]*ic_sq;
ic_sqtimesu[VECSIZE*7+k] = uvec[VECSIZE*7+k]*ic_sq;
ic_sqtimesu[VECSIZE*8+k] = uvec[VECSIZE*8+k]*ic_sq;
}
float ic_sqtimesu_sq[NSPEEDS*VECSIZE] __attribute__((aligned(32)));
#pragma vector aligned
for(int k=0;k<VECSIZE;k++)
{
ic_sqtimesu_sq[VECSIZE*1+k] = ic_sqtimesu[VECSIZE*1+k] * uvec[VECSIZE*1+k];
ic_sqtimesu_sq[VECSIZE*2+k] = ic_sqtimesu[VECSIZE*2+k] * uvec[VECSIZE*2+k];
ic_sqtimesu_sq[VECSIZE*3+k] = ic_sqtimesu[VECSIZE*3+k] * uvec[VECSIZE*3+k];
ic_sqtimesu_sq[VECSIZE*4+k] = ic_sqtimesu[VECSIZE*4+k] * uvec[VECSIZE*4+k];
ic_sqtimesu_sq[VECSIZE*5+k] = ic_sqtimesu[VECSIZE*5+k] * uvec[VECSIZE*5+k];
ic_sqtimesu_sq[VECSIZE*6+k] = ic_sqtimesu[VECSIZE*6+k] * uvec[VECSIZE*6+k];
ic_sqtimesu_sq[VECSIZE*7+k] = ic_sqtimesu[VECSIZE*7+k] * uvec[VECSIZE*7+k];
ic_sqtimesu_sq[VECSIZE*8+k] = ic_sqtimesu[VECSIZE*8+k] * uvec[VECSIZE*8+k];
}
/* equilibrium densities (BGK); the momentum form folds an extra densinv
** factor into the quadratic terms */
float d_equ[NSPEEDS*VECSIZE] __attribute__((aligned(32)));
#pragma vector aligned
for(int k=0;k<VECSIZE;k++)
{
d_equ[VECSIZE*0+k] = w0 * (densvec[k] - 0.5f*densinv[k]*ic_sq*u_sq[k]);
d_equ[VECSIZE*1+k] = w1 * (densvec[k] + ic_sqtimesu[VECSIZE*1+k] + 0.5f * densinv[k]*ic_sq * (ic_sqtimesu_sq[VECSIZE*1+k]-u_sq[k]) );
d_equ[VECSIZE*2+k] = w1 * (densvec[k] + ic_sqtimesu[VECSIZE*2+k] + 0.5f * densinv[k]*ic_sq * (ic_sqtimesu_sq[VECSIZE*2+k]-u_sq[k]) );
d_equ[VECSIZE*3+k] = w1 * (densvec[k] + ic_sqtimesu[VECSIZE*3+k] + 0.5f * densinv[k]*ic_sq * (ic_sqtimesu_sq[VECSIZE*3+k]-u_sq[k]) );
d_equ[VECSIZE*4+k] = w1 * (densvec[k] + ic_sqtimesu[VECSIZE*4+k] + 0.5f * densinv[k]*ic_sq * (ic_sqtimesu_sq[VECSIZE*4+k]-u_sq[k]) );
d_equ[VECSIZE*5+k] = w2 * (densvec[k] + ic_sqtimesu[VECSIZE*5+k] + 0.5f * densinv[k]*ic_sq * (ic_sqtimesu_sq[VECSIZE*5+k]-u_sq[k]) );
d_equ[VECSIZE*6+k] = w2 * (densvec[k] + ic_sqtimesu[VECSIZE*6+k] + 0.5f * densinv[k]*ic_sq * (ic_sqtimesu_sq[VECSIZE*6+k]-u_sq[k]) );
d_equ[VECSIZE*7+k] = w2 * (densvec[k] + ic_sqtimesu[VECSIZE*7+k] + 0.5f * densinv[k]*ic_sq * (ic_sqtimesu_sq[VECSIZE*7+k]-u_sq[k]) );
d_equ[VECSIZE*8+k] = w2 * (densvec[k] + ic_sqtimesu[VECSIZE*8+k] + 0.5f * densinv[k]*ic_sq * (ic_sqtimesu_sq[VECSIZE*8+k]-u_sq[k]) );
}
/* obst == 0 iff the whole VECSIZE block is obstacle-free, which lets the
** common case take a branch-free fully-vectorised path */
int obst=0;
#pragma vector aligned
for(int k=0;k<VECSIZE;k++){
obst+=obstacles[ii*params.nx+jj+k];
}
if(!obst){
#pragma vector aligned
for(int k=0;k<VECSIZE;k++){
/* relaxation step: move each distribution towards equilibrium */
tmp_cells[ii * params.nx + jj + k].speeds[0] = tmp[VECSIZE*0+k] + params.omega*(d_equ[VECSIZE*0+k] - tmp[VECSIZE*0+k]);
tmp_cells[ii * params.nx + jj + k].speeds[1] = tmp[VECSIZE*1+k] + params.omega*(d_equ[VECSIZE*1+k] - tmp[VECSIZE*1+k]);
tmp_cells[ii * params.nx + jj + k].speeds[2] = tmp[VECSIZE*2+k] + params.omega*(d_equ[VECSIZE*2+k] - tmp[VECSIZE*2+k]);
tmp_cells[ii * params.nx + jj + k].speeds[3] = tmp[VECSIZE*3+k] + params.omega*(d_equ[VECSIZE*3+k] - tmp[VECSIZE*3+k]);
tmp_cells[ii * params.nx + jj + k].speeds[4] = tmp[VECSIZE*4+k] + params.omega*(d_equ[VECSIZE*4+k] - tmp[VECSIZE*4+k]);
tmp_cells[ii * params.nx + jj + k].speeds[5] = tmp[VECSIZE*5+k] + params.omega*(d_equ[VECSIZE*5+k] - tmp[VECSIZE*5+k]);
tmp_cells[ii * params.nx + jj + k].speeds[6] = tmp[VECSIZE*6+k] + params.omega*(d_equ[VECSIZE*6+k] - tmp[VECSIZE*6+k]);
tmp_cells[ii * params.nx + jj + k].speeds[7] = tmp[VECSIZE*7+k] + params.omega*(d_equ[VECSIZE*7+k] - tmp[VECSIZE*7+k]);
tmp_cells[ii * params.nx + jj + k].speeds[8] = tmp[VECSIZE*8+k] + params.omega*(d_equ[VECSIZE*8+k] - tmp[VECSIZE*8+k]);
/* |u| = sqrt(u_sq)/density since u_x/u_y are still density-weighted */
tot_u += sqrt(u_sq[k]) * densinv[k];
}
}
else{
/* mixed block: handle collision and bounce-back per lane */
#pragma vector aligned
for(int k=0;k<VECSIZE;k++){
if(!obstacles[ii * params.nx +jj +k]){
tmp_cells[ii * params.nx + jj + k].speeds[0] = tmp[VECSIZE*0+k] + params.omega*(d_equ[VECSIZE*0+k] - tmp[VECSIZE*0+k]);
tmp_cells[ii * params.nx + jj + k].speeds[1] = tmp[VECSIZE*1+k] + params.omega*(d_equ[VECSIZE*1+k] - tmp[VECSIZE*1+k]);
tmp_cells[ii * params.nx + jj + k].speeds[2] = tmp[VECSIZE*2+k] + params.omega*(d_equ[VECSIZE*2+k] - tmp[VECSIZE*2+k]);
tmp_cells[ii * params.nx + jj + k].speeds[3] = tmp[VECSIZE*3+k] + params.omega*(d_equ[VECSIZE*3+k] - tmp[VECSIZE*3+k]);
tmp_cells[ii * params.nx + jj + k].speeds[4] = tmp[VECSIZE*4+k] + params.omega*(d_equ[VECSIZE*4+k] - tmp[VECSIZE*4+k]);
tmp_cells[ii * params.nx + jj + k].speeds[5] = tmp[VECSIZE*5+k] + params.omega*(d_equ[VECSIZE*5+k] - tmp[VECSIZE*5+k]);
tmp_cells[ii * params.nx + jj + k].speeds[6] = tmp[VECSIZE*6+k] + params.omega*(d_equ[VECSIZE*6+k] - tmp[VECSIZE*6+k]);
tmp_cells[ii * params.nx + jj + k].speeds[7] = tmp[VECSIZE*7+k] + params.omega*(d_equ[VECSIZE*7+k] - tmp[VECSIZE*7+k]);
tmp_cells[ii * params.nx + jj + k].speeds[8] = tmp[VECSIZE*8+k] + params.omega*(d_equ[VECSIZE*8+k] - tmp[VECSIZE*8+k]);
tot_u += sqrt(u_sq[k]) * densinv[k];
}
else{
/* rebound (bounce-back): reflect each incoming distribution into the
** opposite direction (1<->3, 2<->4, 5<->7, 6<->8) */
tmp_cells[ii * params.nx + jj + k].speeds[0] = tmp[VECSIZE*0+k];
tmp_cells[ii * params.nx + jj + k].speeds[3] = tmp[VECSIZE*1+k];
tmp_cells[ii * params.nx + jj + k].speeds[4] = tmp[VECSIZE*2+k];
tmp_cells[ii * params.nx + jj + k].speeds[1] = tmp[VECSIZE*3+k];
tmp_cells[ii * params.nx + jj + k].speeds[2] = tmp[VECSIZE*4+k];
tmp_cells[ii * params.nx + jj + k].speeds[7] = tmp[VECSIZE*5+k];
tmp_cells[ii * params.nx + jj + k].speeds[8] = tmp[VECSIZE*6+k];
tmp_cells[ii * params.nx + jj + k].speeds[5] = tmp[VECSIZE*7+k];
tmp_cells[ii * params.nx + jj + k].speeds[6] = tmp[VECSIZE*8+k];
}
}
}
}
}
return tot_u;
}
//only MASTER returns correct value
/* Compute the average velocity over all non-obstacle cells.
 * Each rank accumulates over its own slab (halo rows excluded) and the
 * per-rank partial sums are combined with MPI_Reduce, so ONLY the MASTER
 * rank receives the correct global average; other ranks return 0. */
float av_velocity(const t_param params, t_speed* cells, int* obstacles,
int rank, int size, int* ny_local, int* displs)
{
float sum_u = 0.0f; /* accumulated velocity magnitudes for this rank */
/* rows 1..ny_local[rank] belong to this rank; rows 0 and ny_local+1 are halos */
for (unsigned int row = 1; row < ny_local[rank]+1; row++)
{
for (unsigned int col = 0; col < params.nx; col++)
{
/* occupied cells contribute nothing */
if (obstacles[row*params.nx+col]) continue;
/* total density of the cell */
float density = 0.0f;
for (unsigned int s = 0; s < NSPEEDS; s++)
{
density += cells[row*params.nx+col].speeds[s];
}
t_speed* c = &cells[row*params.nx+col];
/* x-component of velocity */
float u_x = (c->speeds[1]
+ c->speeds[5]
+ c->speeds[8]
- (c->speeds[3]
+ c->speeds[6]
+ c->speeds[7]))
/ density;
/* y-component of velocity */
float u_y = (c->speeds[2]
+ c->speeds[5]
+ c->speeds[6]
- (c->speeds[4]
+ c->speeds[7]
+ c->speeds[8]))
/ density;
/* accumulate the speed (norm of the velocity components) */
sum_u += sqrt((u_x * u_x) + (u_y * u_y));
}
}
/* normalise by the number of free cells before reducing */
float ranklocal = sum_u * params.free_cells_inv;
float res = 0;
MPI_Reduce(&ranklocal, &res, 1, MPI_FLOAT, MPI_SUM, MASTER, MPI_COMM_WORLD);
return res;
}
/* Read simulation parameters and obstacles, set up the MPI row decomposition
 * and allocate/initialise this rank's grids.
 *
 * Outputs:
 *  params            - filled from 'paramfile'
 *  cells/tmp_cells   - (ny_local[rank]+2) x nx grids (2 halo rows)
 *  obstacles         - same layout; MASTER's full map is scattered row-wise
 *  av_vels           - allocated on MASTER only; NULL on other ranks
 *  av_vels_local     - per-rank accumulation buffer, zero-initialised
 *  ny_local / displs - rows owned by, and global row offset of, each rank
 *
 * Dies (via die()) on any file or allocation failure. Returns EXIT_SUCCESS. */
int initialise(char* paramfile, const char* obstaclefile,
t_param* params, t_speed** cells_ptr, t_speed** tmp_cells_ptr,
int** obstacles_ptr, float** av_vels_ptr, float** av_vels_local_ptr, int rank, int size,
int* ny_local, int* displs)
{
char message[1024]; /* message buffer */
FILE* fp;
int xx, yy; /* generic array indices */
int blocked; /* indicates whether a cell is blocked by an obstacle */
int retval; /* to hold return value for checking */
/* open the parameter file */
fp = fopen(paramfile,"r");
if (fp == NULL)
{
sprintf(message, "could not open input parameter file: %s", paramfile);
die(message, __LINE__, __FILE__);
}
/* read in the parameter values */
retval = fscanf(fp, "%d\n", &(params->nx));
if (retval != 1) die("could not read param file: nx", __LINE__, __FILE__);
retval = fscanf(fp, "%d\n", &(params->ny));
if (retval != 1) die("could not read param file: ny", __LINE__, __FILE__);
retval = fscanf(fp, "%d\n", &(params->maxIters));
if (retval != 1) die("could not read param file: maxIters", __LINE__, __FILE__);
retval = fscanf(fp, "%d\n", &(params->reynolds_dim));
if (retval != 1) die("could not read param file: reynolds_dim", __LINE__, __FILE__);
retval = fscanf(fp, "%f\n", &(params->density));
if (retval != 1) die("could not read param file: density", __LINE__, __FILE__);
retval = fscanf(fp, "%f\n", &(params->accel));
if (retval != 1) die("could not read param file: accel", __LINE__, __FILE__);
retval = fscanf(fp, "%f\n", &(params->omega));
if (retval != 1) die("could not read param file: omega", __LINE__, __FILE__);
/* and close up the file */
fclose(fp);
int numOfFreeCells = params->nx*params->ny;
/* Grids are contiguous 1D arrays indexed as row-major 2D arrays of t_speed. */
/* ******************************MPI********************************* */
/* derived datatypes: one grid row of obstacles / one grid row of cells.
 * MPI_TCELL is only a building block of MPI_ROW_OF_CELLS, so it is not
 * committed itself. */
MPI_Type_contiguous(params->nx,MPI_INT,&MPI_ROW_OF_OBSTACLES);
MPI_Type_commit(&MPI_ROW_OF_OBSTACLES);
MPI_Type_contiguous(NSPEEDS, MPI_FLOAT, &MPI_TCELL);
MPI_Type_contiguous(params->nx,MPI_TCELL,&MPI_ROW_OF_CELLS);
MPI_Type_commit(&MPI_ROW_OF_CELLS);
/* distribute ny rows over the ranks; the first 'left' ranks get one extra */
int orig_ny_local = params->ny/size;
int left = params->ny%size;
int one_for_last_rank = 0;
int one_less_for_second_to_last = 0;
//if it is less than 3 then it is 2 given that the smallest
//size is 128x128 and max rank size is 64.
if(orig_ny_local<3 && left){
left--;
one_for_last_rank = 1;
}
else if(orig_ny_local<3 && !left){
one_for_last_rank = 1;
one_less_for_second_to_last = 1;
}
//we need to make sure that the last rank gets at least 3 rows
//so that accelerate_flow will not affect other rows.
for(int proc=0;proc<size;proc++){
if(proc<size-2)
ny_local[proc] = orig_ny_local;
else if(proc == size-2)
ny_local[proc] = orig_ny_local - one_less_for_second_to_last;
else if(proc == size-1)
ny_local[proc] = orig_ny_local + one_for_last_rank;
if(proc<left) ny_local[proc]++;
if(proc == MASTER)
displs[proc] = 0;
else
displs[proc] = displs[proc-1] + ny_local[proc-1];
}
/*************************************************************************/
/* main grid (+2 rows for the halo exchange) */
*cells_ptr = (t_speed*)malloc(sizeof(t_speed) * ((ny_local[rank]+2) * params->nx));
if (*cells_ptr == NULL) die("cannot allocate memory for cells", __LINE__, __FILE__);
/* 'helper' grid, used as scratch space */
*tmp_cells_ptr = (t_speed*)malloc(sizeof(t_speed) * ((ny_local[rank]+2) * params->nx));
if (*tmp_cells_ptr == NULL) die("cannot allocate memory for tmp_cells", __LINE__, __FILE__);
/* the map of obstacles */
*obstacles_ptr = (int*)malloc(sizeof(int) * ((ny_local[rank]+2) * params->nx));//+2 not needed but makes things easier
if (*obstacles_ptr == NULL) die("cannot allocate column memory for obstacles", __LINE__, __FILE__);
/* initialise densities */
float w0 = params->density * 4.0f / 9.0f;
float w1 = params->density / 9.0f;
float w2 = params->density / 36.0f;
for (unsigned int ii = 1; ii < ny_local[rank]+1; ii++)
{
for (unsigned int jj = 0; jj < params->nx; jj++)
{
t_speed* cell = &((*cells_ptr)[ii*params->nx+jj]);
/* centre */
cell->speeds[0] = w0;
/* axis directions */
cell->speeds[1] = w1;
cell->speeds[2] = w1;
cell->speeds[3] = w1;
cell->speeds[4] = w1;
/* diagonals */
cell->speeds[5] = w2;
cell->speeds[6] = w2;
cell->speeds[7] = w2;
cell->speeds[8] = w2;
}
}
/* first set all cells in obstacle array to zero */
for (unsigned int ii = 1; ii < ny_local[rank]+1; ii++)
{
for (unsigned int jj = 0; jj < params->nx; jj++)
{
(*obstacles_ptr)[ii*params->nx+jj] = 0;
}
}
int* obstacles_all = NULL;
/* MASTER reads the full obstacle map and scatters it row-wise below */
if(rank==MASTER)
{
obstacles_all = (int*)malloc( sizeof(int) * params->nx * params->ny );
for(unsigned int ii=0;ii<params->ny;ii++){
for(unsigned int jj=0;jj<params->nx;jj++){
obstacles_all[ii*params->nx+jj] = 0;
}
}
fp = fopen(obstaclefile, "r");
if (fp == NULL)
{
sprintf(message, "could not open input obstacles file: %s", obstaclefile);
die(message, __LINE__, __FILE__);
}
/* read-in the blocked cells list */
while ((retval = fscanf(fp, "%d %d %d\n", &xx, &yy, &blocked)) != EOF)
{
/* some checks */
if (retval != 3) die("expected 3 values per line in obstacle file", __LINE__, __FILE__);
if (xx < 0 || xx > params->nx - 1) die("obstacle x-coord out of range", __LINE__, __FILE__);
if (yy < 0 || yy > params->ny - 1) die("obstacle y-coord out of range", __LINE__, __FILE__);
if (blocked != 1) die("obstacle blocked value should be 1", __LINE__, __FILE__);
/* assign to array; only count a cell the first time it is blocked so
 * duplicate entries do not skew the free-cell count */
if(obstacles_all[yy*params->nx+xx]==0)
numOfFreeCells--;
obstacles_all[yy*params->nx+xx]=blocked;
}
params->free_cells_inv = 1.0f/numOfFreeCells;
/* and close the file */
fclose(fp);
*av_vels_ptr = (float*)malloc(sizeof(float) * params->maxIters);
}
else
{
/* BUGFIX: non-MASTER ranks never allocate av_vels; define the pointer as
 * NULL so finalise() can test it without reading an indeterminate value */
*av_vels_ptr = NULL;
}
*av_vels_local_ptr = (float*)malloc(sizeof(float) * params->maxIters);
for(int it=0;it<params->maxIters;it++){
(*av_vels_local_ptr)[it] = 0.0f;
}
/* free_cells_inv was computed on MASTER only; share it with everyone */
MPI_Bcast(&(params->free_cells_inv), 1, MPI_FLOAT, MASTER, MPI_COMM_WORLD);
/* scatter each rank's obstacle rows into local rows 1..ny_local[rank]
 * (offset params->nx skips the bottom halo row) */
MPI_Scatterv(obstacles_all, ny_local, displs, MPI_ROW_OF_OBSTACLES,
&(*obstacles_ptr)[params->nx], ny_local[rank], MPI_ROW_OF_OBSTACLES,
MASTER, MPI_COMM_WORLD);
if(obstacles_all) free(obstacles_all);
return EXIT_SUCCESS;
}
/* Release all per-rank allocations made by initialise().
 * Every freed pointer is reset to NULL so an accidental second call (or a
 * later use) fails loudly instead of double-freeing.
 * av_vels is only allocated on the MASTER rank, hence the NULL test.
 * Always returns EXIT_SUCCESS. */
int finalise(const t_param* params, t_speed** cells_ptr, t_speed** tmp_cells_ptr,
int** obstacles_ptr, float** av_vels_ptr, float** av_vels_local_ptr)
{
/*
** free up allocated memory
*/
free(*cells_ptr);
*cells_ptr = NULL;
free(*tmp_cells_ptr);
*tmp_cells_ptr = NULL;
free(*obstacles_ptr);
*obstacles_ptr = NULL;
if(*av_vels_ptr != NULL)
free(*av_vels_ptr);
*av_vels_ptr = NULL;
free(*av_vels_local_ptr);
/* BUGFIX: was left dangling before; now consistent with the other pointers */
*av_vels_local_ptr = NULL;
return EXIT_SUCCESS;
}
/* Reynolds number of the simulation, derived from the average velocity.
 * Because av_velocity() reduces to MASTER, only MASTER gets a correct value. */
float calc_reynolds(const t_param params, t_speed* cells, int* obstacles,
int rank, int size, int* ny_local, int* displs)
{
const float viscosity = 1.0f / 6.0f * (2.0f / params.omega - 1.0f);
const float avg = av_velocity(params, cells, obstacles, rank, size, ny_local, displs);
return avg * params.reynolds_dim / viscosity;
}
//ONLY MASTER GETS THE CORRECT RESULT
/* Sum the densities over every cell of every rank's slab (halos excluded)
 * and reduce to MASTER. ONLY MASTER receives the correct global total. */
float total_density(const t_param params, t_speed* cells,
int rank, int size, int* ny_local, int* displs)
{
float local_total = 0.0f; /* this rank's partial sum */
for (unsigned int row = 1; row < ny_local[rank]+1; row++)
{
for (unsigned int col = 0; col < params.nx; col++)
{
for (unsigned int s = 0; s < NSPEEDS; s++)
{
local_total += cells[row*params.nx+col].speeds[s];
}
}
}
float global_total = 0;
MPI_Reduce(&local_total, &global_total, 1, MPI_FLOAT, MPI_SUM, MASTER, MPI_COMM_WORLD);
return global_total;
}
/* Write the final grid state (velocity components, speed, pressure and
 * obstacle flag per cell) to FINALSTATEFILE and, on MASTER only, the
 * per-iteration average velocities to AVVELSFILE.
 * Ranks write their slabs in rank order: each loop iteration starts with a
 * barrier and only the rank whose turn it is opens the file (MASTER
 * truncates with "w", every later rank appends with "a"), so rows come out
 * in global row order. The commented-out MPI-IO code is an abandoned
 * alternative output path; 'buff' is retained for it. */
int write_values(const t_param params, t_speed* cells, int* obstacles, float* av_vels, float* av_vels_local,
int rank, int size, int* ny_local, int* displs)
{
FILE* fp; /* file pointer */
//MPI_File fh;
char buff[100]; /* only used by the commented-out MPI-IO path */
const float c_sq = 1.0f / 3.0f; /* sq. of speed of sound */
float local_density; /* per grid cell sum of densities */
float pressure; /* fluid pressure in grid cell */
float u_x; /* x-component of velocity in grid cell */
float u_y; /* y-component of velocity in grid cell */
float u; /* norm--root of summed squares--of u_x and u_y */
//MPI_Reduce(av_vels_local, av_vels, params.maxIters, MPI_FLOAT, MPI_SUM, MASTER, MPI_COMM_WORLD);
/* token passing: rank 'proc' writes while the others wait at the barrier */
for(int proc=0;proc<size;proc++)
{
MPI_Barrier(MPI_COMM_WORLD);
if(proc==rank)
{
if(proc==MASTER)
fp = fopen(FINALSTATEFILE, "w");
else
fp = fopen(FINALSTATEFILE, "a");
if (fp == NULL)
{
die("could not open file output file", __LINE__, __FILE__);
}
//MPI_File_open(MPI_COMM_WORLD, FINALSTATEFILE,
// MPI_MODE_CREATE | MPI_MODE_WRONLY,
// MPI_INFO_NULL, &fh);
//MPI_File_set_view(fh, displs[rank]*params.nx*linesize,
// MPI_CHAR, MPI_CHAR, "native", MPI_INFO_NULL);
/* rows 1..ny_local[rank] are this rank's own rows; 0 and ny_local+1 are halos */
for (unsigned int ii = 1; ii < ny_local[rank]+1; ii++)
{
for (unsigned int jj = 0; jj < params.nx; jj++)
{
/* an occupied cell */
if (obstacles[ii*params.nx+jj])
{
u_x = u_y = u = 0.0f;
pressure = params.density * c_sq;
}
/* no obstacle */
else
{
local_density = 0.0f;
t_speed* cell = &cells[ii*params.nx+jj];
for (unsigned int kk = 0; kk < NSPEEDS; kk++)
{
local_density += cell->speeds[kk];
}
/* compute x velocity component */
u_x = (cell->speeds[1]
+ cell->speeds[5]
+ cell->speeds[8]
- (cell->speeds[3]
+ cell->speeds[6]
+ cell->speeds[7]))
/ local_density;
/* compute y velocity component */
u_y = (cell->speeds[2]
+ cell->speeds[5]
+ cell->speeds[6]
- (cell->speeds[4]
+ cell->speeds[7]
+ cell->speeds[8]))
/ local_density;
/* compute norm of velocity */
u = sqrt((u_x * u_x) + (u_y * u_y));
/* compute pressure */
pressure = local_density * c_sq;
}
/* write to file; ii-1+displs[rank] converts the local row to a global row index */
fprintf(fp, "%d %d %.12E %.12E %.12E %.12E %d\n", jj, ii-1+displs[rank], u_x, u_y, u, pressure, obstacles[ii*params.nx+jj]);
//fprintf(fp, "%04d %04d %020.12E %020.12E %020.12E %020.12E %d\n", jj, ii-1+displs[rank], u_x, u_y, u, pressure, obstacles[ii*params.nx+jj]);
//MPI_File_write(fh, buff, linesize, MPI_CHAR, MPI_STATUS_IGNORE);
}
}
fclose(fp);
}
}
//MPI_File_close(&fh);
/* av_vels is only allocated/meaningful on MASTER */
if(rank==MASTER)
{
fp = fopen(AVVELSFILE, "w");
if (fp == NULL)
{
die("could not open file output file", __LINE__, __FILE__);
}
for (unsigned int ii = 0; ii < params.maxIters; ii++)
{
fprintf(fp, "%d:\t%.12E\n", ii, av_vels[ii]);
}
fclose(fp);
}
return EXIT_SUCCESS;
}
/* Report a fatal error with its source location to stderr and terminate.
 * Never returns. The output bytes are identical to the previous two-call
 * formulation; the flush guarantees the message survives the exit. */
void die(const char* message, const int line, const char* file)
{
fprintf(stderr, "Error at line %d of file %s:\n%s\n", line, file, message);
fflush(stderr);
exit(EXIT_FAILURE);
}
/* Print a usage reminder for the given executable name to stderr and
 * terminate with failure. Never returns. */
void usage(const char* exe)
{
fprintf(stderr, "Usage: %s <paramfile> <obstaclefile>\n", exe);
exit(EXIT_FAILURE);
}
|
fixed_size_vector.h | // -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & Newcastle University for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef CORE_CONTAINER_FIXED_SIZE_VECTOR_H_
#define CORE_CONTAINER_FIXED_SIZE_VECTOR_H_
#include <cassert>
#include <cstddef>
namespace bdm {
/// Vector with fixed number of elements == Array with push_back function that
/// keeps track of its size
/// NB: No bounds checking. Do not push_back more often than the number of
/// maximum elements given by the template parameter N
/// Vector with a fixed maximum capacity N: an array plus a push_back that
/// tracks the current size.
/// NB: no bounds checking beyond a debug-build assert. Do not push_back more
/// often than the maximum number of elements given by the template parameter N.
template <typename T, std::size_t N>
class FixedSizeVector {
 public:
  /// Number of elements currently stored (<= N).
  size_t size() const { return size_; }  // NOLINT

  const T& operator[](size_t idx) const { return data_[idx]; }
  T& operator[](size_t idx) { return data_[idx]; }

  /// Pre-increment every stored element.
  /// Fix: iterate only over the first size_ elements. The previous version
  /// looped to N, incrementing elements past size_ that were never assigned
  /// (indeterminate for trivial T) — undefined behavior whenever the vector
  /// is not full. For a full vector the behavior is unchanged.
  FixedSizeVector& operator++() {
#pragma omp simd
    for (size_t i = 0; i < size_; i++) {
      ++data_[i];
    }
    return *this;
  }

  /// Reset to empty; stored elements are not destroyed.
  void clear() { size_ = 0; }  // NOLINT

  /// Append a copy of value; asserts (debug builds only) that the capacity
  /// N is not exceeded.
  void push_back(const T& value) {  // NOLINT
    assert(size_ < N);
    data_[size_++] = value;
  }

  const T* begin() const { return &(data_[0]); }    // NOLINT
  const T* end() const { return &(data_[size_]); }  // NOLINT
  T* begin() { return &(data_[0]); }                // NOLINT
  T* end() { return &(data_[size_]); }              // NOLINT

 private:
  T data_[N];              // fixed-capacity backing storage
  std::size_t size_ = 0;   // number of valid elements in data_
};
} // namespace bdm
#endif // CORE_CONTAINER_FIXED_SIZE_VECTOR_H_
|
csf.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "csf.h"
#include "sort.h"
#include "tile.h"
#include "util.h"
#include "thread_partition.h"
#include "io.h"
/******************************************************************************
* API FUNCTIONS
*****************************************************************************/
/* Load a sparse tensor from 'fname' and convert it to CSF form.
 * On success *tensors and *nmodes are filled and SPLATT_SUCCESS is returned;
 * if the file cannot be read, SPLATT_ERROR_BADINPUT is returned instead. */
int splatt_csf_load(
    char const * const fname,
    splatt_idx_t * nmodes,
    splatt_csf ** tensors,
    double const * const options)
{
  sptensor_t * coords = tt_read(fname);
  if(coords == NULL) {
    return SPLATT_ERROR_BADINPUT;
  }
  /* strip empty slices before allocating the CSF structures */
  tt_remove_empty(coords);
  *tensors = csf_alloc(coords, options);
  *nmodes = coords->nmodes;
  tt_free(coords);
  return SPLATT_SUCCESS;
}
/* Wrap caller-owned coordinate data (inds/vals) in a stack sptensor_t and
 * convert it to CSF. The caller retains ownership of inds and vals.
 * Always returns SPLATT_SUCCESS. */
int splatt_csf_convert(
    splatt_idx_t const nmodes,
    splatt_idx_t const nnz,
    splatt_idx_t ** const inds,
    splatt_val_t * const vals,
    splatt_csf ** tensors,
    double const * const options)
{
  sptensor_t coords;
  tt_fill(&coords, nnz, nmodes, inds, vals);
  /* strip empty slices before allocating the CSF structures */
  tt_remove_empty(&coords);
  *tensors = csf_alloc(&coords, options);
  return SPLATT_SUCCESS;
}
/* Public API wrapper: release the CSF representation(s) produced by
 * csf_alloc()/splatt_csf_load(). Forwards to csf_free(), which consults
 * options[SPLATT_OPTION_CSF_ALLOC] to decide how many tensors to free. */
void free_csf(
splatt_csf * tensors,
double const * const options)
{
csf_free(tensors, options);
}
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Count the nonzeros below a given node in a CSF tensor.
*
* @param fptr The adjacency pointer of the CSF tensor.
* @param nmodes The number of modes in the tensor.
* @param depth The depth of the node
* @param fiber The id of the node.
*
* @return The nonzeros below fptr[depth][fiber].
*/
/* Count the nonzeros below node fptr[depth][fiber] by following its
 * [left, right) child range down to the leaf level; the difference at the
 * leaves is exactly the number of nonzeros in the subtree. */
idx_t p_csf_count_nnz(
    idx_t * * fptr,
    idx_t const nmodes,
    idx_t depth,
    idx_t const fiber)
{
  /* a node at the leaf level is exactly one nonzero */
  if(depth == nmodes-1) {
    return 1;
  }
  idx_t left  = fptr[depth][fiber];
  idx_t right = fptr[depth][fiber+1];
  /* descend, re-resolving the range at each level */
  while(++depth < nmodes-1) {
    left  = fptr[depth][left];
    right = fptr[depth][right];
  }
  return right - left;
}
/**
* @brief Find a permutation of modes that results in non-decreasing mode size.
*
* @param dims The tensor dimensions.
* @param nmodes The number of modes.
* @param perm_dims The resulting permutation.
*/
/* Fill perm_dims with the modes ordered by non-decreasing dimension size. */
static void p_order_dims_small(
    idx_t const * const dims,
    idx_t const nmodes,
    idx_t * const perm_dims)
{
  idx_t sorted[MAX_NMODES];
  idx_t matched[MAX_NMODES];
  for(idx_t m=0; m < nmodes; ++m) {
    sorted[m] = dims[m];
    matched[m] = 0;
  }
  /* sort the dimension sizes ascending */
  quicksort(sorted, nmodes);
  /* map each sorted size back to the first unmatched mode with that size;
   * 'matched' disambiguates duplicate dimension sizes.
   * TODO: make a key/val sort... */
  for(idx_t pos=0; pos < nmodes; ++pos) {
    idx_t m = 0;
    while(sorted[pos] != dims[m] || matched[m]) {
      ++m;
    }
    perm_dims[pos] = m;
    matched[m] = 1;
  }
}
/**
* @brief Find a permutation of modes such that the first mode is 'custom-mode'
* and the remaining are naturally ordered (0, 1, ...).
*
* @param dims The tensor dimensions.
* @param nmodes The number of modes.
* @param custom_mode The mode to place first.
* @param perm_dims The resulting permutation.
*/
/* Fill perm_dims with custom_mode first and the remaining modes in their
 * natural order (0, 1, ...). */
static void p_order_dims_inorder(
    idx_t const * const dims,
    idx_t const nmodes,
    idx_t const custom_mode,
    idx_t * const perm_dims)
{
  /* start from the natural ordering 0, 1, ..., nmodes-1 */
  for(idx_t m=0; m < nmodes; ++m) {
    perm_dims[m] = m;
  }
  /* rotate custom_mode to the front, shifting earlier entries right by one */
  for(idx_t m=0; m < nmodes; ++m) {
    if(perm_dims[m] != custom_mode) {
      continue;
    }
    memmove(perm_dims + 1, perm_dims, (m) * sizeof(m));
    perm_dims[0] = custom_mode;
    break;
  }
}
/**
* @brief Find a permutation of modes such that the first mode is 'custom-mode'
* and the remaining are sorted in non-decreasing order.
*
* @param dims The tensor dimensions.
* @param nmodes The number of modes.
* @param custom_mode The mode to place first.
* @param perm_dims The resulting permutation.
*/
/* Fill perm_dims with custom_mode first and the remaining modes sorted by
 * dimension size (via p_order_dims_small). */
static void p_order_dims_minusone(
    idx_t const * const dims,
    idx_t const nmodes,
    idx_t const custom_mode,
    idx_t * const perm_dims)
{
  /* size-sorted permutation first */
  p_order_dims_small(dims, nmodes, perm_dims);
  /* rotate custom_mode to the front, shifting earlier entries right by one */
  for(idx_t m=0; m < nmodes; ++m) {
    if(perm_dims[m] != custom_mode) {
      continue;
    }
    memmove(perm_dims + 1, perm_dims, (m) * sizeof(m));
    perm_dims[0] = custom_mode;
    break;
  }
}
/**
* @brief Find a permutation of modes that results in non-increasing mode size.
*
* @param dims The tensor dimensions.
* @param nmodes The number of modes.
* @param perm_dims The resulting permutation.
*/
/* Fill perm_dims with the modes ordered by non-increasing dimension size. */
static void p_order_dims_large(
    idx_t const * const dims,
    idx_t const nmodes,
    idx_t * const perm_dims)
{
  idx_t sorted[MAX_NMODES];
  idx_t matched[MAX_NMODES];
  for(idx_t m=0; m < nmodes; ++m) {
    sorted[m] = dims[m];
    matched[m] = 0;
  }
  /* ascending sort, then reverse in place for large -> small */
  quicksort(sorted, nmodes);
  for(idx_t m=0; m < nmodes/2; ++m) {
    idx_t const tmp = sorted[m];
    sorted[m] = sorted[nmodes-m-1];
    sorted[nmodes-m-1] = tmp;
  }
  /* map each sorted size back to the first unmatched mode with that size;
   * 'matched' disambiguates duplicate dimension sizes.
   * TODO: make a key/val sort... */
  for(idx_t pos=0; pos < nmodes; ++pos) {
    idx_t m = 0;
    while(sorted[pos] != dims[m] || matched[m]) {
      ++m;
    }
    perm_dims[pos] = m;
    matched[m] = 1;
  }
}
/**
* @brief Construct the sparsity structure of the outer-mode of a CSF tensor.
*
* @param ct The CSF tensor to construct.
* @param tt The coordinate tensor to construct from. Assumed to be already
* sorted.
* @param tile_id The ID of the tile to construct.
* @param nnztile_ptr A pointer into 'tt' that marks the start of each tile.
*/
/* Build the root-level fptr/fids of one tile. Two parallel passes over the
 * (already sorted) nonzeros: (1) each thread counts the fibers (distinct
 * top-level indices) in its partition, (2) after a prefix sum gives each
 * thread its write offset, the nonzeros are re-scanned to fill fptr/fids. */
static void p_mk_outerptr(
splatt_csf * const ct,
sptensor_t const * const tt,
idx_t const tile_id,
idx_t const * const nnztile_ptr)
{
idx_t const nnzstart = nnztile_ptr[tile_id];
idx_t const nnzend = nnztile_ptr[tile_id+1];
assert(nnzstart < nnzend);
idx_t const nnz = nnzend - nnzstart;
/* grab sparsity pattern */
csf_sparsity * const pt = ct->pt + tile_id;
/* grab top-level indices (offset into this tile's nonzeros) */
idx_t const * const restrict ttind =
nnzstart + tt->ind[csf_depth_to_mode(ct, 0)];
/* partition among threads */
int const nthreads = splatt_omp_get_max_threads();
idx_t * thread_parts = partition_simple(nnz, nthreads);
idx_t * thread_nfibs = malloc((nthreads+1) * sizeof(*thread_nfibs));
/* Fibers are counted by differing indices -- count at least one fiber */
thread_nfibs[0] = 1;
#pragma omp parallel
{
int const tid = splatt_omp_get_thread_num();
idx_t const nnz_start = SS_MAX(thread_parts[tid], 1); /* skip first nz */
idx_t const nnz_end = thread_parts[tid+1];
/* pass 1: count fibers in each thread's partition */
idx_t local_nfibs = 0;
for(idx_t x=nnz_start; x < nnz_end; ++x) {
assert(ttind[x-1] <= ttind[x]);
if(ttind[x] != ttind[x-1]) {
++local_nfibs;
}
}
thread_nfibs[tid+1] = local_nfibs; /* +1 for prefix sum */
#pragma omp barrier
#pragma omp single
{
/* prefix sum on # fibers -> per-thread write offsets and the total */
for(int t=0; t < nthreads; ++t) {
thread_nfibs[t+1] += thread_nfibs[t];
}
idx_t const nfibs = thread_nfibs[nthreads];
ct->pt[tile_id].nfibs[0] = nfibs;
assert(nfibs <= ct->dims[csf_depth_to_mode(ct, 0)]);
pt->fptr[0] = malloc((nfibs+1) * sizeof(**(pt->fptr)));
/* only store top-level fids if we are tiling or there are gaps */
if((ct->ntiles > 1) || (tt->dims[csf_depth_to_mode(ct, 0)] != nfibs)) {
pt->fids[0] = malloc(nfibs * sizeof(**(pt->fids)));
pt->fids[0][0] = ttind[0];
} else {
pt->fids[0] = NULL;
}
pt->fptr[0][0] = 0;
pt->fptr[0][nfibs] = nnz;
} /* implied barrier */
idx_t * const restrict fp = pt->fptr[0];
idx_t * const restrict fi = pt->fids[0];
/* pass 2: go back over non-zeros and mark fptr and fids, starting at this
 * thread's prefix-sum offset */
idx_t nfound = thread_nfibs[tid];
if(fi == NULL) {
for(idx_t n=nnz_start; n < nnz_end; ++n) {
/* check for end of outer index */
if(ttind[n] != ttind[n-1]) {
fp[nfound++] = n;
}
}
} else {
for(idx_t n=nnz_start; n < nnz_end; ++n) {
/* check for end of outer index */
if(ttind[n] != ttind[n-1]) {
fi[nfound] = ttind[n];
fp[nfound++] = n;
}
}
}
} /* end omp parallel */
free(thread_parts);
free(thread_nfibs);
}
/**
* @brief Construct the sparsity structure of any mode but the last. The first
* (root) mode is handled by p_mk_outerptr and the first is simply a copy
* of the nonzeros.
*
* @param ct The CSF tensor to construct.
* @param tt The coordinate tensor to construct from. Assumed to be already
* sorted.
* @param tile_id The ID of the tile to construct.
* @param nnztile_ptr A pointer into 'tt' that marks the start of each tile.
* @param mode Which mode we are constructing.
*/
/* Build the fptr/fids of one tile at level 'mode' (root handled by
 * p_mk_outerptr; the leaf level is a plain copy of the nonzeros elsewhere).
 * Same count/prefix-sum/fill scheme as p_mk_outerptr. NOTE: fprev (the
 * parent level's fptr) is rewritten in place from nonzero offsets to child
 * fiber offsets, which is why fprev_end is saved before the fill pass. */
static void p_mk_fptr(
splatt_csf * const ct,
sptensor_t const * const tt,
idx_t const tile_id,
idx_t const * const nnztile_ptr,
idx_t const mode)
{
assert(mode < ct->nmodes);
idx_t const nnzstart = nnztile_ptr[tile_id];
idx_t const nnzend = nnztile_ptr[tile_id+1];
idx_t const nnz = nnzend - nnzstart;
/* outer mode is easy; just look at outer indices */
if(mode == 0) {
p_mk_outerptr(ct, tt, tile_id, nnztile_ptr);
return;
}
/* the mode after accounting for dim_perm */
idx_t const * const restrict ttind =
nnzstart + tt->ind[csf_depth_to_mode(ct, mode)];
/* grab sparsity pattern */
csf_sparsity * const pt = ct->pt + tile_id;
/* we will edit this to point to the new fiber idxs instead of nnz */
idx_t * const restrict fprev = pt->fptr[mode-1];
/* partition among threads */
int const nthreads = splatt_omp_get_max_threads();
idx_t * thread_parts = partition_simple(pt->nfibs[mode-1], nthreads);
idx_t * thread_nfibs = malloc((nthreads+1) * sizeof(*thread_nfibs));
thread_nfibs[0] = 0;
#pragma omp parallel
{
int const tid = splatt_omp_get_thread_num();
idx_t const slice_start = thread_parts[tid];
idx_t const slice_end = thread_parts[tid+1];
/* first count nfibers */
/* foreach 'slice' in the previous dimension */
idx_t local_nfibs = 0;
for(idx_t s=slice_start; s < slice_end; ++s) {
++local_nfibs; /* one by default per 'slice' */
/* count fibers in current hyperplane*/
for(idx_t f=fprev[s]+1; f < fprev[s+1]; ++f) {
if(ttind[f] != ttind[f-1]) {
++local_nfibs;
}
}
}
thread_nfibs[tid+1] = local_nfibs; /* +1 for prefix sum */
/* save this before the fill pass overwrites fprev[] in place */
idx_t const fprev_end = fprev[slice_end];
#pragma omp barrier
#pragma omp single
{
/* prefix sum on # fibers -> per-thread write offsets and the total */
for(int t=0; t < nthreads; ++t) {
thread_nfibs[t+1] += thread_nfibs[t];
}
idx_t const nfibs = thread_nfibs[nthreads];
pt->nfibs[mode] = nfibs;
pt->fptr[mode] = malloc((nfibs+1) * sizeof(**(pt->fptr)));
pt->fptr[mode][0] = 0;
pt->fids[mode] = malloc(nfibs * sizeof(**(pt->fids)));
} /* implied barrier */
idx_t * const restrict fp = pt->fptr[mode];
idx_t * const restrict fi = pt->fids[mode];
/* now fill in fiber info, starting at this thread's prefix-sum offset */
idx_t nfound = thread_nfibs[tid];
for(idx_t s=slice_start; s < slice_end; ++s) {
idx_t const start = fprev[s]+1;
/* fprev[s+1] may already be rewritten for the last slice; use the saved end */
idx_t const end = (s == slice_end - 1) ? fprev_end : fprev[s+1];
/* mark start of subtree */
fprev[s] = nfound;
fi[nfound] = ttind[start-1];
fp[nfound++] = start-1;
/* mark fibers in current hyperplane */
for(idx_t f=start; f < end; ++f) {
if(ttind[f] != ttind[f-1]) {
fi[nfound] = ttind[f];
fp[nfound++] = f;
}
}
}
/* mark end of last hyperplane */
if(tid == nthreads - 1) {
fprev[pt->nfibs[mode-1]] = thread_nfibs[nthreads];
fp[thread_nfibs[nthreads]] = nnz;
}
} /* end omp parallel */
free(thread_parts);
free(thread_nfibs);
}
/**
* @brief Allocate and fill a CSF tensor from a coordinate tensor without
* tiling.
*
* @param ct The CSF tensor to fill out.
* @param tt The sparse tensor to start from.
*/
/* Fill a CSF tensor from a coordinate tensor without tiling: sort the
 * nonzeros by dim_perm, copy the leaf level (indices + values) directly,
 * then build fptr/fids for every level above the leaves via p_mk_fptr. */
static void p_csf_alloc_untiled(
splatt_csf * const ct,
sptensor_t * const tt)
{
idx_t const nmodes = tt->nmodes;
/* sort the nonzeros into the CSF mode ordering */
tt_sort(tt, ct->dim_perm[0], ct->dim_perm);
/* a single tile covering everything */
ct->ntiles = 1;
ct->ntiled_modes = 0;
for(idx_t m=0; m < nmodes; ++m) {
ct->tile_dims[m] = 1;
}
ct->pt = malloc(sizeof(*(ct->pt)));
csf_sparsity * const pt = ct->pt;
/* last row of fptr is just nonzero inds */
pt->nfibs[nmodes-1] = ct->nnz;
pt->fids[nmodes-1] = malloc(ct->nnz * sizeof(**(pt->fids)));
pt->vals = malloc(ct->nnz * sizeof(*(pt->vals)));
par_memcpy(pt->fids[nmodes-1], tt->ind[csf_depth_to_mode(ct, nmodes-1)],
ct->nnz * sizeof(**(pt->fids)));
par_memcpy(pt->vals, tt->vals, ct->nnz * sizeof(*(pt->vals)));
/* setup a basic tile ptr for one tile */
idx_t nnz_ptr[2];
nnz_ptr[0] = 0;
nnz_ptr[1] = tt->nnz;
/* create fptr entries for the rest of the modes, working down from roots.
* Skip the bottom level (nnz) */
for(idx_t m=0; m < tt->nmodes-1; ++m) {
p_mk_fptr(ct, tt, 0, nnz_ptr, m);
}
}
/**
* @brief Reorder the nonzeros in a sparse tensor using dense tiling and fill
* a CSF tensor with the data.
*
* @param ct The CSF tensor to fill.
* @param tt The sparse tensor to start from.
* @param splatt_opts Options array for SPLATT - used for tile dimensions.
*/
/* Fill a CSF tensor from a coordinate tensor using dense tiling: the bottom
 * SPLATT_OPTION_TILELEVEL levels are split into SPLATT_OPTION_NTHREADS tiles
 * per mode, the nonzeros are reordered tile-by-tile, and each tile gets its
 * own sparsity structure. */
static void p_csf_alloc_densetile(
splatt_csf * const ct,
sptensor_t * const tt,
double const * const splatt_opts)
{
idx_t const nmodes = tt->nmodes;
/* how many levels we tile (counting from the bottom) */
ct->ntiled_modes = (idx_t)splatt_opts[SPLATT_OPTION_TILELEVEL];
ct->ntiled_modes = SS_MIN(ct->ntiled_modes, ct->nmodes);
/* how many levels from the root do we start tiling? */
idx_t const tile_depth = ct->nmodes - ct->ntiled_modes;
idx_t ntiles = 1;
for(idx_t m=0; m < nmodes; ++m) {
idx_t const depth = csf_mode_to_depth(ct, m);
if(depth >= tile_depth) {
ct->tile_dims[m] = (idx_t) splatt_opts[SPLATT_OPTION_NTHREADS];
} else {
ct->tile_dims[m] = 1;
}
ntiles *= ct->tile_dims[m];
}
/* perform tensor tiling: sort, then group nonzeros by tile.
 * nnz_ptr[t] marks where tile t's nonzeros begin. */
tt_sort(tt, ct->dim_perm[0], ct->dim_perm);
idx_t * nnz_ptr = tt_densetile(tt, ct->tile_dims);
ct->ntiles = ntiles;
ct->pt = malloc(ntiles * sizeof(*(ct->pt)));
for(idx_t t=0; t < ntiles; ++t) {
idx_t const startnnz = nnz_ptr[t];
idx_t const endnnz = nnz_ptr[t+1];
idx_t const ptnnz = endnnz - startnnz;
csf_sparsity * const pt = ct->pt + t;
/* empty tile: NULL everything except a dummy 2-entry root fptr */
if(ptnnz == 0) {
for(idx_t m=0; m < ct->nmodes; ++m) {
pt->fptr[m] = NULL;
pt->fids[m] = NULL;
pt->nfibs[m] = 0;
}
/* first fptr may be accessed anyway */
pt->fptr[0] = (idx_t *) malloc(2 * sizeof(**(pt->fptr)));
pt->fptr[0][0] = 0;
pt->fptr[0][1] = 0;
pt->vals = NULL;
continue;
}
idx_t const leaves = nmodes-1;
/* last row of fptr is just nonzero inds */
pt->nfibs[leaves] = ptnnz;
pt->fids[leaves] = malloc(ptnnz * sizeof(**(pt->fids)));
par_memcpy(pt->fids[leaves], tt->ind[csf_depth_to_mode(ct, leaves)] + startnnz,
ptnnz * sizeof(**(pt->fids)));
pt->vals = malloc(ptnnz * sizeof(*(pt->vals)));
par_memcpy(pt->vals, tt->vals + startnnz, ptnnz * sizeof(*(pt->vals)));
/* create fptr entries for the rest of the modes */
for(idx_t m=0; m < leaves; ++m) {
p_mk_fptr(ct, tt, t, nnz_ptr, m);
}
}
free(nnz_ptr);
}
/**
* @brief Construct dim_iperm, which is the inverse of dim_perm.
*
* @param ct The CSF tensor.
*/
/* Build dim_iperm, the inverse of dim_perm:
 * dim_iperm[mode] is the CSF level at which 'mode' is stored. */
static void p_fill_dim_iperm(
    splatt_csf * const ct)
{
  for(idx_t level=0; level < ct->nmodes; ++level) {
    idx_t const mode = ct->dim_perm[level];
    ct->dim_iperm[mode] = level;
  }
}
/**
* @brief Allocate and fill a CSF tensor.
*
* @param ct The CSF tensor to fill.
* @param tt The coordinate tensor to work from.
* @param mode_type The allocation scheme for the CSF tensor.
* @param mode Which mode we are converting for (if applicable).
* @param splatt_opts Used to determine tiling scheme.
*/
/* Allocate and fill one CSF representation: copy the basic shape from tt,
 * choose the mode ordering per mode_type/mode, then dispatch on the tiling
 * option to the untiled or dense-tiled construction path. Unknown tiling
 * values are reported to stderr and leave the sparsity pattern unbuilt. */
static void p_mk_csf(
splatt_csf * const ct,
sptensor_t * const tt,
csf_mode_type mode_type,
idx_t const mode,
double const * const splatt_opts)
{
/* copy basic dimensions */
ct->nnz = tt->nnz;
ct->nmodes = tt->nmodes;
for(idx_t m=0; m < tt->nmodes; ++m) {
ct->dims[m] = tt->dims[m];
}
/* get the indices in order (and the inverse permutation) */
csf_find_mode_order(tt->dims, tt->nmodes, mode_type, mode, ct->dim_perm);
p_fill_dim_iperm(ct);
ct->which_tile = splatt_opts[SPLATT_OPTION_TILE];
switch(ct->which_tile) {
case SPLATT_NOTILE:
p_csf_alloc_untiled(ct, tt);
break;
case SPLATT_DENSETILE:
p_csf_alloc_densetile(ct, tt, splatt_opts);
break;
default:
fprintf(stderr, "SPLATT: tiling '%d' unsupported for CSF tensors.\n",
ct->which_tile);
break;
}
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/* Free an array of CSF representations. The allocation option decides how
 * many representations were built (one, two, or one per mode). */
void csf_free(
    splatt_csf * const csf,
    double const * const opts)
{
  idx_t ntensors = 0;
  splatt_csf_type const which = opts[SPLATT_OPTION_CSF_ALLOC];
  switch(which) {
  case SPLATT_CSF_ONEMODE:
    ntensors = 1;
    break;
  case SPLATT_CSF_TWOMODE:
    ntensors = 2;
    break;
  case SPLATT_CSF_ALLMODE:
    ntensors = csf[0].nmodes;
    break;
  }
  /* free each representation's internals, then the array itself */
  for(idx_t i=0; i < ntensors; ++i) {
    csf_free_mode(csf + i);
  }
  free(csf);
}
/**
* @brief Free the sparsity structure (vals, fids, fptr) of a single CSF
*        representation. Does NOT free 'csf' itself.
*
* @param csf The CSF tensor whose tiles are released.
*/
void csf_free_mode(
    splatt_csf * const csf)
{
  idx_t const nmodes = csf->nmodes;
  /* release every tile's sparsity pattern */
  for(idx_t tile=0; tile < csf->ntiles; ++tile) {
    free(csf->pt[tile].vals);
    free(csf->pt[tile].fids[nmodes-1]);
    for(idx_t depth=0; depth < nmodes-1; ++depth) {
      free(csf->pt[tile].fptr[depth]);
      free(csf->pt[tile].fids[depth]);
    }
  }
  free(csf->pt);
}
/**
* @brief Compute a permutation of the modes according to the requested
*        ordering strategy and write it to perm_dims.
*
* @param dims The dimensions of the tensor.
* @param nmodes The number of modes.
* @param which The ordering strategy.
* @param mode The mode emphasized by the strategy (if applicable).
* @param perm_dims Output permutation; for CSF_MODE_CUSTOM it must already
*                  be filled by the caller and is left untouched.
*/
void csf_find_mode_order(
    idx_t const * const dims,
    idx_t const nmodes,
    csf_mode_type which,
    idx_t const mode,
    idx_t * const perm_dims)
{
  if(which == CSF_SORTED_SMALLFIRST) {
    p_order_dims_small(dims, nmodes, perm_dims);
  } else if(which == CSF_SORTED_BIGFIRST) {
    p_order_dims_large(dims, nmodes, perm_dims);
  } else if(which == CSF_INORDER_MINUSONE) {
    p_order_dims_inorder(dims, nmodes, mode, perm_dims);
  } else if(which == CSF_SORTED_MINUSONE) {
    p_order_dims_minusone(dims, nmodes, mode, perm_dims);
  } else if(which == CSF_MODE_CUSTOM) {
    /* no-op: caller supplied perm_dims directly */
  } else {
    fprintf(stderr, "SPLATT: csf_mode_type '%d' not recognized.\n", which);
  }
}
/**
* @brief Total bytes used by the sparsity structures of all CSF
*        representations in 'tensors'.
*
* @param tensors Array of CSF tensors (count derived from opts).
* @param opts Options array; opts[SPLATT_OPTION_CSF_ALLOC] selects how many
*             representations were allocated.
*
* @return Number of bytes across vals, fids, fptr, and tile metadata.
*/
size_t csf_storage(
    splatt_csf const * const tensors,
    double const * const opts)
{
  idx_t ntensors = 0;
  splatt_csf_type which_alloc = opts[SPLATT_OPTION_CSF_ALLOC];
  switch(which_alloc) {
  case SPLATT_CSF_ONEMODE:
    ntensors = 1;
    break;
  case SPLATT_CSF_TWOMODE:
    ntensors = 2;
    break;
  case SPLATT_CSF_ALLMODE:
    ntensors = tensors[0].nmodes;
    break;
  }

  size_t bytes = 0;
  for(idx_t i=0; i < ntensors; ++i) {
    splatt_csf const * const ct = tensors + i;
    bytes += ct->nnz * sizeof(*(ct->pt->vals)); /* vals */
    bytes += ct->nnz * sizeof(**(ct->pt->fids)); /* fids[nmodes] */
    bytes += ct->ntiles * sizeof(*(ct->pt)); /* pt */

    for(idx_t t=0; t < ct->ntiles; ++t) {
      csf_sparsity const * const pt = ct->pt + t;
      /* fix: the original reused 'm' here, shadowing the tensor index */
      for(idx_t depth=0; depth < ct->nmodes-1; ++depth) {
        bytes += (pt->nfibs[depth]+1) * sizeof(**(pt->fptr)); /* fptr */
        if(pt->fids[depth] != NULL) {
          bytes += pt->nfibs[depth] * sizeof(**(pt->fids)); /* fids */
        }
      }
    }
  }
  return bytes;
}
/**
* @brief Allocate and fill one or more CSF representations of 'tt',
*        depending on opts[SPLATT_OPTION_CSF_ALLOC]:
*        ONEMODE -> 1 tensor, TWOMODE -> 2 tensors (second untiled, keyed on
*        the deepest mode of the first), ALLMODE -> one tensor per mode.
*
* @param tt The coordinate tensor to convert.
* @param opts SPLATT options array.
*
* @return Newly malloc'd array of CSF tensors; free with csf_free().
*/
splatt_csf * csf_alloc(
    sptensor_t * const tt,
    double const * const opts)
{
  splatt_csf * ret = NULL;
  double * tmp_opts = NULL;
  idx_t last_mode = 0;
  /* removed unused local 'int tmp' from the original */
  switch((splatt_csf_type) opts[SPLATT_OPTION_CSF_ALLOC]) {
  case SPLATT_CSF_ONEMODE:
    ret = malloc(sizeof(*ret));
    p_mk_csf(ret, tt, CSF_SORTED_SMALLFIRST, 0, opts);
    break;

  case SPLATT_CSF_TWOMODE:
    ret = malloc(2 * sizeof(*ret));
    /* regular CSF allocation */
    p_mk_csf(ret + 0, tt, CSF_SORTED_SMALLFIRST, 0, opts);

    /* make a copy of opts and don't tile the last mode
     * TODO make this configurable? */
    tmp_opts = splatt_default_opts();
    memcpy(tmp_opts, opts, SPLATT_OPTION_NOPTIONS * sizeof(*opts));
    tmp_opts[SPLATT_OPTION_TILE] = SPLATT_NOTILE;

    /* allocate with no tiling for the last mode */
    last_mode = csf_depth_to_mode(&(ret[0]), tt->nmodes-1);
    p_mk_csf(ret + 1, tt, CSF_SORTED_MINUSONE, last_mode, tmp_opts);
    free(tmp_opts);
    break;

  case SPLATT_CSF_ALLMODE:
    ret = malloc(tt->nmodes * sizeof(*ret));
    for(idx_t m=0; m < tt->nmodes; ++m) {
      p_mk_csf(ret + m, tt, CSF_SORTED_MINUSONE, m, opts);
    }
    break;
  }

  return ret;
}
/**
* @brief Fill a caller-allocated CSF tensor from coordinate tensor 'tt'
*        with an explicitly chosen mode ordering, instead of deriving it
*        from SPLATT_OPTION_CSF_ALLOC as csf_alloc() does.
*
* @param tt The coordinate tensor to convert (non-const: downstream
*           allocation may reorder it — confirm with p_mk_csf's callees).
* @param which_ordering Mode-ordering strategy.
* @param mode_special The mode emphasized by the ordering (if applicable).
* @param csf Caller-allocated CSF structure to fill.
* @param opts SPLATT options; opts[SPLATT_OPTION_TILE] picks the tiling.
*/
void csf_alloc_mode(
sptensor_t * const tt,
csf_mode_type which_ordering,
idx_t const mode_special,
splatt_csf * const csf,
double const * const opts)
{
p_mk_csf(csf, tt, which_ordering, mode_special, opts);
}
/**
* @brief Squared Frobenius norm of a CSF tensor: the sum of the squares of
*        all stored nonzero values, across all tiles.
*
* @param tensor The CSF tensor.
*
* @return The squared Frobenius norm, accumulated in double and cast back
*         to val_t.
*/
val_t csf_frobsq(
splatt_csf const * const tensor)
{
/* accumulate into double to help with some precision loss */
double norm = 0;
/* one parallel region; each 'omp for' below splits a tile's nonzeros
 * among the threads and 'nowait' lets threads move on to the next tile
 * without a barrier — safe because every iteration only adds into the
 * reduction variable. */
#pragma omp parallel reduction(+:norm)
{
for(idx_t t=0; t < tensor->ntiles; ++t) {
val_t const * const vals = tensor->pt[t].vals;
/* tiles with no nonzeros store a NULL vals pointer */
if(vals == NULL) {
continue;
}
/* nfibs at the deepest level counts this tile's nonzeros */
idx_t const nnz = tensor->pt[t].nfibs[tensor->nmodes-1];
#pragma omp for schedule(static) nowait
for(idx_t n=0; n < nnz; ++n) {
norm += vals[n] * vals[n];
}
}
} /* end omp parallel */
return (val_t) norm;
}
/**
* @brief Partition the slices of one tile into 'nparts' contiguous chunks
*        with balanced nonzero counts.
*
* @param csf The CSF tensor.
* @param tile_id Which tile to partition.
* @param nparts Number of partitions.
*
* @return Newly malloc'd array of nparts+1 slice boundaries; caller frees.
*/
idx_t * csf_partition_1d(
    splatt_csf const * const csf,
    idx_t const tile_id,
    idx_t const nparts)
{
  idx_t const nslices = csf->pt[tile_id].nfibs[0];

  /* per-slice nonzero counts serve as partition weights */
  idx_t * slice_weights = malloc(nslices * sizeof(*slice_weights));
  #pragma omp parallel for schedule(static)
  for(idx_t s=0; s < nslices; ++s) {
    slice_weights[s] = p_csf_count_nnz(csf->pt[tile_id].fptr, csf->nmodes, 0, s);
  }

  idx_t bottleneck;
  idx_t * parts = partition_weighted(slice_weights, nslices, nparts, &bottleneck);

  free(slice_weights);
  return parts;
}
/**
* @brief Partition the tiles of a CSF tensor into 'nparts' contiguous chunks
*        with balanced nonzero counts.
*
* @param csf The CSF tensor.
* @param nparts Number of partitions.
*
* @return Newly malloc'd array of nparts+1 tile boundaries; caller frees.
*/
idx_t * csf_partition_tiles_1d(
    splatt_csf const * const csf,
    idx_t const nparts)
{
  idx_t const nmodes = csf->nmodes;
  idx_t const ntiles = csf->ntiles;

  /* each tile is weighted by its nonzero count (deepest-level nfibs) */
  idx_t * tile_weights = malloc(ntiles * sizeof(*tile_weights));
  #pragma omp parallel for schedule(static)
  for(idx_t t=0; t < ntiles; ++t) {
    tile_weights[t] = csf->pt[t].nfibs[nmodes-1];
  }

  idx_t bottleneck;
  idx_t * parts = partition_weighted(tile_weights, ntiles, nparts, &bottleneck);

  free(tile_weights);
  return parts;
}
|
pyfr_gemm_cm.c | /******************************************************************************
** Copyright (c) 2016-2017, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#include <mkl.h>
#include <libxsmm.h>
/**
 * Elapsed wall-clock time between two gettimeofday() samples, in seconds.
 * Subtracts the fields before scaling so the intermediate values cannot
 * overflow, unlike the original (tv_sec * 1000000) which overflows when
 * time_t/long are 32-bit.
 */
static double sec(struct timeval start, struct timeval end) {
  return (double)(end.tv_sec - start.tv_sec)
       + (double)(end.tv_usec - start.tv_usec) / 1.0e6;
}
/**
 * Benchmark driver: compares an MKL dgemm against a libxsmm JIT kernel on a
 * column-major GEMM (C += A*B) of size M x N x K, processing N in blocks of
 * 16 for the libxsmm variant, then reports timing/GFLOPS and the max
 * element-wise deviation between the two results.
 *
 * Usage: ./a.out M N K reps
 */
int main(int argc, char *argv[])
{
  int n, m, k;
  int lda, ldb, ldc;
  double* a;
  double* b;
  double* c1;
  double* c2;
  struct timeval l_start, l_end;
  double l_total = 0.0;
  int reps, i, j;
  const int nblock = 16;
  double alpha = 1.0, beta = 1.0;
  char transa = 'N', transb = 'N';
  libxsmm_gemm_prefetch_type l_prefetch_op = LIBXSMM_PREFETCH_NONE;
  libxsmm_dmmfunction kernel = NULL;

  if (argc != 5) {
    /* fixed typo: the message used to read "./a,out" */
    fprintf(stderr, "Usage: ./a.out M N K reps\n");
    exit(-1);
  }
  m = atoi(argv[1]);
  n = atoi(argv[2]);
  k = atoi(argv[3]);
  reps = atoi(argv[4]);

  /* column-major leading dimensions for the sizes in question */
  lda = m;
  ldb = k;
  ldc = m;

  if (n % nblock != 0) {
    fprintf(stderr, "N needs to be divisible by %i\n", nblock);
    exit(-1);
  }

  a  = (double*)_mm_malloc(lda*k*sizeof(double), 64);
  b  = (double*)_mm_malloc(ldb*n*sizeof(double), 64);
  c1 = (double*)_mm_malloc(ldc*n*sizeof(double), 64);
  c2 = (double*)_mm_malloc(ldc*n*sizeof(double), 64);

  #pragma omp parallel for
  for (i = 0; i < lda*k; i++) {
    a[i] = drand48();
  }
  #pragma omp parallel for
  for (i = 0; i < ldb*n; i++) {
    b[i] = drand48();
  }
  #pragma omp parallel for
  for (i = 0; i < ldc*n; i++) {
    c1[i] = 0;
    c2[i] = 0;
  }

  /* JIT the libxsmm kernel for an (m x nblock x k) panel; dispatch can
   * return NULL when the shape is unsupported, so fail loudly instead of
   * crashing on the call below. */
  kernel = libxsmm_dmmdispatch(m, nblock, k, NULL, NULL, NULL, NULL, NULL, NULL, &l_prefetch_op );
  if (kernel == NULL) {
    fprintf(stderr, "libxsmm_dmmdispatch failed for M=%i N=%i K=%i\n", m, nblock, k);
    exit(-1);
  }

  /* warm up MKL (runtime/thread init) outside the timed region, then clear */
  dgemm(&transa, &transb, &m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c1, &ldc);
  #pragma omp parallel for
  for (i = 0; i < ldc*n; i++) {
    c1[i] = 0;
    c2[i] = 0;
  }

  gettimeofday(&l_start, NULL);
  for ( j = 0; j < reps; j++ ) {
    dgemm(&transa, &transb, &m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c1, &ldc);
  }
  gettimeofday(&l_end, NULL);
  l_total = sec(l_start, l_end);
  fprintf(stdout, "time[s] MKL (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps );
  fprintf(stdout, "GFLOPS MKL (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total );
  fprintf(stdout, "GB/s MKL (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total );

  gettimeofday(&l_start, NULL);
  for ( j = 0; j < reps; j++ ) {
    #pragma omp parallel for private(i)
    for ( i = 0; i < n; i+=nblock) {
      kernel( a, b+(ldb*i), c2+(ldc*i), NULL, NULL, NULL );
    }
  }
  /* BUG FIX: the stop timestamp used to be taken INSIDE the reps loop,
   * adding a gettimeofday call to every timed iteration. */
  gettimeofday(&l_end, NULL);
  l_total = sec(l_start, l_end);
  fprintf(stdout, "time[s] libxsmm (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps );
  fprintf(stdout, "GFLOPS libxsmm (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total );
  fprintf(stdout, "GB/s libxsmm (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total );

  /* validate the libxsmm result against MKL */
  double max_error = 0.0;
  for ( i = 0; i < ldc*n; i++) {
    if (max_error < fabs(c1[i] - c2[i])) {
      max_error = fabs(c1[i] - c2[i]);
    }
  }
  printf("max error: %f\n\n", max_error);

  /* release the aligned allocations (previously leaked) */
  _mm_free(a);
  _mm_free(b);
  _mm_free(c1);
  _mm_free(c2);
  return 0;
}
|
symv_x_bsr_u_lo.c | #include "alphasparse/kernel.h"
#ifdef _OPENMP
#include<omp.h>
#endif
#include"alphasparse/opt.h"
#include<string.h>
#include "stdio.h"
#include <stdlib.h>
#include "alphasparse/util.h"
/**
 * @brief BSR symmetric matrix-vector product, unit diagonal, lower storage:
 *        y := alpha * A * x + beta * y, where A is symmetric with only the
 *        strictly-lower blocks stored and an implicit unit diagonal (the
 *        diagonal contributes alpha * x[i] directly, see the final loop).
 *
 * Each thread accumulates the contributions of its block-row partition into
 * a private full-length buffer tmp[tid] (both the stored lower entry and its
 * transposed mirror are scattered), and the buffers are reduced into y at
 * the end.
 *
 * @return ALPHA_SPARSE_STATUS_INVALID_VALUE when A is not block-square,
 *         otherwise ALPHA_SPARSE_STATUS_SUCCESS.
 */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_BSR *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    const ALPHA_INT thread_num = alpha_get_thread_num();
    const ALPHA_INT bs = A->block_size;
    const ALPHA_INT bs2 = bs * bs;
    ALPHA_INT b_rows = A->rows;
    ALPHA_INT b_cols = A->cols;
    /* symmetric kernels require a block-square matrix */
    if (b_rows != b_cols)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;

    /* split block rows across threads so each thread owns a similar nnz count */
    ALPHA_INT partition[thread_num + 1];
    balanced_partition_row_by_nnz(A->rows_end, b_rows, thread_num, partition);

    /* one private dense accumulator per thread (length m = b_rows * bs) */
    ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num);
#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
    {
        const ALPHA_INT tid = alpha_get_thread_id();
        const ALPHA_INT local_m_s = partition[tid];
        const ALPHA_INT local_m_e = partition[tid + 1];
        tmp[tid] = (ALPHA_Number *)malloc(sizeof(ALPHA_Number) * b_rows * bs);
        memset(tmp[tid], 0, sizeof(ALPHA_Number) * b_rows * bs);
        if (A->block_layout == ALPHA_SPARSE_LAYOUT_ROW_MAJOR)
        {
            for (ALPHA_INT br = local_m_s; br < local_m_e; br++)
            {
                ALPHA_INT row = br * bs;
                ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
                /* only blocks with column index <= br belong to the lower triangle */
                ALPHA_INT lower_end = alpha_upper_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
                for (ALPHA_INT ai = block_start; ai < lower_end; ai++)
                {
                    ALPHA_INT bc = A->col_indx[ai];
                    ALPHA_INT col = bc * bs;
                    ALPHA_INT a0_idx = ai * bs2;
                    if (bc == br)
                    {
                        /* diagonal block: use only its strictly-lower entries
                         * (unit diagonal is applied later) */
                        for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
                        {
                            for (ALPHA_INT b_col = 0; b_col < b_row; b_col++)
                            {
                                alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_row * bs + b_col], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_row * bs + b_col], x[row + b_row]);
                            }
                        }
                    }
                    else
                    {
                        /* off-diagonal block: scatter both A(r,c)*x(c) and the
                         * symmetric mirror A(r,c)*x(r) */
                        for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
                        {
                            for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
                            {
                                alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_row * bs + b_col], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_row * bs + b_col], x[row + b_row]);
                            }
                        }
                    }
                }
            }
        }
        else if (A->block_layout == ALPHA_SPARSE_LAYOUT_COLUMN_MAJOR)
        {
            /* BUG FIX: this branch previously looped over ALL block rows
             * (br = 0 .. b_rows) in every thread, so each contribution was
             * accumulated thread_num times. Use this thread's partition,
             * mirroring the row-major branch above. */
            for (ALPHA_INT br = local_m_s; br < local_m_e; br++)
            {
                ALPHA_INT row = br * bs;
                ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
                ALPHA_INT lower_end = alpha_upper_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
                for (ALPHA_INT ai = block_start; ai < lower_end; ai++)
                {
                    ALPHA_INT bc = A->col_indx[ai];
                    ALPHA_INT col = bc * bs;
                    ALPHA_INT a0_idx = ai * bs2;
                    /* diagonal block containing diagonal entry */
                    if (bc == br)
                    {
                        for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
                        {
                            for (ALPHA_INT b_row = b_col + 1; b_row < bs; b_row++)
                            {
                                alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_col * bs + b_row], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_col * bs + b_row], x[row + b_row]);
                            }
                        }
                    }
                    else
                    {
                        for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
                        {
                            for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
                            {
                                alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_col * bs + b_row], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_col * bs + b_row], x[row + b_row]);
                            }
                        }
                    }
                }
            }
        }
    }

    /* reduce the per-thread buffers into y:
     * y[i] = beta*y[i] + alpha*x[i] (unit diagonal) + alpha*sum_j tmp[j][i] */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < b_cols * bs; ++i)
    {
        ALPHA_Number tmp_y;
        alpha_setzero(tmp_y);
        for (ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(tmp_y, tmp_y, tmp[j][i]);
        }
        alpha_mul(y[i], y[i], beta);
        alpha_madde(y[i], x[i], alpha);
        alpha_madde(y[i], tmp_y, alpha);
    }

#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < thread_num; ++i)
    {
        free(tmp[i]);
    }
    free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file utils.h
* \brief Basic utilility functions.
*/
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <nnvm/node.h>
#include <mxnet/imperative.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/storage.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>
#include <memory>
#include <vector>
#include <type_traits>
#include <utility>
#include <random>
#include <string>
#include <thread>
#include <algorithm>
#include <functional>
#include <limits>
#include "../operator/mxnet_op.h"
#if MXNET_USE_MKLDNN == 1
#include "../operator/nn/mkldnn/mkldnn_base-inl.h"
#endif
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
#include <windows.h>
#else
#include <unistd.h>
#endif
namespace mxnet {
namespace common {
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
/*! \brief Returns the id of the current process (Windows implementation). */
inline size_t current_process_id() { return ::GetCurrentProcessId(); }
#else
/*! \brief Returns the id of the current process (POSIX implementation). */
inline size_t current_process_id() { return getpid(); }
#endif
/*!
* \brief Kernel functor validating a CSR indptr array: entries must be
* non-negative, non-decreasing, start with 0, and end with a value equal
* to the size of the indices array. On violation, writes kCSRIndPtrErr
* into *out.
*
* Launched with one invocation per i in [0, end); each i checks the edge
* indptr[i] -> indptr[i+1], plus the first/last boundary conditions.
*/
struct csr_indptr_check {
template<typename DType, typename IType>
MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr,
const nnvm::dim_t end, const nnvm::dim_t idx_size) {
// negative or decreasing edge; first entry must be 0; last must equal idx_size
if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] ||
(i == 0 && indptr[i] != 0) ||
(i == end - 1 && indptr[end] != idx_size))
*out = kCSRIndPtrErr;
}
};
/*!
* \brief Kernel functor validating CSR column indices: within each row i,
* indices must be non-negative, strictly less than ncols, and strictly
* ascending. On violation, writes kCSRIdxErr into *out.
*
* Launched with one invocation per row; each invocation scans that row's
* index range [indptr[i], indptr[i+1]).
*/
struct csr_idx_check {
template<typename DType, typename IType, typename RType>
MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
const RType* indptr, const nnvm::dim_t ncols) {
for (RType j = indptr[i]; j < indptr[i+1]; j++) {
// out-of-range column, or non-ascending pair within the row
if (idx[j] >= ncols || idx[j] < 0 ||
(j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) {
*out = kCSRIdxErr;
break;
}
}
}
};
/*!
* \brief Kernel functor validating row-sparse indices: entries must be
* non-negative, strictly less than nrows, and strictly ascending.
* On violation, writes kRSPIdxErr into *out.
*
* Launched with one invocation per index; 'end' is the last valid index
* position, so the (i < end) guard keeps idx[i+1] in bounds.
*/
struct rsp_idx_check {
template<typename DType, typename IType>
MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
const nnvm::dim_t end, const nnvm::dim_t nrows) {
if ((i < end && idx[i+1] <= idx[i])
|| idx[i] < 0 || idx[i] >= nrows)
*out = kRSPIdxErr;
}
};
template<typename xpu>
void CheckFormatWrapper(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check);
/*!
* \brief Check the validity of CSRNDArray.
* \param rctx Execution context.
* \param input Input NDArray of CSRStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template<typename xpu>
void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check) {
using namespace op::mxnet_op;
CHECK_EQ(input.storage_type(), kCSRStorage)
<< "CheckFormatCSRImpl is for CSRNDArray";
const mxnet::TShape shape = input.shape();
const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx);
const mxnet::TShape storage_shape = input.storage_shape();
// O(1) structural check: a CSR array must be 2-D with 1-D aux/data arrays,
// indptr of length rows+1, and one column index per stored value.
if ((shape.ndim() != 2) ||
(idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
(indptr_shape[0] != shape[0] + 1) ||
(idx_shape[0] != storage_shape[0])) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
DType* err = err_cpu.dptr<DType>();
*err = kCSRShapeErr;
});
return;
}
if (full_check) {
// O(N) content check: run the indptr/idx validation kernels on the
// array's own device, then copy the resulting error code back to CPU.
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
// scratch NDArray on the target device holding the error flag
NDArray ret_xpu = NDArray(mshadow::Shape1(1),
rctx.get_ctx(), false, err_cpu.type_flag_);
TBlob val_xpu = ret_xpu.data();
Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
input.aux_data(csr::kIndPtr).dptr<RType>(),
indptr_shape[0] - 1, idx_shape[0]);
// no need to check indices if indices are empty
if (idx_shape[0] != 0) {
Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
input.aux_data(csr::kIdx).dptr<IType>(),
input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]);
}
mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
val_xpu.get<xpu, 1, DType>(s), s);
});
});
});
}
}
/*!
* \brief Check the validity of RowSparseNDArray.
* \param rctx Execution context.
* \param input Input NDArray of RowSparseStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template<typename xpu>
void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check) {
using namespace op::mxnet_op;
CHECK_EQ(input.storage_type(), kRowSparseStorage)
<< "CheckFormatRSPImpl is for RSPNDArray";
const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx);
// O(1) structural check: one row index per stored row.
if (idx_shape[0] != input.storage_shape()[0]) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
DType* err = err_cpu.dptr<DType>();
*err = kRSPShapeErr;
});
return;
}
// an empty row-sparse array is trivially valid
if (idx_shape[0] == 0) {
return;
}
if (full_check) {
// O(N) content check: validate the row indices on the array's device,
// then copy the resulting error code back to CPU.
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
NDArray ret_xpu = NDArray(mshadow::Shape1(1),
rctx.get_ctx(), false, err_cpu.type_flag_);
TBlob val_xpu = ret_xpu.data();
Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());

Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0],
val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(),
idx_shape[0] - 1, input.shape()[0]);
mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
val_xpu.get<xpu, 1, DType>(s), s);
});
});
}
}
/*!
 * \brief Dispatch a storage-format validity check based on the input's
 * storage type. Default (dense) storage needs no check; an unknown
 * storage type is a fatal error.
 */
template<typename xpu>
void CheckFormatImpl(const RunContext &rctx, const NDArray &input,
                     const TBlob &err_cpu, const bool full_check) {
  const int stype = input.storage_type();
  switch (stype) {
    case kCSRStorage:
      CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kRowSparseStorage:
      CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kDefaultStorage:
      break;  // no-op for default storage
    default:
      LOG(FATAL) << "Unknown storage type " << stype;
  }
}
/*! \brief Pick rows specified by user input index array from a row sparse ndarray
* and save them in the output sparse ndarray.
*/
template<typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s,
const NDArray& input_nd,
const TBlob& idx_data,
const OpReqType req,
NDArray* output_nd);
/* \brief Casts tensor storage type to the new type.
*/
template<typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
* false is returned for empty inputs.
*/
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
 * false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
                                const NDArrayStorageType stype) {
  if (vstorage.empty()) {
    return false;
  }
  for (const auto& entry : vstorage) {
    if (entry != stype) {
      return false;
    }
  }
  return true;
}
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
* or `stype2'. Sets boolean if both found.
* false is returned for empty inputs.
*/
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
 * or `stype2'. Sets *has_both when both types are present.
 * false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
                                const NDArrayStorageType stype1,
                                const NDArrayStorageType stype2,
                                bool *has_both) {
  if (has_both) {
    *has_both = false;
  }
  if (vstorage.empty()) {
    return false;
  }
  uint8_t seen = 0;  // bit 0: stype1 present, bit 1: stype2 present
  for (const auto entry : vstorage) {
    if (entry == stype1) {
      seen |= 1;
    } else if (entry == stype2) {
      seen |= 2;
    } else {
      return false;  // some other storage type present
    }
  }
  if (has_both) {
    *has_both = (seen == 3);
  }
  return true;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as target `stype`. false is returned for empty inputs.
*/
/*! \brief returns true if the storage types of arrays in `ndarrays`
 * are the same as target `stype`. false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype) {
  if (ndarrays.empty()) {
    return false;
  }
  for (const auto& nd : ndarrays) {
    if (nd.storage_type() != stype) {
      return false;
    }
  }
  return true;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as targets `stype1` or `stype2`. false is returned for empty inputs.
*/
/*! \brief returns true if the storage types of arrays in `ndarrays`
 * are the same as targets `stype1` or `stype2`. Sets *has_both when both
 * types are present. false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype1,
                                const NDArrayStorageType stype2,
                                bool *has_both) {
  if (has_both) {
    *has_both = false;
  }
  if (ndarrays.empty()) {
    return false;
  }
  uint8_t seen = 0;  // bit 0: stype1 present, bit 1: stype2 present
  for (const auto& nd : ndarrays) {
    const NDArrayStorageType stype = nd.storage_type();
    if (stype == stype1) {
      seen |= 1;
    } else if (stype == stype2) {
      seen |= 2;
    } else {
      return false;  // some other storage type present
    }
  }
  if (has_both) {
    *has_both = (seen == 3);
  }
  return true;
}
/*! \brief returns true if storage type of any array in `ndarrays`
* is the same as the target `stype`. false is returned for empty inputs.
*/
/*! \brief returns true if storage type of any array in `ndarrays`
 * is the same as the target `stype`. false is returned for empty inputs.
 */
inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype) {
  // a range-for over an empty vector is a no-op, so no explicit empty check
  for (const auto& nd : ndarrays) {
    if (nd.storage_type() == stype) {
      return true;
    }
  }
  return false;
}
/*! \brief returns true if any storage type `ndstype` in `ndstypes`
* is the same as the target `stype`. false is returned for empty inputs.
*/
/*! \brief returns true if any storage type `ndstype` in `ndstypes`
 * is the same as the target `stype`. false is returned for empty inputs.
 */
inline bool ContainsStorageType(const std::vector<int>& ndstypes,
                                const NDArrayStorageType stype) {
  return std::find(ndstypes.begin(), ndstypes.end(), stype) != ndstypes.end();
}
/*! \brief get string representation of dispatch_mode */
inline std::string dispatch_mode_string(const DispatchMode x) {
switch (x) {
case DispatchMode::kFCompute:
return "fcompute";
case DispatchMode::kFComputeEx:
return "fcompute_ex";
case DispatchMode::kFComputeFallback:
return "fcompute_fallback";
case DispatchMode::kVariable:
return "variable";
case DispatchMode::kUndefined:
return "undefined";
}
return "unknown";
}
/*! \brief get string representation of storage_type */
inline std::string stype_string(const int x) {
switch (x) {
case kDefaultStorage:
return "default";
case kCSRStorage:
return "csr";
case kRowSparseStorage:
return "row_sparse";
}
return "unknown";
}
/*! \brief get string representation of device type */
inline std::string dev_type_string(const int dev_type) {
switch (dev_type) {
case Context::kCPU:
return "cpu";
case Context::kGPU:
return "gpu";
case Context::kCPUPinned:
return "cpu_pinned";
case Context::kCPUShared:
return "cpu_shared";
}
return "unknown";
}
/*! \brief Look up `attr_name` in the node's attribute dictionary, returning
 * `default_val` when absent. Uses a single find() instead of the original
 * find() followed by at(), which performed the map lookup twice. */
inline std::string attr_value_string(const nnvm::NodeAttrs& attrs,
                                     const std::string& attr_name,
                                     std::string default_val = "") {
  const auto it = attrs.dict.find(attr_name);
  if (it == attrs.dict.end()) {
    return default_val;
  }
  return it->second;
}
/*! \brief get string representation of the operator stypes */
inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
const int dev_mask,
const std::vector<int>& in_attrs,
const std::vector<int>& out_attrs) {
std::ostringstream os;
os << "operator = " << attrs.op->name
<< "\ninput storage types = [";
for (const int attr : in_attrs) {
os << stype_string(attr) << ", ";
}
os << "]\n"
<< "output storage types = [";
for (const int attr : out_attrs) {
os << stype_string(attr) << ", ";
}
os << "]\n"
<< "params = {";
for (auto kv : attrs.dict) {
os << "\"" << kv.first << "\" : " << kv.second << ", ";
}
os << "}\n"
<< "context.dev_mask = " << dev_type_string(dev_mask);
return os.str();
}
/*! \brief get string representation of the operator */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
std::string result = "";
std::vector<int> in_stypes;
std::vector<int> out_stypes;
in_stypes.reserve(inputs.size());
out_stypes.reserve(outputs.size());
auto xform = [](const NDArray arr) -> int { return arr.storage_type(); };
std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
result += operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
return result;
}
/*! \brief log message once. Intended for storage fallback warning messages. */
inline void LogOnce(const std::string& message) {
typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
auto log_store = LogStore::Get();
if (log_store->find(message) == log_store->end()) {
LOG(INFO) << message;
log_store->insert(message);
}
}
/*! \brief log storage fallback event
*/
/*! \brief Log (once per distinct message) that an operator fell back to
 * dense storage, including the operator's storage-type summary. Suppressed
 * entirely when MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0. */
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
const int dev_mask,
const std::vector<int>* in_attrs,
const std::vector<int>* out_attrs) {
// env var is read once per process (function-local static)
static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
if (!log) return;
const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
std::ostringstream os;
const char* warning = "\nThe operator with default storage type will be dispatched "
"for execution. You're seeing this warning message because the operator above is unable "
"to process the given ndarrays with specified storage types, context and parameter. "
"Temporary dense ndarrays are generated in order to execute the operator. "
"This does not affect the correctness of the programme. "
"You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
"0 to suppress this warning.";
os << "\nStorage type fallback detected:\n" << op_str << warning;
LogOnce(os.str());
#if MXNET_USE_MKLDNN == 1
// extra hints for MKL-DNN builds about related environment toggles
if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. "
"You can re-enable by setting MXNET_MKLDNN_ENABLED=1");
if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_MKLDNN_CACHE_NUM is set."
"Should only be set if "
"your model has variable input shapes, "
"as cache size may grow unbounded");
#endif
}
// heuristic to dermine number of threads per GPU
// heuristic to dermine number of threads per GPU
/*! \brief Number of worker threads per GPU, from MXNET_GPU_WORKER_NTHREADS
 * (default 2). */
inline int GetNumThreadsPerGPU() {
// This is resource efficient option.
return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}
// heuristic to get number of matching colors.
// this decides how much parallelism we can get in each GPU.
/*! \brief Number of graph "match colors" (parallel streams per GPU), from
 * MXNET_EXEC_NUM_TEMP (default 1), capped at GetNumThreadsPerGPU(). */
inline int GetExecNumMatchColor() {
// This is resource efficient option.
int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
return std::min(num_match_color, GetNumThreadsPerGPU());
}
/*!
 * \brief Sum the n elements of array `a` onto `start` using an OpenMP
 * reduction.
 * \param a pointer to the input elements
 * \param n number of elements to accumulate
 * \param start initial accumulator value
 * \return start + a[0] + ... + a[n-1]
 */
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V total = start;
#pragma omp parallel for reduction(+:total)
  for (int idx = 0; idx < n; ++idx) {
    total += a[idx];
  }
  return total;
}
/*!
 * \brief
 * Helper function for ParallelSort.
 * DO NOT call this function directly.
 * Use the interface ParallelSort instead.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len,
size_t grainsize, const Compare& comp) {
// small ranges are sorted sequentially; larger ranges are split in half,
// with the left half handled on a new thread and the right half recursed
// on the current thread, then merged.
if (len < grainsize) {
std::sort(first, first+len, comp);
} else {
std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp);
ParallelSortHelper(first+len/2, len - len/2, grainsize, comp);
thr.join();
std::inplace_merge(first, first+len/2, first+len, comp);
}
}
/*!
* \brief
* Sort the elements in the range [first, last) into the ascending order defined by
* the comparator comp.
* If the length of the range [first, last) is greater than a certain threshold,
* the range will be recursively divided into two and assign two threads
* to sort each half range.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  // grainsize: aim for roughly num_threads leaf ranges, but never recurse
  // below 16K elements per task
  const size_t len = std::distance(first, last);
  const size_t grainsize = std::max(len / num_threads + 5, static_cast<size_t>(1024 * 16));
  ParallelSortHelper(first, len, grainsize, comp);
}
/*!
* \brief
* Sort the elements in the range [first, last) into ascending order.
* The elements are compared using the default < operator.
* If the length of the range [first, last) is greater than a certain threshold,
* the range will be recursively divided into two and assign two threads
* to sort each half range.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
// convenience overload: delegates to the comparator version with operator<
ParallelSort(first, last, num_threads,
std::less<typename std::iterator_traits<RandomIt>::value_type>());
}
/*!
* \brief Random Engine
*/
typedef std::mt19937 RANDOM_ENGINE;
/*!
* \brief Helper functions.
*/
namespace helper {
/*!
 * \brief Trait selecting the MakeUnique overload: the primary template
 * matches non-array types.
 */
template <class T>
struct UniqueIf {
  /*!
   * \brief Result type for a single object of type `T`.
   */
  using SingleObject = std::unique_ptr<T>;
};
/*!
 * \brief Specialization for arrays of unknown bound, `T[]`.
 */
template <class T>
struct UniqueIf<T[]> {
  /*!
   * \brief Result type for a dynamically sized array of `T`.
   */
  using UnknownBound = std::unique_ptr<T[]>;
};
/*!
 * \brief Specialization for arrays of known bound, `T[kSize]`; such arrays
 * are deliberately unsupported.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
  /*!
   * \brief Marker type used only to `=delete` the overload below.
   */
  using KnownBound = void;
};
}  // namespace helper

/*!
 * \brief Construct a non-array `T` from `args` and wrap it in a
 * `std``::``unique_ptr`. Participates in overload resolution only when
 * `T` is not an array type.
 * \param args Arguments forwarded to `T`'s constructor.
 * \return Owning pointer to the new object.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

/*!
 * \brief Construct a value-initialized array `T[n]` and wrap it in a
 * `std``::``unique_ptr`. Participates in overload resolution only when
 * `T` is an array of unknown bound.
 * \param n Number of elements to allocate.
 * \return Owning pointer to the new array.
 */
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
  using Elem = typename std::remove_extent<T>::type;
  return std::unique_ptr<T>(new Elem[n]());
}

/*!
 * \brief Arrays of known bound (e.g. `MakeUnique<int[5]>()`) are disallowed.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;
// Look up the FCompute-style operator attribute `name` registered for `op`,
// choosing between the "<cpu>" and "<gpu>" registry variants based on the
// device of `ctx`. Returns nullptr when no such attribute is registered for
// the operator; aborts on an unknown device mask.
// NOTE(review): the statics below bind to the first `name` passed for a given
// FCompType instantiation — verify all call sites use a single name per
// FCompType.
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
                      const Context& ctx) {
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
  if (ctx.dev_mask() == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  } else if (ctx.dev_mask() == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  } else {
    LOG(FATAL) << "Unknown device mask " << ctx.dev_mask();
    return nullptr;
  }
}
/*!
 * \brief Return the max integer value representable in the type `T` without loss of precision.
 */
template <typename T>
constexpr size_t MaxIntegerValue() {
  // Integral types represent every value up to their max exactly; for
  // floating-point types the largest exactly-representable integer is
  // 2^digits, where digits is the significand width (2 << (digits-1)).
  return std::is_integral<T>::value ?
    std::numeric_limits<T>::max():
    size_t(2) << (std::numeric_limits<T>::digits - 1);
}
// float16 has an 11-bit significand (10 stored + 1 implicit): 2^11 = 2048.
template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
  return size_t(2) << 10;
}
// NOTE(review): bfloat16 has an 8-bit significand (7 stored + 1 implicit),
// which would give 2^8, but this returns 2^15 = 32768 — confirm whether the
// larger bound is intentional.
template <>
constexpr size_t MaxIntegerValue<mshadow::bfloat::bf16_t>() {
  return size_t(2) << 14;
}
// Return the bit width of `a`, i.e. floor(log2(a)) + 1 for a > 0.
// Note: both a == 0 and a == 1 return 1 (the loop body never runs).
MSHADOW_XINLINE int ilog2ul(size_t a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}
// Return the bit width of `a`, i.e. floor(log2(a)) + 1 for a > 0.
// As with ilog2ul, both 0 and 1 map to 1.
MSHADOW_XINLINE int ilog2ui(unsigned int a) {
  int bits = 1;
  for (a >>= 1; a != 0; a >>= 1) {
    ++bits;
  }
  return bits;
}
/*!
 * \brief Return an NDArray of all zeros.
 * \param stype storage type of the array to create.
 * \param shape shape of the array.
 * \param ctx   device context the array lives on.
 * \param dtype element data type.
 */
inline NDArray InitZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                         const Context &ctx, const int dtype) {
  // NDArray with default storage: allocate eagerly and fill with zeros.
  if (stype == kDefaultStorage) {
    NDArray ret(shape, ctx, false, dtype);
    ret = 0;
    return ret;
  }
  // NDArray with non-default storage. Storage allocation is always delayed,
  // so a sparse zero array needs no explicit fill.
  return NDArray(stype, shape, ctx, true, dtype);
}
/*!
* \brief Helper to add a NDArray of zeros to a std::vector.
*/
inline void EmplaceBackZeros(const NDArrayStorageType stype,
                             const mxnet::TShape &shape,
                             const Context &ctx,
                             const int dtype,
                             std::vector<NDArray> *vec) {
  // NDArray with default storage: construct in place, then zero-fill.
  if (stype == kDefaultStorage) {
    vec->emplace_back(shape, ctx, false, dtype);
    vec->back() = 0;
  } else {
    // NDArray with non-default storage. Storage allocation is always delayed,
    // so no explicit fill is needed.
    vec->emplace_back(stype, shape, ctx, true, dtype);
  }
}
/*!
* \brief parallelize copy by OpenMP.
*/
template<typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
  // Element-count threshold above which the copy is parallelized with OpenMP;
  // overridable via the MXNET_CPU_PARALLEL_SIZE environment variable.
  static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
  if (size >= copy_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] = src[i];
    }
  } else {
    // Small copies: plain memcpy. The diagnostic pragmas silence GCC 8's
    // -Wclass-memaccess, which fires when memcpy targets class types.
#pragma GCC diagnostic push
#if __GNUC__ >= 8
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
    std::memcpy(dst, src, sizeof(DType) * size);
#pragma GCC diagnostic pop
  }
}
/*!
 * \brief parallelize add by OpenMP
*/
template<typename DType>
inline void ParallelAdd(DType* dst, const DType* src, index_t size) {
  // Element-count threshold above which the addition is parallelized with
  // OpenMP; overridable via the MXNET_CPU_PARALLEL_SIZE environment variable.
  static index_t add_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
  if (size >= add_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] += src[i];
    }
  } else {
    // Small inputs: sequential loop avoids OpenMP overhead.
    for (index_t i = 0; i < size; ++i) {
      dst[i] += src[i];
    }
  }
}
/*!
* \brief If numpy compatibility is turned off (default), the shapes passed in
* by users follow the legacy shape definition:
* 1. 0 ndim means the shape is completely unknown.
* 2. 0 dim size means the dim size is unknown.
* We need to convert those shapes to use the numpy shape definition:
* 1. 0 ndim means it's a scalar tensor.
* 2. -1 ndim means the shape is unknown.
* 3. 0 dim size means no elements in that dimension.
* 4. -1 dim size means the dimension's size is unknown.
* so that operator's infer shape function can work in backend.
* \param shape to be converted.
* Note: It is possible that the shape to be converted is already
* numpy compatible. For example, when a subgraph operator's infer
* shape function is called from the infer shape pass of the whole
* graph, its input/output shapes have been converted to numpy
* compatible shapes.
*/
inline void ConvertToNumpyShape(mxnet::TShape* shape) {
  if (shape->ndim() != 0) {
    for (int axis = 0; axis < shape->ndim(); ++axis) {
      // legacy dim size 0 encodes an unknown dimension; numpy uses -1
      if ((*shape)[axis] == 0) {
        (*shape)[axis] = -1;
      }
    }
  } else {
    // legacy ndim == 0 encodes a fully unknown shape; a default-constructed
    // TShape carries the numpy encoding (ndim == -1)
    *shape = mxnet::TShape();
  }
}
// Apply the legacy-to-numpy shape conversion to every shape in the vector.
inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) {
  for (auto& shape : *shapes) {
    ConvertToNumpyShape(&shape);
  }
}
/*!
* \brief This is function is used to convert shapes returned by
* the infer shape functions/pass to the legacy shape definition.
*/
inline void ConvertToLegacyShape(mxnet::TShape* shape) {
  // numpy unknown shape (ndim == -1) maps back to the legacy ndim == 0
  if (!mxnet::ndim_is_known(*shape)) {
    *shape = mxnet::TShape(0, -1);
  } else {
    for (int j = 0; j < shape->ndim(); ++j) {
      // numpy unknown dim size (-1) maps back to the legacy 0
      if (!mxnet::dim_size_is_known(*shape, j)) {
        (*shape)[j] = 0;
      }
    }
  }
}
// Apply the numpy-to-legacy shape conversion to every shape in the vector.
inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) {
  for (auto& shape : *shapes) {
    ConvertToLegacyShape(&shape);
  }
}
void ExecuteMonInputCallback(
const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
size_t nid, const std::function<void(const char *, const char *, void *)>
&monitor_callback);
void ExecuteMonOutputCallback(
const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
size_t nid, const std::function<void(const char *, const char *, void *)>
&monitor_callback);
/*!
* \brief This is function can return the output names of a NodeEntry.
*/
static inline std::string GetOutputName(const nnvm::NodeEntry& e) {
  // Wrap the entry in a temporary single-output symbol so the standard
  // symbol name-listing machinery can be reused.
  nnvm::Symbol sym;
  sym.outputs.push_back(e);
  return sym.ListOutputNames()[0];
}
// Return a copy of `src` where each negative axis value is replaced by its
// positive equivalent (axis + ndim). Aborts via CHECK if any canonicalized
// axis falls outside [0, ndim).
inline mxnet::TShape CanonicalizeAxes(const mxnet::TShape& src) {
  // convert negative axes to positive values
  const int ndim = src.ndim();
  mxnet::TShape axes = src;
  for (int i = 0; i < ndim; ++i) {
    if (axes[i] < 0) {
      axes[i] += ndim;
    }
    CHECK(axes[i] >= 0 && axes[i] < ndim) << "axes[" << i << "]="
                                          << axes[i] << " exceeds the range ["
                                          << 0 << ", " << ndim << ")";
  }
  return axes;
}
// True when dtype is one of the floating-point mshadow type flags.
inline bool is_float(const int dtype) {
  switch (dtype) {
    case mshadow::kFloat32:
    case mshadow::kFloat64:
    case mshadow::kFloat16:
      return true;
    default:
      return false;
  }
}
// True when dtype is one of the integer mshadow type flags.
inline bool is_int(const int dtype) {
  switch (dtype) {
    case mshadow::kUint8:
    case mshadow::kInt8:
    case mshadow::kInt32:
    case mshadow::kInt64:
      return true;
    default:
      return false;
  }
}
// Return the "more precise" of two dtypes for promotion: float beats int and
// wider beats narrower. The int8/uint8 pair is a checked error here — callers
// must special-case it first (see np_binary_out_infer_type).
inline int get_more_precise_type(const int type1, const int type2) {
  if (type1 == type2) return type1;
  if (is_float(type1) && is_float(type2)) {
    // both float: pick the widest
    if (type1 == mshadow::kFloat64 || type2 == mshadow::kFloat64) {
      return mshadow::kFloat64;
    }
    if (type1 == mshadow::kFloat32 || type2 == mshadow::kFloat32) {
      return mshadow::kFloat32;
    }
    return mshadow::kFloat16;
  } else if (is_float(type1) || is_float(type2)) {
    // mixed float/int: the float side wins
    return is_float(type1) ? type1 : type2;
  }
  // both integer: pick the widest
  if (type1 == mshadow::kInt64 || type2 == mshadow::kInt64) {
    return mshadow::kInt64;
  }
  if (type1 == mshadow::kInt32 || type2 == mshadow::kInt32) {
    return mshadow::kInt32;
  }
  // int8 and uint8 cannot represent each other's range; callers must have
  // handled that pair before reaching here
  CHECK(!((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
          (type1 == mshadow::kInt8 && type2 == mshadow::kUint8)))
    << "1 is UInt8 and 1 is Int8 should not get here";
  if (type1 == mshadow::kUint8 || type2 == mshadow::kUint8) {
    return mshadow::kUint8;
  }
  return mshadow::kInt8;
}
// Infer the output dtype of a numpy-style binary op: the int8/uint8 pair
// promotes to int32 (neither type can represent the other's full range);
// every other combination defers to get_more_precise_type.
inline int np_binary_out_infer_type(const int type1, const int type2) {
  if ((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
      (type1 == mshadow::kInt8 && type2 == mshadow::kUint8)) {
    return mshadow::kInt32;
  }
  return get_more_precise_type(type1, type2);
}
/*!
 * \brief Obtain the profiler scope assigned to a node via its
 *        "__profiler_scope__" attribute.
 * \param attrs node attributes to inspect.
 * \return the assigned profiler scope name, or the storage default when the
 *         attribute was never set.
 */
inline std::string
NodeAttrsGetProfilerScope(const nnvm::NodeAttrs& attrs) {
  // Return by non-const value so the result can be moved at call sites
  // (a const return type forces a copy).
  const auto& node_attrs_dict = attrs.dict;
  const auto profiler_scope_iter = node_attrs_dict.find("__profiler_scope__");
  if (profiler_scope_iter != node_attrs_dict.end()) {
    return profiler_scope_iter->second;
  }
  // attribute absent: fall back to the storage default scope
  return MXNET_STORAGE_DEFAULT_PROFILER_SCOPE_CSTR;
}
// Return the dtype used when none is specified: float64 under numpy
// default-dtype semantics, float32 otherwise (MXNet's classic default).
inline int GetDefaultDtype() {
  return Imperative::Get()->is_np_default_dtype() ?
         mshadow::kFloat64 :
         mshadow::kFloat32;
}
// Return `dtype` unchanged when explicitly given (!= -1); otherwise fall back
// to the global default (float64 under numpy default-dtype mode, else float32).
inline int GetDefaultDtype(int dtype) {
  if (dtype != -1) return dtype;
  if (Imperative::Get()->is_np_default_dtype()) {
    return mshadow::kFloat64;
  }
  return mshadow::kFloat32;
}
} // namespace common
} // namespace mxnet
#endif // MXNET_COMMON_UTILS_H_
|
flexIdentityOperator.h | #ifndef flexIdentityOperator_H
#define flexIdentityOperator_H
#include "vector"
#include "tools.h"
#include "flexLinearOperator.h"
//! represents an identity operator
template<typename T>
class flexIdentityOperator : public flexLinearOperator<T>
{
#ifdef __CUDACC__
    typedef thrust::device_vector<T> Tdata;
#else
    typedef std::vector<T> Tdata;
#endif

public:
    //! initializes the identity operator
    /*!
    \param aNumRows number of rows
    \param aNumCols number of cols
    \param aMinus determines if operator is negated \sa isMinus
    */
    flexIdentityOperator(int aNumRows, int aNumCols, bool aMinus) : flexLinearOperator<T>(aNumRows, aNumCols, identityOp, aMinus) {}

    //! returns a heap-allocated copy of this operator; the caller takes ownership
    flexIdentityOperator<T>* copy()
    {
        flexIdentityOperator<T>* A = new flexIdentityOperator<T>(this->getNumRows(), this->getNumCols(), this->isMinus);
        return A;
    }

    //! applies the operator: output = +/-input (sign given by isMinus); for a
    //! non-square operator, output entries beyond the overlap are set to zero
    void times(bool transposed, const Tdata &input, Tdata &output)
    {
        // all four original branch combinations collapse into one helper
        applyIdentity(transposed, this->isMinus, input, output);
    }

    //! computes output += input (or output -= input if the operator is negated)
    void timesPlus(bool transposed, const Tdata &input, Tdata &output)
    {
        if (this->isMinus)
        {
            doTimesMinus(transposed, input, output);
        }
        else
        {
            doTimesPlus(transposed, input, output);
        }
    }

    //! computes output -= input (or output += input if the operator is negated)
    void timesMinus(bool transposed, const Tdata &input, Tdata &output)
    {
        if (this->isMinus)
        {
            doTimesPlus(transposed, input, output);
        }
        else
        {
            doTimesMinus(transposed, input, output);
        }
    }

    //! every row of the identity has absolute row sum one
    T getMaxRowSumAbs(bool transposed)
    {
        return static_cast<T>(1);
    }

    //! returns a vector of ones sized to the output dimension of the
    //! (possibly transposed) operator
    std::vector<T> getAbsRowSum(bool transposed)
    {
        std::vector<T> result;
        if (transposed)
            result = std::vector<T>(this->getNumCols(), (T)1);
        else
            result = std::vector<T>(this->getNumRows(), (T)1);
        return result;
    }

#ifdef __CUDACC__
    //! device-side variant of getAbsRowSum
    thrust::device_vector<T> getAbsRowSumCUDA(bool transposed)
    {
        thrust::device_vector<T> result;
        if (transposed)
            result = thrust::device_vector<T>(this->getNumCols(), (T)1);
        else
            result = thrust::device_vector<T>(this->getNumRows(), (T)1);
        return result;
    }
#endif

private:
    //! shared implementation for times(): copies (optionally negated) input
    //! into output; when the output dimension exceeds the overlap
    //! min(numRows, numCols), the remaining entries are zero-filled
    void applyIdentity(bool transposed, bool negate, const Tdata &input, Tdata &output)
    {
        const int numRows = this->getNumRows();
        const int numCols = this->getNumCols();
        // output size follows the orientation; only the first
        // min(numRows, numCols) entries receive data
        const int outSize = transposed ? numCols : numRows;
        const int copySize = numRows < numCols ? numRows : numCols;
        if (negate)
        {
            #pragma omp parallel for
            for (int i = 0; i < outSize; ++i)
            {
                if (i >= copySize)
                    output[i] = 0;
                else
                    output[i] = -input[i];
            }
        }
        else
        {
            #pragma omp parallel for
            for (int i = 0; i < outSize; ++i)
            {
                if (i >= copySize)
                    output[i] = 0;
                else
                    output[i] = input[i];
            }
        }
    }

    //! output += input on the overlapping index range (thrust on CUDA, OpenMP on CPU)
    void doTimesPlus(bool transposed, const Tdata &input, Tdata &output)
    {
        int numRows = this->getNumRows();
        int numCols = this->getNumCols();
        if (transposed)
        {
#ifdef __CUDACC__
            if (numCols <= numRows)
                thrust::transform(output.begin(), output.end(), input.begin(), output.begin(), thrust::plus<T>());
            else
            {
                thrust::transform(output.begin(), output.begin() + numRows, input.begin(), output.begin(), thrust::plus<T>());
            }
#else
            #pragma omp parallel for
            for (int i = 0; i < numCols; ++i)
            {
                if (numCols <= numRows || i < numRows)
                    output[i] += input[i];
            }
#endif
        }
        else
        {
#ifdef __CUDACC__
            if (numRows <= numCols)
                thrust::transform(output.begin(), output.end(), input.begin(), output.begin(), thrust::plus<T>());
            else
            {
                thrust::transform(output.begin(), output.begin() + numCols, input.begin(), output.begin(), thrust::plus<T>());
            }
#else
            #pragma omp parallel for
            for (int i = 0; i < numRows; ++i)
            {
                if (numRows <= numCols || i < numCols)
                    output[i] += input[i];
            }
#endif
        }
    }

    //! output -= input on the overlapping index range (thrust on CUDA, OpenMP on CPU)
    void doTimesMinus(bool transposed, const Tdata &input, Tdata &output)
    {
        int numRows = this->getNumRows();
        int numCols = this->getNumCols();
        if (transposed)
        {
#ifdef __CUDACC__
            if (numCols <= numRows)
                thrust::transform(output.begin(), output.end(), input.begin(), output.begin(), thrust::minus<T>());
            else
            {
                thrust::transform(output.begin(), output.begin() + numRows, input.begin(), output.begin(), thrust::minus<T>());
            }
#else
            #pragma omp parallel for
            for (int i = 0; i < numCols; ++i)
            {
                if (numCols <= numRows || i < numRows)
                    output[i] -= input[i];
            }
#endif
        }
        else
        {
#ifdef __CUDACC__
            if (numRows <= numCols)
                thrust::transform(output.begin(), output.end(), input.begin(), output.begin(), thrust::minus<T>());
            else
            {
                thrust::transform(output.begin(), output.begin() + numCols, input.begin(), output.begin(), thrust::minus<T>());
            }
#else
            #pragma omp parallel for
            for (int i = 0; i < numRows; ++i)
            {
                if (numRows <= numCols || i < numCols)
                    output[i] -= input[i];
            }
#endif
        }
    }
};
#endif
|
GB_binop__second_fc32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__second_fc32
// A.*B function (eWiseMult): GB_AemultB__second_fc32
// A*D function (colscale): GB_AxD__second_fc32
// D*A function (rowscale): GB_DxB__second_fc32
// C+=B function (dense accum): GB_Cdense_accumB__second_fc32
// C+=b function (dense accum): GB_Cdense_accumb__second_fc32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__second_fc32
// C=scalar+B (none)
// C=scalar+B' (none)
// C=A+scalar GB_bind2nd__second_fc32
// C=A'+scalar GB_bind2nd_tran__second_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = bij
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GxB_FC32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = y ;
// op is second
#define GB_OP_IS_SECOND \
1
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_FC32 || GxB_NO_SECOND_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; for the SECOND operator, cij = bij.
GrB_Info GB_Cdense_ewise3_noaccum__second_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body lives in the shared template, driven by the GB_* macros
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; for SECOND, each accumulated
// value is bij. The *_slice arrays partition B's entries over ntasks tasks.
GrB_Info GB_Cdense_accumB__second_fc32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a single scalar into a dense matrix.
GrB_Info GB_Cdense_accumb__second_fc32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (auto-generated artifact)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, scaling the columns of A by the diagonal matrix D;
// for SECOND, each product is the diagonal entry of D.
GrB_Info GB_AxD__second_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, scaling the rows of B by the diagonal matrix D;
// for SECOND, each product is bij.
GrB_Info GB_DxB__second_fc32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B (or masked C<M> = A+B) over the union of the patterns
// of A and B, using the SECOND operator where both are present.
GrB_Info GB_AaddB__second_fc32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice workspaces used by the template; released by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (or masked C<M> = A.*B) over the intersection of the
// patterns of A and B; for SECOND, cij = bij.
GrB_Info GB_AemultB__second_fc32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice workspaces used by the template; released by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC32_t bij = Bx [p] ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y) with the scalar bound as the 2nd argument; for SECOND this
// means every present entry becomes y (Ax is decoded but unused).
GrB_Info GB_bind2nd__second_fc32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap (GBB is true when Ab is NULL)
        if (!GBB (Ab, p)) continue ;
        // the empty statements below are a generated placeholder for GB_GETA
        ; ;
        Cx [p] = y ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
// C = op (A', y): transpose A and apply SECOND with the scalar bound as the
// 2nd argument, so every entry of C becomes y (via the GB_CAST_OP macro).
GrB_Info GB_bind2nd_tran__second_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
updater_quantile_hist.h | /*!
* Copyright 2017-2022 by XGBoost Contributors
* \file updater_quantile_hist.h
* \brief use quantized feature values to construct a tree
* \author Philip Cho, Tianqi Chen, Egor Smirnov
*/
#ifndef XGBOOST_TREE_UPDATER_QUANTILE_HIST_H_
#define XGBOOST_TREE_UPDATER_QUANTILE_HIST_H_
#include <rabit/rabit.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "xgboost/base.h"
#include "xgboost/data.h"
#include "xgboost/json.h"
#include "hist/evaluate_splits.h"
#include "hist/histogram.h"
#include "hist/expand_entry.h"
#include "constraints.h"
#include "./param.h"
#include "./driver.h"
#include "../common/random.h"
#include "../common/timer.h"
#include "../common/hist_util.h"
#include "../common/row_set.h"
#include "../common/partition_builder.h"
#include "../common/column_matrix.h"
namespace xgboost {
// Helper for deterministically replacing values using an LCG random engine
// that can "jump ahead", so results are independent of thread partitioning.
struct RandomReplace {
 public:
  // similar value as for minstd_rand
  static constexpr uint64_t kBase = 16807;
  // modulus 2^63 (note: not the minstd_rand modulus 2^31 - 1)
  static constexpr uint64_t kMod = static_cast<uint64_t>(1) << 63;
  using EngineT = std::linear_congruential_engine<uint64_t, kBase, 0, kMod>;
  /*
  Right-to-left binary method: https://en.wikipedia.org/wiki/Modular_exponentiation
  Computes (base^exponent * initial_seed) mod `mod`, i.e. the LCG state after
  `exponent` steps, letting callers skip the engine forward in O(log exponent).
  */
  static uint64_t SimpleSkip(uint64_t exponent, uint64_t initial_seed,
                             uint64_t base, uint64_t mod) {
    CHECK_LE(exponent, mod);
    uint64_t result = 1;
    while (exponent > 0) {
      if (exponent % 2 == 1) {
        result = (result * base) % mod;
      }
      // products may wrap around 2^64; since mod (2^63) divides 2^64, the
      // subsequent % mod still yields the correct residue
      base = (base * base) % mod;
      exponent = exponent >> 1;
    }
    // with result we can now find the new seed
    return (result * initial_seed) % mod;
  }
  // Replace gpair entries in [ibegin, iend) with replace_value whenever
  // condition(i, eng) fires. The engine is skipped to position ibegin first,
  // so the outcome for each index is the same regardless of chunking.
  template<typename Condition, typename ContainerData>
  static void MakeIf(Condition condition, const typename ContainerData::value_type replace_value,
                     const uint64_t initial_seed, const size_t ibegin,
                     const size_t iend, ContainerData* gpair) {
    ContainerData& gpair_ref = *gpair;
    const uint64_t displaced_seed = SimpleSkip(ibegin, initial_seed, kBase, kMod);
    EngineT eng(displaced_seed);
    for (size_t i = ibegin; i < iend; ++i) {
      if (condition(i, eng)) {
        gpair_ref[i] = replace_value;
      }
    }
  }
};
namespace tree {
class HistRowPartitioner {
// heuristically chosen block size of parallel partitioning
static constexpr size_t kPartitionBlockSize = 2048;
// worker class that partition a block of rows
common::PartitionBuilder<kPartitionBlockSize> partition_builder_;
// storage for row index
common::RowSetCollection row_set_collection_;
/**
* \brief Turn split values into discrete bin indices.
*/
static void FindSplitConditions(const std::vector<CPUExpandEntry>& nodes, const RegTree& tree,
const GHistIndexMatrix& gmat,
std::vector<int32_t>* split_conditions);
/**
* \brief Update the row set for new splits specifed by nodes.
*/
void AddSplitsToRowSet(const std::vector<CPUExpandEntry>& nodes, RegTree const* p_tree);
public:
bst_row_t base_rowid = 0;
public:
HistRowPartitioner(size_t n_samples, size_t base_rowid, int32_t n_threads) {
row_set_collection_.Clear();
const size_t block_size = n_samples / n_threads + !!(n_samples % n_threads);
dmlc::OMPException exc;
std::vector<size_t>& row_indices = *row_set_collection_.Data();
row_indices.resize(n_samples);
size_t* p_row_indices = row_indices.data();
// parallel initialization o f row indices. (std::iota)
#pragma omp parallel num_threads(n_threads)
{
exc.Run([&]() {
const size_t tid = omp_get_thread_num();
const size_t ibegin = tid * block_size;
const size_t iend = std::min(static_cast<size_t>(ibegin + block_size), n_samples);
for (size_t i = ibegin; i < iend; ++i) {
p_row_indices[i] = i + base_rowid;
}
});
}
row_set_collection_.Init();
this->base_rowid = base_rowid;
}
template <bool any_missing, bool any_cat>
void UpdatePosition(GenericParameter const* ctx, GHistIndexMatrix const& gmat,
common::ColumnMatrix const& column_matrix,
std::vector<CPUExpandEntry> const& nodes, RegTree const* p_tree) {
// 1. Find split condition for each split
const size_t n_nodes = nodes.size();
std::vector<int32_t> split_conditions;
FindSplitConditions(nodes, *p_tree, gmat, &split_conditions);
// 2.1 Create a blocked space of size SUM(samples in each node)
common::BlockedSpace2d space(
n_nodes,
[&](size_t node_in_set) {
int32_t nid = nodes[node_in_set].nid;
return row_set_collection_[nid].Size();
},
kPartitionBlockSize);
// 2.2 Initialize the partition builder
// allocate buffers for storage intermediate results by each thread
partition_builder_.Init(space.Size(), n_nodes, [&](size_t node_in_set) {
const int32_t nid = nodes[node_in_set].nid;
const size_t size = row_set_collection_[nid].Size();
const size_t n_tasks = size / kPartitionBlockSize + !!(size % kPartitionBlockSize);
return n_tasks;
});
CHECK_EQ(base_rowid, gmat.base_rowid);
// 2.3 Split elements of row_set_collection_ to left and right child-nodes for each node
// Store results in intermediate buffers from partition_builder_
common::ParallelFor2d(space, ctx->Threads(), [&](size_t node_in_set, common::Range1d r) {
size_t begin = r.begin();
const int32_t nid = nodes[node_in_set].nid;
const size_t task_id = partition_builder_.GetTaskIdx(node_in_set, begin);
partition_builder_.AllocateForTask(task_id);
switch (column_matrix.GetTypeSize()) {
case common::kUint8BinsTypeSize:
partition_builder_.template Partition<uint8_t, any_missing, any_cat>(
node_in_set, nid, r, split_conditions[node_in_set], gmat, column_matrix, *p_tree,
row_set_collection_[nid].begin);
break;
case common::kUint16BinsTypeSize:
partition_builder_.template Partition<uint16_t, any_missing, any_cat>(
node_in_set, nid, r, split_conditions[node_in_set], gmat, column_matrix, *p_tree,
row_set_collection_[nid].begin);
break;
case common::kUint32BinsTypeSize:
partition_builder_.template Partition<uint32_t, any_missing, any_cat>(
node_in_set, nid, r, split_conditions[node_in_set], gmat, column_matrix, *p_tree,
row_set_collection_[nid].begin);
break;
default:
// no default behavior
CHECK(false) << column_matrix.GetTypeSize();
}
});
// 3. Compute offsets to copy blocks of row-indexes
// from partition_builder_ to row_set_collection_
partition_builder_.CalculateRowOffsets();
// 4. Copy elements from partition_builder_ to row_set_collection_ back
// with updated row-indexes for each tree-node
common::ParallelFor2d(space, ctx->Threads(), [&](size_t node_in_set, common::Range1d r) {
const int32_t nid = nodes[node_in_set].nid;
partition_builder_.MergeToArray(node_in_set, r.begin(),
const_cast<size_t*>(row_set_collection_[nid].begin));
});
// 5. Add info about splits into row_set_collection_
AddSplitsToRowSet(nodes, p_tree);
}
// Partition rows of every applied node between its children.
// Queries the page once for categorical splits and the transposed column
// matrix once for missing values, then forwards to the template
// implementation; both flags must be compile-time constants, hence the
// four explicit instantiations.
void UpdatePosition(GenericParameter const* ctx, GHistIndexMatrix const& page,
                    std::vector<CPUExpandEntry> const& applied, RegTree const* p_tree) {
  auto const& column_matrix = page.Transpose();
  bool const has_categorical = page.cut.HasCategorical();
  bool const has_missing = column_matrix.AnyMissing();
  if (has_categorical) {
    has_missing
        ? this->template UpdatePosition<true, true>(ctx, page, column_matrix, applied, p_tree)
        : this->template UpdatePosition<false, true>(ctx, page, column_matrix, applied, p_tree);
  } else {
    has_missing
        ? this->template UpdatePosition<true, false>(ctx, page, column_matrix, applied, p_tree)
        : this->template UpdatePosition<false, false>(ctx, page, column_matrix, applied, p_tree);
  }
}
// Read-only access to the per-node row partitions.
auto const& Partitions() const { return row_set_collection_; }
// Number of row sets currently tracked (one per tree node seen so far).
size_t Size() const {
  return std::distance(row_set_collection_.begin(), row_set_collection_.end());
}
// Compute the final leaf position of every row, written into
// p_out_position.  The predicate flags rows whose hessian is exactly
// zero (presumably rows excluded by sampling -- confirm against
// PartitionBuilder::LeafPartition).
void LeafPartition(Context const* ctx, RegTree const& tree,
                   common::Span<GradientPair const> gpair,
                   std::vector<bst_node_t>* p_out_position) const {
  partition_builder_.LeafPartition(
      ctx, tree, this->Partitions(), p_out_position,
      // `h - 0.0f == 0.0f` is an exact equality test for hess == 0.
      [&](size_t idx) -> bool { return gpair[idx].GetHess() - .0f == .0f; });
}
// Mutable / const access to the row set of tree node `nidx`.
auto& operator[](bst_node_t nidx) { return row_set_collection_[nidx]; }
auto const& operator[](bst_node_t nidx) const { return row_set_collection_[nidx]; }
};
/*! \brief Build the batch parameter used to fetch quantized (hist) pages. */
inline BatchParam HistBatch(TrainParam const& param) {
  BatchParam batch{param.max_bin, param.sparse_threshold};
  return batch;
}
/*! \brief construct a tree using quantized feature values */
class QuantileHistMaker: public TreeUpdater {
 public:
  explicit QuantileHistMaker(GenericParameter const* ctx, ObjInfo task)
      : TreeUpdater(ctx), task_{task} {}
  void Configure(const Args& args) override;

  // Grow each tree in `trees` from the gradient statistics; node positions
  // of all rows are reported through `out_position`.
  void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
              common::Span<HostDeviceVector<bst_node_t>> out_position,
              const std::vector<RegTree*>& trees) override;

  bool UpdatePredictionCache(const DMatrix *data,
                             linalg::VectorView<float> out_preds) override;

  // (De)serialize only the training parameter under the "train_param" key.
  void LoadConfig(Json const& in) override {
    auto const& config = get<Object const>(in);
    FromJson(config.at("train_param"), &this->param_);
  }
  void SaveConfig(Json* p_out) const override {
    auto& out = *p_out;
    out["train_param"] = ToJson(param_);
  }

  char const* Name() const override {
    return "grow_quantile_histmaker";
  }
  // This updater produces per-row node positions (see Update).
  bool HasNodePosition() const override { return true; }

 protected:
  // training parameter
  TrainParam param_;
  // actual builder that runs the algorithm
  struct Builder {
   public:
    // constructor
    explicit Builder(const size_t n_trees, const TrainParam& param, DMatrix const* fmat,
                     ObjInfo task, GenericParameter const* ctx)
        : n_trees_(n_trees),
          param_(param),
          p_last_fmat_(fmat),
          histogram_builder_{new HistogramBuilder<CPUExpandEntry>},
          task_{task},
          ctx_{ctx},
          monitor_{std::make_unique<common::Monitor>()} {
      monitor_->Init("Quantile::Builder");
    }
    // update one tree, growing
    void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat, RegTree* p_tree,
                    HostDeviceVector<bst_node_t>* p_out_position);
    bool UpdatePredictionCache(DMatrix const* data, linalg::VectorView<float> out_preds) const;

   private:
    // initialize temp data structure
    void InitData(DMatrix* fmat, const RegTree& tree, std::vector<GradientPair>* gpair);
    size_t GetNumberOfTrees();
    void InitSampling(const DMatrix& fmat, std::vector<GradientPair>* gpair);
    // evaluate and expand the root node
    CPUExpandEntry InitRoot(DMatrix* p_fmat, RegTree* p_tree,
                            const std::vector<GradientPair>& gpair_h);
    void BuildHistogram(DMatrix* p_fmat, RegTree* p_tree,
                        std::vector<CPUExpandEntry> const& valid_candidates,
                        std::vector<GradientPair> const& gpair);
    void LeafPartition(RegTree const& tree, common::Span<GradientPair const> gpair,
                       std::vector<bst_node_t>* p_out_position);
    // driver of the growth loop
    void ExpandTree(DMatrix* p_fmat, RegTree* p_tree, const std::vector<GradientPair>& gpair_h,
                    HostDeviceVector<bst_node_t>* p_out_position);

   private:
    const size_t n_trees_;
    const TrainParam& param_;
    std::shared_ptr<common::ColumnSampler> column_sampler_{
        std::make_shared<common::ColumnSampler>()};
    // thread-local copy of the gradients (filled during sampling/init)
    std::vector<GradientPair> gpair_local_;
    std::unique_ptr<HistEvaluator<CPUExpandEntry>> evaluator_;
    // one partitioner per data page
    std::vector<HistRowPartitioner> partitioner_;
    // back pointers to tree and data matrix
    const RegTree* p_last_tree_{nullptr};
    DMatrix const* const p_last_fmat_;
    std::unique_ptr<HistogramBuilder<CPUExpandEntry>> histogram_builder_;
    ObjInfo task_;
    // Context for number of threads
    Context const* ctx_;
    std::unique_ptr<common::Monitor> monitor_;
  };

 protected:
  std::unique_ptr<Builder> pimpl_;
  ObjInfo task_;
};
} // namespace tree
} // namespace xgboost
#endif // XGBOOST_TREE_UPDATER_QUANTILE_HIST_H_
|
convolution_sgemm_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Quantize a float to the symmetric int8 range [-127, 127]:
// round to nearest (ties away from zero), then saturate.
static inline signed char float2int8(float v)
{
    int q = (int)round(v);
    if (q > 127)
        q = 127;
    else if (q < -127)
        q = -127;
    return (signed char)q;
}
// im2col + packed 4x4 int8 GEMM convolution (plain C fallback loops).
// top_blob receives the raw int32 accumulators (no dequant/requant here;
// output pointers are int*).
// NOTE(review): the im2row step indexes the input at row*w+col without a
// bounds check, so bottom_blob is presumably already padded by the caller
// -- confirm at the call site.
static void conv_im2col_sgemm_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, \
        const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char *kernel = _kernel;

    // im2row: one row per output pixel, laid out as [inch][kernel_h][kernel_w]
    Mat bottom_im2row(kernel_h*kernel_w*inch, outw*outh, 1UL, opt.workspace_allocator);
    {
        signed char* ret = (signed char*)bottom_im2row;
        int retID = 0;

        for (int i=0; i<outh; i++)
        {
            for (int j=0; j<outw; j++)
            {
                for (int p=0; p<inch; p++)
                {
                    const signed char* input = bottom_blob.channel(p);
                    for (int u=0; u<kernel_h; u++)
                    {
                        for (int v=0; v<kernel_w; v++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;

                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;

    // int M = outch;                    // outch
    int N = outw * outh;                 // outsize or out stride
    int K = kernel_w * kernel_h * inch;  // ksize * inch

    // bottom_im2row memory packed 4 x 4
    // Interleave groups of 4 im2row rows so the GEMM inner loop reads them
    // contiguously; each leftover row (out_size % 4) gets its own channel.
    Mat bottom_tm(4*kernel_size, inch, out_size/4 + out_size%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 4;

            const signed char* img0 = bottom_im2row.row<signed char>(i);
            const signed char* img1 = bottom_im2row.row<signed char>(i+1);
            const signed char* img2 = bottom_im2row.row<signed char>(i+2);
            const signed char* img3 = bottom_im2row.row<signed char>(i+3);

            signed char* tmpptr = bottom_tm.channel(i/4);

            int q = 0;
            // two k-steps per iteration: 4 rows x 2 values interleaved
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img1[0];
                tmpptr[3] = img1[1];
                tmpptr[4] = img2[0];
                tmpptr[5] = img2[1];
                tmpptr[6] = img3[0];
                tmpptr[7] = img3[1];

                tmpptr += 8;
                img0 += 2;
                img1 += 2;
                img2 += 2;
                img3 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img2[0];
                tmpptr[3] = img3[0];

                tmpptr += 4;
                img0 += 1;
                img1 += 1;
                img2 += 1;
                img3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_size_start; i<out_size; i++)
        {
            const signed char* img0 = bottom_im2row.row<signed char>(i);

            signed char* tmpptr = bottom_tm.channel(i/4 + i%4);

            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];

                tmpptr += 2;
                img0 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += 1;
            }
        }
    }

    // kernel memory packed 4 x 4
    // Same interleave for the weights of 4 output channels at a time.
    Mat kernel_tm(4*kernel_size, inch, outch/4 + outch%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int p = pp * 4;

            const signed char* k0 = kernel + (p+0)*inch*kernel_size;
            const signed char* k1 = kernel + (p+1)*inch*kernel_size;
            const signed char* k2 = kernel + (p+2)*inch*kernel_size;
            const signed char* k3 = kernel + (p+3)*inch*kernel_size;

            signed char* ktmp = kernel_tm.channel(p/4);

            int q=0;
            for (; q+1<inch*kernel_size; q+=2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp[2] = k1[0];
                ktmp[3] = k1[1];
                ktmp[4] = k2[0];
                ktmp[5] = k2[1];
                ktmp[6] = k3[0];
                ktmp[7] = k3[1];
                ktmp += 8;

                k0 += 2;
                k1 += 2;
                k2 += 2;
                k3 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k1[0];
                ktmp[2] = k2[0];
                ktmp[3] = k3[0];
                ktmp += 4;

                k0 += 1;
                k1 += 1;
                k2 += 1;
                k3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=remain_outch_start; p<outch; p++)
        {
            const signed char* k0 = kernel + (p+0)*inch*kernel_size;

            signed char* ktmp = kernel_tm.channel(p/4 + p%4);

            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp += 2;

                k0 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp++;
                k0++;
            }
        }
    }

    // 4x4
    // sgemm(int M, int N, int K, float* A, float* B, float* C)
    {
        // int M = outch;                      // outch
        // int N = outw * outh;                // outsize or out stride
        // int L = kernel_w * kernel_h * inch; // ksize * inch

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        // main tile: 4 output channels x 4 output pixels per iteration
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int i = pp * 4;

            int* output0 = top_blob.channel(i);
            int* output1 = top_blob.channel(i+1);
            int* output2 = top_blob.channel(i+2);
            int* output3 = top_blob.channel(i+3);

            int j=0;
            for (; j+3<N; j=j+4)
            {
                signed char* vb = bottom_tm.channel(j/4);
                signed char* va = kernel_tm.channel(i/4);

                int sum0[4] = {0};
                int sum1[4] = {0};
                int sum2[4] = {0};
                int sum3[4] = {0};

                int k=0;
                // unrolled by 2 over k: va = 4 channels x 2 k-steps,
                // vb = 4 pixels x 2 k-steps (matches the packing above)
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[2*n];   // k0
                        sum0[n] += (int)va[1] * vb[2*n+1];

                        sum1[n] += (int)va[2] * vb[2*n];   // k1
                        sum1[n] += (int)va[3] * vb[2*n+1];

                        sum2[n] += (int)va[4] * vb[2*n];   // k2
                        sum2[n] += (int)va[5] * vb[2*n+1];

                        sum3[n] += (int)va[6] * vb[2*n];   // k3
                        sum3[n] += (int)va[7] * vb[2*n+1];
                    }
                    va += 8;
                    vb += 8;
                }
                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[n];
                        sum1[n] += (int)va[1] * vb[n];
                        sum2[n] += (int)va[2] * vb[n];
                        sum3[n] += (int)va[3] * vb[n];
                    }
                    va += 4;
                    vb += 4;
                }

                for (int n=0; n<4; n++)
                {
                    output0[n] = sum0[n];
                    output1[n] = sum1[n];
                    output2[n] = sum2[n];
                    output3[n] = sum3[n];
                }
                output0 += 4;
                output1 += 4;
                output2 += 4;
                output3 += 4;
            }

            // remaining pixels, one at a time
            for (; j<N; j++)
            {
                int sum0 = 0;
                int sum1 = 0;
                int sum2 = 0;
                int sum3 = 0;

                signed char* vb = bottom_tm.channel(j/4 + j%4);
                signed char* va = kernel_tm.channel(i/4);

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum0 += (int)va[1] * vb[1];

                    sum1 += (int)va[2] * vb[0];
                    sum1 += (int)va[3] * vb[1];

                    sum2 += (int)va[4] * vb[0];
                    sum2 += (int)va[5] * vb[1];

                    sum3 += (int)va[6] * vb[0];
                    sum3 += (int)va[7] * vb[1];

                    va += 8;
                    vb += 2;
                }
                for (; k<K; k++)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum1 += (int)va[1] * vb[0];
                    sum2 += (int)va[2] * vb[0];
                    sum3 += (int)va[3] * vb[0];

                    va += 4;
                    vb += 1;
                }

                output0[0] = sum0;
                output1[0] = sum1;
                output2[0] = sum2;
                output3[0] = sum3;

                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        // remaining output channels, one at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_outch_start; i<outch; i++)
        {
            int* output = top_blob.channel(i);

            int j=0;
            for (; j+3<N; j=j+4)
            {
                signed char* vb = bottom_tm.channel(j/4);
                signed char* va = kernel_tm.channel(i/4 + i%4);
                int sum[4] = {0};

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[2*n];
                        sum[n] += (int)va[1] * vb[2*n+1];
                    }
                    va += 2;
                    vb += 8;
                }
                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[n];
                    }
                    va += 1;
                    vb += 4;
                }

                for (int n=0; n<4; n++)
                {
                    output[n] = sum[n];
                }
                output += 4;
            }

            for (; j<N; j++)
            {
                int sum = 0;

                signed char* vb = bottom_tm.channel(j/4 + j%4);
                signed char* va = kernel_tm.channel(i/4 + i%4);

                for (int k=0; k<K; k++)
                {
                    sum += (int)va[0] * vb[0];

                    va += 1;
                    vb += 1;
                }
                output[0] = sum;

                output++;
            }
        }
    }

    // reference naive implementation kept for documentation
    // // sgemm(int M, int N, int K, float* A, float* B, float* C)
    // {
    //     for (int i=0; i<M; i++)
    //     {
    //         int* output = top_blob.channel(i);
    //         for (int j=0; j<N; j++)
    //         {
    //             int sum = 0;
    //             signed char* vb = (signed char*)bottom_im2row + K * j;
    //             const signed char* va = kernel + K * i;
    //             for (int k=0; k<K; k++)
    //             {
    //                 sum += (int)va[0] * vb[0];
    //                 va += 1;
    //                 vb += 1;
    //             }
    //             output[0] = sum;
    //             output++;
    //         }
    //     }
    // }
}
// im2col + packed 4x4 int8 GEMM convolution with float dequantization.
// Each int32 accumulator is converted as:
//     out[c] = (float)sum * scale_dequant[c] + bias[c]
//
// Parameters:
//   bottom_blob   int8 input feature maps (w x h x inch)
//   top_blob      float output (outw x outh x outch), pre-allocated
//   _kernel       int8 weights, laid out outch * (inch*kernel_h*kernel_w)
//   kernel_w/h    kernel spatial size
//   stride_w/h    convolution strides
//   _bias         optional per-output-channel float bias (null check below)
//   scale_dequant per-output-channel dequant scales (>= outch entries)
//   opt           threading / allocator options
//
// NOTE(review): scale_dequant was taken by value, copying the vector on
// every call; it is only read here, so it is now passed by const reference
// (call sites unchanged).
// NOTE(review): the im2row step reads input at row*w+col with no bounds
// check -- bottom_blob is presumably already padded; confirm at call site.
static void conv_im2col_sgemm_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, \
        const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Mat &_bias, const std::vector<float>& scale_dequant, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char *kernel = _kernel;
    const float* bias = _bias;

    // im2row: one row per output pixel, laid out as [inch][kernel_h][kernel_w]
    Mat bottom_im2row(kernel_h*kernel_w*inch, outw*outh, 1UL, opt.workspace_allocator);
    {
        signed char* ret = (signed char*)bottom_im2row;
        int retID = 0;

        for (int i=0; i<outh; i++)
        {
            for (int j=0; j<outw; j++)
            {
                for (int p=0; p<inch; p++)
                {
                    const signed char* input = bottom_blob.channel(p);
                    for (int u=0; u<kernel_h; u++)
                    {
                        for (int v=0; v<kernel_w; v++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;

                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;

    // int M = outch;                    // outch
    int N = outw * outh;                 // outsize or out stride
    int K = kernel_w * kernel_h * inch;  // ksize * inch

    // bottom_im2row memory packed 4 x 4
    // Interleave groups of 4 im2row rows for the GEMM inner loop; each
    // leftover row (out_size % 4) gets its own channel.
    Mat bottom_tm(4*kernel_size, inch, out_size/4 + out_size%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 4;

            const signed char* img0 = bottom_im2row.row<signed char>(i);
            const signed char* img1 = bottom_im2row.row<signed char>(i+1);
            const signed char* img2 = bottom_im2row.row<signed char>(i+2);
            const signed char* img3 = bottom_im2row.row<signed char>(i+3);

            signed char* tmpptr = bottom_tm.channel(i/4);

            int q = 0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img1[0];
                tmpptr[3] = img1[1];
                tmpptr[4] = img2[0];
                tmpptr[5] = img2[1];
                tmpptr[6] = img3[0];
                tmpptr[7] = img3[1];

                tmpptr += 8;
                img0 += 2;
                img1 += 2;
                img2 += 2;
                img3 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img2[0];
                tmpptr[3] = img3[0];

                tmpptr += 4;
                img0 += 1;
                img1 += 1;
                img2 += 1;
                img3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_size_start; i<out_size; i++)
        {
            const signed char* img0 = bottom_im2row.row<signed char>(i);

            signed char* tmpptr = bottom_tm.channel(i/4 + i%4);

            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];

                tmpptr += 2;
                img0 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += 1;
            }
        }
    }

    // kernel memory packed 4 x 4
    Mat kernel_tm(4*kernel_size, inch, outch/4 + outch%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int p = pp * 4;

            const signed char* k0 = kernel + (p+0)*inch*kernel_size;
            const signed char* k1 = kernel + (p+1)*inch*kernel_size;
            const signed char* k2 = kernel + (p+2)*inch*kernel_size;
            const signed char* k3 = kernel + (p+3)*inch*kernel_size;

            signed char* ktmp = kernel_tm.channel(p/4);

            int q=0;
            for (; q+1<inch*kernel_size; q+=2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp[2] = k1[0];
                ktmp[3] = k1[1];
                ktmp[4] = k2[0];
                ktmp[5] = k2[1];
                ktmp[6] = k3[0];
                ktmp[7] = k3[1];
                ktmp += 8;

                k0 += 2;
                k1 += 2;
                k2 += 2;
                k3 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k1[0];
                ktmp[2] = k2[0];
                ktmp[3] = k3[0];
                ktmp += 4;

                k0 += 1;
                k1 += 1;
                k2 += 1;
                k3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=remain_outch_start; p<outch; p++)
        {
            const signed char* k0 = kernel + (p+0)*inch*kernel_size;

            signed char* ktmp = kernel_tm.channel(p/4 + p%4);

            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp += 2;

                k0 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp++;
                k0++;
            }
        }
    }

    // 4x4
    // sgemm(int M, int N, int K, float* A, float* B, float* C)
    {
        // int M = outch;                      // outch
        // int N = outw * outh;                // outsize or out stride
        // int L = kernel_w * kernel_h * inch; // ksize * inch

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        // main tile: 4 output channels x 4 output pixels per iteration
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int i = pp * 4;

            const float bias0 = bias ? bias[i] : 0.f;
            const float bias1 = bias ? bias[i+1] : 0.f;
            const float bias2 = bias ? bias[i+2] : 0.f;
            const float bias3 = bias ? bias[i+3] : 0.f;

            const float scale_dequant0 = scale_dequant[i];
            const float scale_dequant1 = scale_dequant[i+1];
            const float scale_dequant2 = scale_dequant[i+2];
            const float scale_dequant3 = scale_dequant[i+3];

            float* output0 = top_blob.channel(i);
            float* output1 = top_blob.channel(i+1);
            float* output2 = top_blob.channel(i+2);
            float* output3 = top_blob.channel(i+3);

            int j=0;
            for (; j+3<N; j=j+4)
            {
                signed char* vb = bottom_tm.channel(j/4);
                signed char* va = kernel_tm.channel(i/4);

                int sum0[4] = {0};
                int sum1[4] = {0};
                int sum2[4] = {0};
                int sum3[4] = {0};

                int k=0;
                // unrolled by 2 over k (matches the 4x2 packing above)
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[2*n];   // k0
                        sum0[n] += (int)va[1] * vb[2*n+1];

                        sum1[n] += (int)va[2] * vb[2*n];   // k1
                        sum1[n] += (int)va[3] * vb[2*n+1];

                        sum2[n] += (int)va[4] * vb[2*n];   // k2
                        sum2[n] += (int)va[5] * vb[2*n+1];

                        sum3[n] += (int)va[6] * vb[2*n];   // k3
                        sum3[n] += (int)va[7] * vb[2*n+1];
                    }
                    va += 8;
                    vb += 8;
                }
                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[n];
                        sum1[n] += (int)va[1] * vb[n];
                        sum2[n] += (int)va[2] * vb[n];
                        sum3[n] += (int)va[3] * vb[n];
                    }
                    va += 4;
                    vb += 4;
                }

                // dequantize: scale then add bias
                for (int n=0; n<4; n++)
                {
                    output0[n] = (float)sum0[n] * scale_dequant0 + bias0;
                    output1[n] = (float)sum1[n] * scale_dequant1 + bias1;
                    output2[n] = (float)sum2[n] * scale_dequant2 + bias2;
                    output3[n] = (float)sum3[n] * scale_dequant3 + bias3;
                }
                output0 += 4;
                output1 += 4;
                output2 += 4;
                output3 += 4;
            }

            // remaining pixels, one at a time
            for (; j<N; j++)
            {
                int sum0 = 0;
                int sum1 = 0;
                int sum2 = 0;
                int sum3 = 0;

                signed char* vb = bottom_tm.channel(j/4 + j%4);
                signed char* va = kernel_tm.channel(i/4);

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum0 += (int)va[1] * vb[1];

                    sum1 += (int)va[2] * vb[0];
                    sum1 += (int)va[3] * vb[1];

                    sum2 += (int)va[4] * vb[0];
                    sum2 += (int)va[5] * vb[1];

                    sum3 += (int)va[6] * vb[0];
                    sum3 += (int)va[7] * vb[1];

                    va += 8;
                    vb += 2;
                }
                for (; k<K; k++)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum1 += (int)va[1] * vb[0];
                    sum2 += (int)va[2] * vb[0];
                    sum3 += (int)va[3] * vb[0];

                    va += 4;
                    vb += 1;
                }

                output0[0] = (float)sum0 * scale_dequant0 + bias0;
                output1[0] = (float)sum1 * scale_dequant1 + bias1;
                output2[0] = (float)sum2 * scale_dequant2 + bias2;
                output3[0] = (float)sum3 * scale_dequant3 + bias3;

                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        // remaining output channels, one at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_outch_start; i<outch; i++)
        {
            float* output = top_blob.channel(i);

            const float bias0 = bias ? bias[i] : 0.f;
            const float scale_dequant0 = scale_dequant[i];

            int j=0;
            for (; j+3<N; j=j+4)
            {
                signed char* vb = bottom_tm.channel(j/4);
                signed char* va = kernel_tm.channel(i/4 + i%4);
                int sum[4] = {0};

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[2*n];
                        sum[n] += (int)va[1] * vb[2*n+1];
                    }
                    va += 2;
                    vb += 8;
                }
                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[n];
                    }
                    va += 1;
                    vb += 4;
                }

                for (int n=0; n<4; n++)
                {
                    output[n] = (float)sum[n] * scale_dequant0 + bias0;
                }
                output += 4;
            }

            for (; j<N; j++)
            {
                int sum = 0;

                signed char* vb = bottom_tm.channel(j/4 + j%4);
                signed char* va = kernel_tm.channel(i/4 + i%4);

                for (int k=0; k<K; k++)
                {
                    sum += (int)va[0] * vb[0];

                    va += 1;
                    vb += 1;
                }
                output[0] = (float)sum * scale_dequant0 + bias0;

                output++;
            }
        }
    }

    // reference naive implementation kept for documentation
    // // sgemm(int M, int N, int K, float* A, float* B, float* C)
    // {
    //     for (int i=0; i<M; i++)
    //     {
    //         int* output = top_blob.channel(i);
    //         for (int j=0; j<N; j++)
    //         {
    //             int sum = 0;
    //             signed char* vb = (signed char*)bottom_im2row + K * j;
    //             const signed char* va = kernel + K * i;
    //             for (int k=0; k<K; k++)
    //             {
    //                 sum += (int)va[0] * vb[0];
    //                 va += 1;
    //                 vb += 1;
    //             }
    //             output[0] = sum;
    //             output++;
    //         }
    //     }
    // }
}
// im2col + packed 4x4 int8 GEMM convolution with int8 requantization.
// Each int32 accumulator is converted as:
//     out[c] = float2int8(((float)sum * scale_requant[2*c] + bias[c]) * scale_requant[2*c+1])
// i.e. scale_requant holds an (in_scale, out_scale) pair per output channel.
//
// Parameters:
//   bottom_blob   int8 input feature maps (w x h x inch)
//   top_blob      int8 output (outw x outh x outch), pre-allocated
//   _kernel       int8 weights, laid out outch * (inch*kernel_h*kernel_w)
//   kernel_w/h    kernel spatial size
//   stride_w/h    convolution strides
//   _bias         optional per-output-channel float bias (null check below)
//   scale_requant per-output-channel (in, out) scale pairs (>= 2*outch entries)
//   opt           threading / allocator options
//
// NOTE(review): scale_requant was taken by value, copying the vector on
// every call; it is only read here, so it is now passed by const reference
// (call sites unchanged).
// NOTE(review): the im2row step reads input at row*w+col with no bounds
// check -- bottom_blob is presumably already padded; confirm at call site.
static void conv_im2col_sgemm_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, \
        const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Mat &_bias, const std::vector<float>& scale_requant, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char *kernel = _kernel;
    const float* bias = _bias;

    // im2row: one row per output pixel, laid out as [inch][kernel_h][kernel_w]
    Mat bottom_im2row(kernel_h*kernel_w*inch, outw*outh, 1UL, opt.workspace_allocator);
    {
        signed char* ret = (signed char*)bottom_im2row;
        int retID = 0;

        for (int i=0; i<outh; i++)
        {
            for (int j=0; j<outw; j++)
            {
                for (int p=0; p<inch; p++)
                {
                    const signed char* input = bottom_blob.channel(p);
                    for (int u=0; u<kernel_h; u++)
                    {
                        for (int v=0; v<kernel_w; v++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;

                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;

    // int M = outch;                    // outch
    int N = outw * outh;                 // outsize or out stride
    int K = kernel_w * kernel_h * inch;  // ksize * inch

    // bottom_im2row memory packed 4 x 4
    // Interleave groups of 4 im2row rows for the GEMM inner loop; each
    // leftover row (out_size % 4) gets its own channel.
    Mat bottom_tm(4*kernel_size, inch, out_size/4 + out_size%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 4;

            const signed char* img0 = bottom_im2row.row<signed char>(i);
            const signed char* img1 = bottom_im2row.row<signed char>(i+1);
            const signed char* img2 = bottom_im2row.row<signed char>(i+2);
            const signed char* img3 = bottom_im2row.row<signed char>(i+3);

            signed char* tmpptr = bottom_tm.channel(i/4);

            int q = 0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img1[0];
                tmpptr[3] = img1[1];
                tmpptr[4] = img2[0];
                tmpptr[5] = img2[1];
                tmpptr[6] = img3[0];
                tmpptr[7] = img3[1];

                tmpptr += 8;
                img0 += 2;
                img1 += 2;
                img2 += 2;
                img3 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img2[0];
                tmpptr[3] = img3[0];

                tmpptr += 4;
                img0 += 1;
                img1 += 1;
                img2 += 1;
                img3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_size_start; i<out_size; i++)
        {
            const signed char* img0 = bottom_im2row.row<signed char>(i);

            signed char* tmpptr = bottom_tm.channel(i/4 + i%4);

            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];

                tmpptr += 2;
                img0 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += 1;
            }
        }
    }

    // kernel memory packed 4 x 4
    Mat kernel_tm(4*kernel_size, inch, outch/4 + outch%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int p = pp * 4;

            const signed char* k0 = kernel + (p+0)*inch*kernel_size;
            const signed char* k1 = kernel + (p+1)*inch*kernel_size;
            const signed char* k2 = kernel + (p+2)*inch*kernel_size;
            const signed char* k3 = kernel + (p+3)*inch*kernel_size;

            signed char* ktmp = kernel_tm.channel(p/4);

            int q=0;
            for (; q+1<inch*kernel_size; q+=2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp[2] = k1[0];
                ktmp[3] = k1[1];
                ktmp[4] = k2[0];
                ktmp[5] = k2[1];
                ktmp[6] = k3[0];
                ktmp[7] = k3[1];
                ktmp += 8;

                k0 += 2;
                k1 += 2;
                k2 += 2;
                k3 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k1[0];
                ktmp[2] = k2[0];
                ktmp[3] = k3[0];
                ktmp += 4;

                k0 += 1;
                k1 += 1;
                k2 += 1;
                k3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=remain_outch_start; p<outch; p++)
        {
            const signed char* k0 = kernel + (p+0)*inch*kernel_size;

            signed char* ktmp = kernel_tm.channel(p/4 + p%4);

            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp += 2;

                k0 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp++;
                k0++;
            }
        }
    }

    // 4x4
    // sgemm(int M, int N, int K, float* A, float* B, float* C)
    {
        // int M = outch;                      // outch
        // int N = outw * outh;                // outsize or out stride
        // int L = kernel_w * kernel_h * inch; // ksize * inch

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        // main tile: 4 output channels x 4 output pixels per iteration
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int i = pp * 4;

            signed char* output0 = top_blob.channel(i);
            signed char* output1 = top_blob.channel(i+1);
            signed char* output2 = top_blob.channel(i+2);
            signed char* output3 = top_blob.channel(i+3);

            const float bias0 = bias ? bias[i] : 0.f;
            const float bias1 = bias ? bias[i+1] : 0.f;
            const float bias2 = bias ? bias[i+2] : 0.f;
            const float bias3 = bias ? bias[i+3] : 0.f;

            const float scale_requant_in0 = scale_requant[2*i];
            const float scale_requant_out0 = scale_requant[2*i+1];
            const float scale_requant_in1 = scale_requant[2*(i+1)];
            const float scale_requant_out1 = scale_requant[2*(i+1)+1];
            const float scale_requant_in2 = scale_requant[2*(i+2)];
            const float scale_requant_out2 = scale_requant[2*(i+2)+1];
            const float scale_requant_in3 = scale_requant[2*(i+3)];
            const float scale_requant_out3 = scale_requant[2*(i+3)+1];

            int j=0;
            for (; j+3<N; j=j+4)
            {
                signed char* vb = bottom_tm.channel(j/4);
                signed char* va = kernel_tm.channel(i/4);

                int sum0[4] = {0};
                int sum1[4] = {0};
                int sum2[4] = {0};
                int sum3[4] = {0};

                int k=0;
                // unrolled by 2 over k (matches the 4x2 packing above)
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[2*n];   // k0
                        sum0[n] += (int)va[1] * vb[2*n+1];

                        sum1[n] += (int)va[2] * vb[2*n];   // k1
                        sum1[n] += (int)va[3] * vb[2*n+1];

                        sum2[n] += (int)va[4] * vb[2*n];   // k2
                        sum2[n] += (int)va[5] * vb[2*n+1];

                        sum3[n] += (int)va[6] * vb[2*n];   // k3
                        sum3[n] += (int)va[7] * vb[2*n+1];
                    }
                    va += 8;
                    vb += 8;
                }
                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[n];
                        sum1[n] += (int)va[1] * vb[n];
                        sum2[n] += (int)va[2] * vb[n];
                        sum3[n] += (int)va[3] * vb[n];
                    }
                    va += 4;
                    vb += 4;
                }

                // requantize: dequant with in-scale, add bias, re-scale, saturate
                for (int n=0; n<4; n++)
                {
                    output0[n] = float2int8(((float)sum0[n] * scale_requant_in0 + bias0) * scale_requant_out0);
                    output1[n] = float2int8(((float)sum1[n] * scale_requant_in1 + bias1) * scale_requant_out1);
                    output2[n] = float2int8(((float)sum2[n] * scale_requant_in2 + bias2) * scale_requant_out2);
                    output3[n] = float2int8(((float)sum3[n] * scale_requant_in3 + bias3) * scale_requant_out3);
                }
                output0 += 4;
                output1 += 4;
                output2 += 4;
                output3 += 4;
            }

            // remaining pixels, one at a time
            for (; j<N; j++)
            {
                int sum0 = 0;
                int sum1 = 0;
                int sum2 = 0;
                int sum3 = 0;

                signed char* vb = bottom_tm.channel(j/4 + j%4);
                signed char* va = kernel_tm.channel(i/4);

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum0 += (int)va[1] * vb[1];

                    sum1 += (int)va[2] * vb[0];
                    sum1 += (int)va[3] * vb[1];

                    sum2 += (int)va[4] * vb[0];
                    sum2 += (int)va[5] * vb[1];

                    sum3 += (int)va[6] * vb[0];
                    sum3 += (int)va[7] * vb[1];

                    va += 8;
                    vb += 2;
                }
                for (; k<K; k++)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum1 += (int)va[1] * vb[0];
                    sum2 += (int)va[2] * vb[0];
                    sum3 += (int)va[3] * vb[0];

                    va += 4;
                    vb += 1;
                }

                output0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0);
                output1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1);
                output2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2);
                output3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3);

                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        // remaining output channels, one at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_outch_start; i<outch; i++)
        {
            signed char* output = top_blob.channel(i);

            const float bias0 = bias ? bias[i] : 0.f;
            const float scale_requant_in0 = scale_requant[2*i];
            const float scale_requant_out0 = scale_requant[2*i+1];

            int j=0;
            for (; j+3<N; j=j+4)
            {
                signed char* vb = bottom_tm.channel(j/4);
                signed char* va = kernel_tm.channel(i/4 + i%4);
                int sum[4] = {0};

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[2*n];
                        sum[n] += (int)va[1] * vb[2*n+1];
                    }
                    va += 2;
                    vb += 8;
                }
                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[n];
                    }
                    va += 1;
                    vb += 4;
                }

                for (int n=0; n<4; n++)
                {
                    output[n] = float2int8(((float)sum[n] * scale_requant_in0 + bias0) * scale_requant_out0);
                }
                output += 4;
            }

            for (; j<N; j++)
            {
                int sum = 0;

                signed char* vb = bottom_tm.channel(j/4 + j%4);
                signed char* va = kernel_tm.channel(i/4 + i%4);

                for (int k=0; k<K; k++)
                {
                    sum += (int)va[0] * vb[0];

                    va += 1;
                    vb += 1;
                }
                output[0] = float2int8(((float)sum * scale_requant_in0 + bias0) * scale_requant_out0);

                output++;
            }
        }
    }

    // reference naive implementation kept for documentation
    // // sgemm(int M, int N, int K, float* A, float* B, float* C)
    // {
    //     for (int i=0; i<M; i++)
    //     {
    //         int* output = top_blob.channel(i);
    //         for (int j=0; j<N; j++)
    //         {
    //             int sum = 0;
    //             signed char* vb = (signed char*)bottom_im2row + K * j;
    //             const signed char* va = kernel + K * i;
    //             for (int k=0; k<K; k++)
    //             {
    //                 sum += (int)va[0] * vb[0];
    //                 va += 1;
    //                 vb += 1;
    //             }
    //             output[0] = sum;
    //             output++;
    //         }
    //     }
    // }
}
|
GB_binop__islt_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__islt_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__islt_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_int16)
// A*D function (colscale): GB (_AxD__islt_int16)
// D*A function (rowscale): GB (_DxB__islt_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_int16)
// C=scalar+B GB (_bind1st__islt_int16)
// C=scalar+B' GB (_bind1st_tran__islt_int16)
// C=A+scalar GB (_bind2nd__islt_int16)
// C=A'+scalar GB (_bind2nd_tran__islt_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij < bij)
// Type- and operator-specific macros for the ISLT_INT16 binary operator,
// consumed by the generic templates included below.
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (fix: the next two macros previously ended their value line with a stray
// line-continuation backslash, which spliced the following comment line into
// the macro body; the expansion was still 0, but the continuation was fragile
// and hid the comment from the preprocessed output)
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_INT16 || GxB_NO_ISLT_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Dead code: the generator only enables this kernel for the accumulator ops
// listed below; ISLT is not one of them, so this variant is compiled out and
// carries the placeholder name "(none)".
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// All three matrices are dense, so there is no mask or pattern handling here;
// the shared template does the work using the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__islt_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__islt_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
// B_ek_slicing/B_ntasks/B_nthreads: parallel slicing of B's entries
// (presumably produced by GB_ek_slice.h machinery — confirm in caller)
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// this op/type combination was compiled out via GB_control.h
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__islt_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the block above always returns (generated-code artifact)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__islt_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C->x is viewed as the op's output type; the template fills it in place
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__islt_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C->x is viewed as the op's output type; the template fills it in place
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__islt_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace declarations for slicing M, A, and B; GB_FREE_WORKSPACE
// below is expected to release them
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// the alpha/beta scalars are only read when is_eWiseUnion is true
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__islt_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// all numeric work happens inside the included meta-template
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__islt_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for this operator (see the macros above), so only
// the #else branch below is compiled for ISLT_INT16.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__islt_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// parallel work is partitioned by slicing the mask M (M_ek_slicing)
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__islt_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C is held as a bitmap; the template covers all mask variants
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__islt_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs
    const int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // compute only the entries present in the bitmap Bb
        if (GBB (Bb, k))
        {
            int16_t bij = GBX (Bx, k, false) ;
            Cx [k] = (x < bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__islt_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    const int16_t y = (*((int16_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // compute only the entries present in the bitmap Ab
        if (GBB (Ab, k))
        {
            int16_t aij = GBX (Ax, k, false) ;
            Cx [k] = (aij < y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__islt_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for code later in this file; these are preprocessor
// directives, not statements, so their position after the returns is fine
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__islt_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// y is bound as the second operand; the transpose template does the rest
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__identity_bool_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_bool_fp64
// op(A') function: GB_tran__identity_bool_fp64
// C type: bool
// A type: double
// cast: bool cij = (bool) aij
// unaryop: cij = aij
// Type-specific macros for the IDENTITY operator with bool output and
// fp64 input, consumed by GB_unop / GB_tran below.
#define GB_ATYPE \
double
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_bool_fp64
(
    bool *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = (bool) Ax [p], entry by entry; this is the GB_CAST_OP
    // macro written out inline for clarity
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;    // fetch the input entry
        bool cij = (bool) aij ;  // typecast double -> bool
        Cx [p] = cij ;           // identity op: store unchanged
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_bool_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// only the numeric phase is compiled here; the template uses the GB_*
// macros defined above for the type-specific work
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
/* Parenthesize each use of the arguments in the expansion (not just in the
 * comparison) so expressions like MAX(a & b, c) or MIN(x, y = z) group
 * correctly regardless of operator precedence at the use site. */
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
/* Compute *RESULT = *X - *Y for two `struct timeval' values.
 * Y is normalized in place as a side effect (same contract as the
 * classic GNU libc manual example this follows).
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into Y so that y->tv_usec <= x->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry excess microseconds into Y's seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization the microsecond difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Run the order-1 3D 7-point stencil benchmark.
 * argv[1..3]: interior grid sizes Nx, Ny, Nz; argv[4]: time steps Nt.
 * Prints per-test wall-clock times and the PRINT_RESULTS summary. */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    /* Default to a small problem so missing arguments no longer leave the
     * sizes uninitialized (previously undefined behavior); +2 is the halo. */
    int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* A[2][Nz][Ny][Nx]: two time buffers, alternated via t%2 / (t+1)%2. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 4;
    tile_size[1] = 4;
    tile_size[2] = 4;
    tile_size[3] = 512;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;

    const int BASE = 1024;
    const double alpha = 0.0876;
    const double beta = 0.0765;

    /* Initialize the grid with pseudo-random values.
     * NOTE(review): plane/row/column index 0 is never written here but is
     * read by the stencil below (A[...][i-1][j][k] etc.) — confirm whether
     * the zero boundary should be initialized explicitly. */
    srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    /* Run the stencil TESTS times and keep the best time. */
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);

        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
                            + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                                      A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
                    }
                }
            }
        }
#pragma endscop

        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* BUG FIX: was `min(...)`, but only the MIN macro is defined above,
         * so this did not compile. */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays (Causing performance degradation
    /* for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
    free(A[0][i][j]);
    free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    */
    free(tile_size);   /* was leaked before */
    return 0;
}
|
opi.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
/* Monte-Carlo estimate of pi using per-thread reentrant drand48_r states.
 * argv[1]: number of OpenMP threads. */
int main(int argc, char** argv) {

    // Q2b: get the number of threads to run with from argv and
    // add OpenMP API code to set number of threads here
    if (argc < 2) {
        /* BUG FIX: argv[1] was dereferenced unconditionally before. */
        fprintf(stderr, "Usage: %s <Nthreads>\n", argv[0]);
        return 1;
    }
    int Nthreads = atoi(argv[1]);
    if (Nthreads < 1) Nthreads = 1;  /* atoi yields 0 on garbage input */
    omp_set_num_threads(Nthreads);

    double time = omp_get_wtime();

    /* One reentrant RNG state per thread. */
    struct drand48_data *drandData;
    drandData = (struct drand48_data*) malloc(Nthreads*sizeof(struct drand48_data));

    // Q2c: add an OpenMP parallel region here, wherein each thread initializes
    // one entry in drandData using srand48_r and seed based on thread number
    #pragma omp parallel
    {
        int rank = omp_get_thread_num();
        long int seed = rank;
        srand48_r(seed, drandData+rank);
    }

    /* NOTE(review): the total sample count is 10M/Nthreads, and the
     * parallel-for below further splits that among threads — confirm the
     * intent was not 10M samples in total regardless of thread count. */
    long long int Ntrials = 10000000/Nthreads;

    //need running tallies
    long long int Ntotal=0;
    long long int Ncircle=0;

    #pragma omp parallel reduction(+:Ncircle,Ntotal)
    {
        #pragma omp for
        for (long long int n=0; n<Ntrials; n++) {
            double rand1;
            double rand2;
            //generate two random numbers (use the thread id to offset drandData)
            drand48_r(drandData+omp_get_thread_num(), &rand1);
            drand48_r(drandData+omp_get_thread_num(), &rand2);

            double x = -1 + 2*rand1; //shift to [-1,1]
            double y = -1 + 2*rand2;

            //check if it's in the unit circle
            if (sqrt(x*x+y*y)<=1) Ncircle++;
            Ntotal++;
            /*if (n%100 ==0) {
            double pi = 4.0*Ncircle/ (double) (n);
            printf("Our estimate of pi is %g \n", pi);
            }*/
        }
    }

    /* Area ratio of circle to enclosing square is pi/4. */
    double pi = 4.0*Ncircle/ (double) (Ntotal);

    printf("Our final estimate of pi is %g \n", pi);
    printf("Running time: %f\n", omp_get_wtime()-time);

    free(drandData);
    return 0;
}
|
GB_unaryop__lnot_int64_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int64_uint8
// op(A') function: GB_tran__lnot_int64_uint8
// C type: int64_t
// A type: uint8_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = !(aij != 0)
// Type-specific macros for the LNOT operator with int64_t output and
// uint8_t input, consumed by GB_unop / GB_tran below.
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_int64_uint8
(
    int64_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = !(Ax [p] != 0), entry by entry; this is the GB_CAST_OP
    // macro written out inline for clarity
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint8_t aij = Ax [p] ;       // fetch the input entry
        int64_t z = (int64_t) aij ;  // typecast uint8_t -> int64_t
        Cx [p] = !(z != 0) ;         // logical NOT
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_int64_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// only the numeric phase is compiled here; the template uses the GB_*
// macros defined above for the type-specific work
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
c-omp.c | /* This file contains routines to construct GNU OpenMP constructs,
called from parsing in the C and C++ front ends.
Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>,
Diego Novillo <dnovillo@redhat.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "function.h"
#include "c-common.h"
#include "toplev.h"
#include "tree-gimple.h"
#include "bitmap.h"
#include "langhooks.h"
/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  */

tree
c_finish_omp_master (tree stmt)
{
  /* Wrap the block in an OMP_MASTER node and append it to the current
     statement tree.  */
  tree master = build1 (OMP_MASTER, void_type_node, stmt);
  return add_stmt (master);
}
/* Complete a #pragma omp critical construct.  STMT is the structured-block
   that follows the pragma, NAME is the identifier in the pragma, or null
   if it was omitted.  */

tree
c_finish_omp_critical (tree body, tree name)
{
  tree crit = make_node (OMP_CRITICAL);
  TREE_TYPE (crit) = void_type_node;
  /* NAME is null when the pragma carried no identifier.  */
  OMP_CRITICAL_NAME (crit) = name;
  OMP_CRITICAL_BODY (crit) = body;
  return add_stmt (crit);
}
/* Complete a #pragma omp ordered construct.  STMT is the structured-block
   that follows the pragma.  */

tree
c_finish_omp_ordered (tree stmt)
{
  /* Wrap the block in an OMP_ORDERED node and append it.  */
  tree ordered = build1 (OMP_ORDERED, void_type_node, stmt);
  return add_stmt (ordered);
}
/* Complete a #pragma omp barrier construct.  */

void
c_finish_omp_barrier (void)
{
  /* Emit a call to the GOMP barrier built-in.  */
  tree call
    = build_function_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], NULL);
  add_stmt (call);
}
/* Complete a #pragma omp taskwait construct.  */

void
c_finish_omp_taskwait (void)
{
  /* Emit a call to the GOMP taskwait built-in.  */
  tree call
    = build_function_call_expr (built_in_decls[BUILT_IN_GOMP_TASKWAIT], NULL);
  add_stmt (call);
}
/* Complete a #pragma omp atomic construct. The expression to be
implemented atomically is LHS code= RHS. The value returned is
either error_mark_node (if the construct was erroneous) or an
OMP_ATOMIC node which should be added to the current statement tree
with add_stmt. */
tree
c_finish_omp_atomic (enum tree_code code, tree lhs, tree rhs)
{
tree x, type, addr;
if (lhs == error_mark_node || rhs == error_mark_node)
return error_mark_node;
/* ??? According to one reading of the OpenMP spec, complex type are
supported, but there are no atomic stores for any architecture.
But at least icc 9.0 doesn't support complex types here either.
And lets not even talk about vector types... */
type = TREE_TYPE (lhs);
if (!INTEGRAL_TYPE_P (type)
&& !POINTER_TYPE_P (type)
&& !SCALAR_FLOAT_TYPE_P (type))
{
error ("invalid expression type for %<#pragma omp atomic%>");
return error_mark_node;
}
/* ??? Validate that rhs does not overlap lhs. */
/* Take and save the address of the lhs. From then on we'll reference it
via indirection. */
addr = build_unary_op (ADDR_EXPR, lhs, 0);
if (addr == error_mark_node)
return error_mark_node;
/* save_expr so the address computation is evaluated only once.  */
addr = save_expr (addr);
if (TREE_CODE (addr) != SAVE_EXPR
&& (TREE_CODE (addr) != ADDR_EXPR
|| TREE_CODE (TREE_OPERAND (addr, 0)) != VAR_DECL))
{
/* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
it even after unsharing function body. */
tree var = create_tmp_var_raw (TREE_TYPE (addr), NULL);
addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
}
lhs = build_indirect_ref (addr, NULL);
/* There are lots of warnings, errors, and conversions that need to happen
in the course of interpreting a statement. Use the normal mechanisms
to do this, and then take it apart again. */
x = build_modify_expr (lhs, code, rhs);
if (x == error_mark_node)
return error_mark_node;
gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
/* Keep only the fully converted RHS; OMP_ATOMIC re-pairs it with the
saved address below.  */
rhs = TREE_OPERAND (x, 1);
/* Punt the actual generation of atomic operations to common code. */
return build2 (OMP_ATOMIC, void_type_node, addr, rhs);
}
/* Complete a #pragma omp flush construct.  We don't do anything with the
   variable list that the syntax allows.  */

void
c_finish_omp_flush (void)
{
  /* A flush lowers to the synchronize built-in.  */
  tree call
    = build_function_call_expr (built_in_decls[BUILT_IN_SYNCHRONIZE], NULL);
  add_stmt (call);
}
/* Check and canonicalize #pragma omp for increment expression.
   Helper function for c_finish_omp_for.  Conceptually, replaces DECL
   inside EXP with zero and folds, yielding the step expression; returns
   error_mark_node when EXP cannot be decomposed that way.  */

static tree
check_omp_for_incr_expr (tree exp, tree decl)
{
  enum tree_code code;
  tree folded;

  /* The increment must be computed in an integral type at least as wide
     as the iteration variable's.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  /* DECL itself contributes zero to the step.  */
  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  code = TREE_CODE (exp);
  if (code == NOP_EXPR)
    {
      /* Strip the conversion, recurse, then convert the result back.  */
      folded = check_omp_for_incr_expr (TREE_OPERAND (exp, 0), decl);
      if (folded != error_mark_node)
	return fold_convert (TREE_TYPE (exp), folded);
    }
  else if (code == MINUS_EXPR)
    {
      /* Only the left operand may contain DECL.  */
      folded = check_omp_for_incr_expr (TREE_OPERAND (exp, 0), decl);
      if (folded != error_mark_node)
	return fold_build2 (MINUS_EXPR, TREE_TYPE (exp), folded,
			    TREE_OPERAND (exp, 1));
    }
  else if (code == PLUS_EXPR)
    {
      /* DECL may appear on either side of the addition; try both.  */
      folded = check_omp_for_incr_expr (TREE_OPERAND (exp, 0), decl);
      if (folded != error_mark_node)
	return fold_build2 (PLUS_EXPR, TREE_TYPE (exp), folded,
			    TREE_OPERAND (exp, 1));
      folded = check_omp_for_incr_expr (TREE_OPERAND (exp, 1), decl);
      if (folded != error_mark_node)
	return fold_build2 (PLUS_EXPR, TREE_TYPE (exp),
			    TREE_OPERAND (exp, 0), folded);
    }

  return error_mark_node;
}
/* Validate and emit code for the OpenMP directive #pragma omp for.
INIT, COND, INCR, BODY and PRE_BODY are the five basic elements
of the loop (initialization expression, controlling predicate, increment
expression, body of the loop and statements to go before the loop).
DECL is the iteration variable. */
tree
c_finish_omp_for (location_t locus, tree declv, tree initv, tree condv,
tree incrv, tree body, tree pre_body)
{
location_t elocus;
bool fail = false;
int i;
/* declv/initv/condv/incrv run in parallel: element I of each vector
describes one loop level. */
gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
{
tree decl = TREE_VEC_ELT (declv, i);
tree init = TREE_VEC_ELT (initv, i);
tree cond = TREE_VEC_ELT (condv, i);
tree incr = TREE_VEC_ELT (incrv, i);
elocus = locus;
if (EXPR_HAS_LOCATION (init))
elocus = EXPR_LOCATION (init);
/* Validate the iteration variable. */
if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
&& TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
{
error ("%Hinvalid type for iteration variable %qE", &elocus, decl);
fail = true;
}
/* In the case of "for (int i = 0...)", init will be a decl. It should
have a DECL_INITIAL that we can turn into an assignment. */
if (init == decl)
{
elocus = DECL_SOURCE_LOCATION (decl);
init = DECL_INITIAL (decl);
if (init == NULL)
{
error ("%H%qE is not initialized", &elocus, decl);
init = integer_zero_node;
fail = true;
}
init = build_modify_expr (decl, NOP_EXPR, init);
SET_EXPR_LOCATION (init, elocus);
}
gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
gcc_assert (TREE_OPERAND (init, 0) == decl);
if (cond == NULL_TREE)
{
error ("%Hmissing controlling predicate", &elocus);
fail = true;
}
else
{
/* Canonicalize the condition so DECL ends up as the left operand
of a relational comparison. */
bool cond_ok = false;
if (EXPR_HAS_LOCATION (cond))
elocus = EXPR_LOCATION (cond);
if (TREE_CODE (cond) == LT_EXPR
|| TREE_CODE (cond) == LE_EXPR
|| TREE_CODE (cond) == GT_EXPR
|| TREE_CODE (cond) == GE_EXPR)
{
tree op0 = TREE_OPERAND (cond, 0);
tree op1 = TREE_OPERAND (cond, 1);
/* 2.5.1. The comparison in the condition is computed in
the type of DECL, otherwise the behavior is undefined.
For example:
long n; int i;
i < n;
according to ISO will be evaluated as:
(long)i < n;
We want to force:
i < (int)n; */
if (TREE_CODE (op0) == NOP_EXPR
&& decl == TREE_OPERAND (op0, 0))
{
TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
TREE_OPERAND (cond, 1)
= fold_build1 (NOP_EXPR, TREE_TYPE (decl),
TREE_OPERAND (cond, 1));
}
else if (TREE_CODE (op1) == NOP_EXPR
&& decl == TREE_OPERAND (op1, 0))
{
TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
TREE_OPERAND (cond, 0)
= fold_build1 (NOP_EXPR, TREE_TYPE (decl),
TREE_OPERAND (cond, 0));
}
if (decl == TREE_OPERAND (cond, 0))
cond_ok = true;
else if (decl == TREE_OPERAND (cond, 1))
{
/* DECL is on the right-hand side: swap the comparison so it
moves to the left. */
TREE_SET_CODE (cond,
swap_tree_comparison (TREE_CODE (cond)));
TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
TREE_OPERAND (cond, 0) = decl;
cond_ok = true;
}
}
if (!cond_ok)
{
error ("%Hinvalid controlling predicate", &elocus);
fail = true;
}
}
if (incr == NULL_TREE)
{
error ("%Hmissing increment expression", &elocus);
fail = true;
}
else
{
bool incr_ok = false;
if (EXPR_HAS_LOCATION (incr))
elocus = EXPR_LOCATION (incr);
/* Check all the valid increment expressions: v++, v--, ++v, --v,
v = v + incr, v = incr + v and v = v - incr. */
switch (TREE_CODE (incr))
{
case POSTINCREMENT_EXPR:
case PREINCREMENT_EXPR:
case POSTDECREMENT_EXPR:
case PREDECREMENT_EXPR:
if (TREE_OPERAND (incr, 0) != decl)
break;
incr_ok = true;
if (POINTER_TYPE_P (TREE_TYPE (decl)))
{
/* Pointer iteration is rewritten as an explicit
POINTER_PLUS_EXPR assignment. */
tree t = fold_convert (sizetype, TREE_OPERAND (incr, 1));
if (TREE_CODE (incr) == POSTDECREMENT_EXPR
|| TREE_CODE (incr) == PREDECREMENT_EXPR)
t = fold_build1 (NEGATE_EXPR, sizetype, t);
t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (decl), decl, t);
incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
}
break;
case MODIFY_EXPR:
if (TREE_OPERAND (incr, 0) != decl)
break;
if (TREE_OPERAND (incr, 1) == decl)
break;
if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
&& (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
|| TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
incr_ok = true;
else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
|| (TREE_CODE (TREE_OPERAND (incr, 1))
== POINTER_PLUS_EXPR))
&& TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
incr_ok = true;
else
{
/* Fall back: canonicalize an arbitrary RHS into DECL + step
via check_omp_for_incr_expr. */
tree t = check_omp_for_incr_expr (TREE_OPERAND (incr, 1),
decl);
if (t != error_mark_node)
{
incr_ok = true;
t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
}
}
break;
default:
break;
}
if (!incr_ok)
{
error ("%Hinvalid increment expression", &elocus);
fail = true;
}
}
/* Store back the possibly rewritten init and incr expressions. */
TREE_VEC_ELT (initv, i) = init;
TREE_VEC_ELT (incrv, i) = incr;
}
if (fail)
return NULL;
else
{
/* Everything validated: build the OMP_FOR node and emit it. */
tree t = make_node (OMP_FOR);
TREE_TYPE (t) = void_type_node;
OMP_FOR_INIT (t) = initv;
OMP_FOR_COND (t) = condv;
OMP_FOR_INCR (t) = incrv;
OMP_FOR_BODY (t) = body;
OMP_FOR_PRE_BODY (t) = pre_body;
SET_EXPR_LOCATION (t, locus);
return add_stmt (t);
}
}
/* Divide CLAUSES into two lists: those that apply to a parallel construct,
and those that apply to a work-sharing construct. Place the results in
*PAR_CLAUSES and *WS_CLAUSES respectively. In addition, add a nowait
clause to the work-sharing list. */
void
c_split_parallel_clauses (tree clauses, tree *par_clauses, tree *ws_clauses)
{
  tree next;

  *par_clauses = NULL;
  /* Seed the work-sharing list with a NOWAIT clause: the barrier is
     provided by the enclosing parallel construct instead.  */
  *ws_clauses = build_omp_clause (OMP_CLAUSE_NOWAIT);

  /* Walk the clause chain, unlinking each clause and pushing it onto
     the front of the appropriate destination list.  NEXT is saved
     first because OMP_CLAUSE_CHAIN is rewritten below.  */
  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	case OMP_CLAUSE_LASTPRIVATE:
#ifdef KEY
	  /* Lastprivate should belong to work-sharing constructs. For C,
	     we leave it with the parallel construct, as wgen will combine
	     the parallel and work-sharing constructs into one. For C++,
	     wgen will generate parallel and work-sharing constructs
	     separately. So move lastprivate to work-sharing, and add
	     a "shared" clause for it to the parallel construct. This is
	     required in case there is a default(none) clause. See bug
	     13727. */
	  if (flag_spin_file && lang_cplus())
	    {
	      tree add_clause;
	      OMP_CLAUSE_CHAIN (clauses) = *ws_clauses;
	      *ws_clauses = clauses;
	      /* Mirror the variable as "shared" on the parallel construct
		 so a default(none) clause does not reject it.  */
	      add_clause = build_omp_clause (OMP_CLAUSE_SHARED);
	      OMP_CLAUSE_DECL (add_clause) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_CHAIN (add_clause) = *par_clauses;
	      *par_clauses = add_clause;
	      break;
	    }
#endif
	  /* FALLTHRU -- when not moved above, lastprivate is kept on the
	     parallel construct along with the clauses below.  */
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_DEFAULT:
	  /* Data-sharing and parallel-control clauses go to the
	     parallel construct.  */
	  OMP_CLAUSE_CHAIN (clauses) = *par_clauses;
	  *par_clauses = clauses;
	  break;
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	  /* Loop-scheduling clauses go to the work-sharing construct.  */
	  OMP_CLAUSE_CHAIN (clauses) = *ws_clauses;
	  *ws_clauses = clauses;
	  break;
	default:
	  gcc_unreachable ();
	}
    }
}
/* True if OpenMP sharing attribute of DECL is predetermined. */
enum omp_clause_default_kind
c_omp_predetermined_sharing (tree decl)
{
  /* A const-qualified variable (C has no mutable members) is
     predetermined shared; any other DECL has no predetermined
     sharing attribute.  */
  return TREE_READONLY (decl)
	 ? OMP_CLAUSE_DEFAULT_SHARED
	 : OMP_CLAUSE_DEFAULT_UNSPECIFIED;
}
/* csr_matop.c */
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matrix operation functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "seq_mv.h"
#include "csr_matrix.h"
/*--------------------------------------------------------------------------
* hypre_CSRMatrixAddFirstPass:
*
* Performs the first pass needed for Matrix/Matrix addition (C = A + B).
* This function:
* 1) Computes the row pointer of the resulting matrix C_i
* 2) Allocates memory for the matrix C and returns it to the user
*
* Notes: 1) It can be used safely inside OpenMP parallel regions.
* 2) firstrow, lastrow and marker are private variables.
* 3) The remaining arguments are shared variables.
* 4) twspace (thread workspace) must be allocated outside the
* parallel region.
* 5) The mapping arrays map_A2C and map_B2C are used when adding
* off-diagonal matrices. They can be set to NULL pointer when
* adding diagonal matrices.
* 6) Assumes that the elements of C_i are initialized to zero.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixAddFirstPass( HYPRE_Int            firstrow,
                             HYPRE_Int            lastrow,
                             HYPRE_Int           *twspace,
                             HYPRE_Int           *marker,
                             HYPRE_Int           *map_A2C,
                             HYPRE_Int           *map_B2C,
                             hypre_CSRMatrix     *A,
                             hypre_CSRMatrix     *B,
                             HYPRE_Int            nrows_C,
                             HYPRE_Int            nnzrows_C,
                             HYPRE_Int            ncols_C,
                             HYPRE_Int           *rownnz_C,
                             HYPRE_MemoryLocation memory_location_C,
                             HYPRE_Int           *C_i,
                             hypre_CSRMatrix    **C_ptr )
{
   HYPRE_Int   *A_i = hypre_CSRMatrixI(A);
   HYPRE_Int   *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int   *B_i = hypre_CSRMatrixI(B);
   HYPRE_Int   *B_j = hypre_CSRMatrixJ(B);

   HYPRE_Int    i, ia, ib, ic, iic, ii, i1;
   HYPRE_Int    jcol, jj;
   HYPRE_Int    num_threads = hypre_NumActiveThreads();
   HYPRE_Int    num_nonzeros;

   /* Initialize marker array.  marker[jcol] records the last row in
      which column jcol was seen, so duplicate columns within a row are
      counted only once. */
   for (i = 0; i < ncols_C; i++)
   {
      marker[i] = -1;
   }

   ii = hypre_GetThreadNum();
   num_nonzeros = 0;
   /* Count the nonzeros of C = A + B for this thread's rows
      [firstrow, lastrow).  Every entry of A contributes; an entry of B
      contributes only when A did not already touch that column in the
      same row. */
   for (ic = firstrow; ic < lastrow; ic++)
   {
      /* iic is the actual row index; rownnz_C (when non-NULL) lists
         only the rows with nonzeros. */
      iic = rownnz_C ? rownnz_C[ic] : ic;
      if (map_A2C)
      {
         /* Off-diagonal case: translate A's local columns into
            C's column space. */
         for (ia = A_i[iic]; ia < A_i[iic+1]; ia++)
         {
            jcol = map_A2C[A_j[ia]];
            marker[jcol] = iic;
            num_nonzeros++;
         }
      }
      else
      {
         for (ia = A_i[iic]; ia < A_i[iic+1]; ia++)
         {
            jcol = A_j[ia];
            marker[jcol] = iic;
            num_nonzeros++;
         }
      }

      if (map_B2C)
      {
         for (ib = B_i[iic]; ib < B_i[iic+1]; ib++)
         {
            jcol = map_B2C[B_j[ib]];
            if (marker[jcol] != iic)
            {
               marker[jcol] = iic;
               num_nonzeros++;
            }
         }
      }
      else
      {
         for (ib = B_i[iic]; ib < B_i[iic+1]; ib++)
         {
            jcol = B_j[ib];
            if (marker[jcol] != iic)
            {
               marker[jcol] = iic;
               num_nonzeros++;
            }
         }
      }
      /* Running (thread-local) count; shifted to a global prefix sum in
         the correction phases below. */
      C_i[iic+1] = num_nonzeros;
   }
   /* Publish this thread's total so the other threads can compute
      their global offsets. */
   twspace[ii] = num_nonzeros;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

   /* Correct C_i - phase 1: shift this thread's row pointers by the
      number of nonzeros counted by all lower-numbered threads. */
   if (ii)
   {
      jj = twspace[0];
      for (i1 = 1; i1 < ii; i1++)
      {
         jj += twspace[i1];
      }

      for (ic = firstrow; ic < lastrow; ic++)
      {
         iic = rownnz_C ? rownnz_C[ic] : ic;
         C_i[iic+1] += jj;
      }
   }
   else
   {
      /* Thread 0 meanwhile computes the global nonzero count and
         creates/allocates C (C_j and C_data via Initialize_v2). */
      num_nonzeros = 0;
      for (i1 = 0; i1 < num_threads; i1++)
      {
         num_nonzeros += twspace[i1];
      }

      *C_ptr = hypre_CSRMatrixCreate(nrows_C, ncols_C, num_nonzeros);
      hypre_CSRMatrixI(*C_ptr) = C_i;
      hypre_CSRMatrixRownnz(*C_ptr) = rownnz_C;
      hypre_CSRMatrixNumRownnz(*C_ptr) = nnzrows_C;
      hypre_CSRMatrixInitialize_v2(*C_ptr, 0, memory_location_C);
   }

   /* Correct C_i - phase 2: rows absent from rownnz_C are empty, so
      their (still zero) C_i entries are filled with the pointer of the
      preceding nonzero row, keeping C_i monotone. */
   if (rownnz_C != NULL)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      for (ic = firstrow; ic < (lastrow-1); ic++)
      {
         for (iic = rownnz_C[ic] + 1; iic < rownnz_C[ic+1]; iic++)
         {
            hypre_assert(C_i[iic+1] == 0);
            C_i[iic+1] = C_i[rownnz_C[ic]+1];
         }
      }

      /* The gap after this thread's last nonzero row extends either to
         the next thread's first nonzero row or to the end of C. */
      if (ii < (num_threads - 1))
      {
         for (iic = rownnz_C[lastrow-1] + 1; iic < rownnz_C[lastrow]; iic++)
         {
            hypre_assert(C_i[iic+1] == 0);
            C_i[iic+1] = C_i[rownnz_C[lastrow-1]+1];
         }
      }
      else
      {
         for (iic = rownnz_C[lastrow-1] + 1; iic < nrows_C; iic++)
         {
            hypre_assert(C_i[iic+1] == 0);
            C_i[iic+1] = C_i[rownnz_C[lastrow-1]+1];
         }
      }
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

#ifdef HYPRE_DEBUG
   /* Thread 0 sanity-checks the finished row pointer.
      NOTE(review): these checks index A_i/B_i densely with the rows of
      C; presumably A, B, and C have identical row dimensions here --
      confirm against callers. */
   if (!ii)
   {
      for (i = 0; i < nrows_C; i++)
      {
         hypre_assert(C_i[i] <= C_i[i+1]);
         hypre_assert(((A_i[i+1] - A_i[i]) +
                       (B_i[i+1] - B_i[i])) >=
                      (C_i[i+1] - C_i[i]));
         hypre_assert((C_i[i+1] - C_i[i]) >= (A_i[i+1] - A_i[i]));
         hypre_assert((C_i[i+1] - C_i[i]) >= (B_i[i+1] - B_i[i]));
      }
      hypre_assert((C_i[nrows_C] - C_i[0]) == num_nonzeros);
   }
#endif

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixAddSecondPass:
*
* Performs the second pass needed for Matrix/Matrix addition (C = A + B).
* This function computes C_j and C_data.
*
* Notes: see notes for hypre_CSRMatrixAddFirstPass
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixAddSecondPass( HYPRE_Int        firstrow,
                              HYPRE_Int        lastrow,
                              HYPRE_Int       *twspace,
                              HYPRE_Int       *marker,
                              HYPRE_Int       *map_A2C,
                              HYPRE_Int       *map_B2C,
                              HYPRE_Int       *rownnz_C,
                              HYPRE_Complex    alpha,
                              HYPRE_Complex    beta,
                              hypre_CSRMatrix *A,
                              hypre_CSRMatrix *B,
                              hypre_CSRMatrix *C )
{
   HYPRE_Int     *A_i    = hypre_CSRMatrixI(A);
   HYPRE_Int     *A_j    = hypre_CSRMatrixJ(A);
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int      nnzs_A = hypre_CSRMatrixNumNonzeros(A);

   HYPRE_Int     *B_i    = hypre_CSRMatrixI(B);
   HYPRE_Int     *B_j    = hypre_CSRMatrixJ(B);
   HYPRE_Complex *B_data = hypre_CSRMatrixData(B);
   HYPRE_Int      nnzs_B = hypre_CSRMatrixNumNonzeros(B);

   HYPRE_Int     *C_i    = hypre_CSRMatrixI(C);
   HYPRE_Int     *C_j    = hypre_CSRMatrixJ(C);
   HYPRE_Complex *C_data = hypre_CSRMatrixData(C);
   HYPRE_Int      ncols_C = hypre_CSRMatrixNumCols(C);

   HYPRE_Int      ia, ib, ic, iic;
   HYPRE_Int      jcol, pos;

   /* Column maps must come in matched pairs, except when the unmapped
      operand is empty. */
   hypre_assert(( map_A2C && map_B2C) ||
                (!map_A2C && !map_B2C) ||
                ( map_A2C && (nnzs_B == 0)) ||
                ( map_B2C && (nnzs_A == 0)));

   /* Initialize marker vector.  During the fill, marker[jcol] holds the
      position in C_j/C_data where column jcol of the current row was
      stored, so repeated columns accumulate instead of duplicating. */
   for (ia = 0; ia < ncols_C; ia++)
   {
      marker[ia] = -1;
   }

   /* Start writing at this thread's first row, using the row pointer
      built by hypre_CSRMatrixAddFirstPass. */
   pos = C_i[rownnz_C ? rownnz_C[firstrow] : firstrow];
   if ((map_A2C && map_B2C) || ( map_A2C && (nnzs_B == 0)) || ( map_B2C && (nnzs_A == 0)))
   {
      /* Mapped (off-diagonal) variant: translate local column indices
         of A and B into C's column space. */
      for (ic = firstrow; ic < lastrow; ic++)
      {
         iic = rownnz_C ? rownnz_C[ic] : ic;

         /* A's entries are copied unconditionally, scaled by alpha. */
         for (ia = A_i[iic]; ia < A_i[iic+1]; ia++)
         {
            jcol = map_A2C[A_j[ia]];
            C_j[pos] = jcol;
            C_data[pos] = alpha*A_data[ia];
            marker[jcol] = pos;
            pos++;
         }

         /* B's entries either open a new slot or accumulate into the
            slot A created (marker >= C_i[iic] means "seen this row"). */
         for (ib = B_i[iic]; ib < B_i[iic+1]; ib++)
         {
            jcol = map_B2C[B_j[ib]];
            if (marker[jcol] < C_i[iic])
            {
               C_j[pos] = jcol;
               C_data[pos] = beta*B_data[ib];
               marker[jcol] = pos;
               pos++;
            }
            else
            {
               hypre_assert(C_j[marker[jcol]] == jcol);
               C_data[marker[jcol]] += beta*B_data[ib];
            }
         }
         hypre_assert(pos == C_i[iic+1]);
      } /* end for loop */
   }
   else
   {
      /* Unmapped (diagonal) variant: same algorithm with direct
         column indices. */
      for (ic = firstrow; ic < lastrow; ic++)
      {
         iic = rownnz_C ? rownnz_C[ic] : ic;

         for (ia = A_i[iic]; ia < A_i[iic+1]; ia++)
         {
            jcol = A_j[ia];
            C_j[pos] = jcol;
            C_data[pos] = alpha*A_data[ia];
            marker[jcol] = pos;
            pos++;
         }

         for (ib = B_i[iic]; ib < B_i[iic+1]; ib++)
         {
            jcol = B_j[ib];
            if (marker[jcol] < C_i[iic])
            {
               C_j[pos] = jcol;
               C_data[pos] = beta*B_data[ib];
               marker[jcol] = pos;
               pos++;
            }
            else
            {
               hypre_assert(C_j[marker[jcol]] == jcol);
               C_data[marker[jcol]] += beta*B_data[ib];
            }
         }
         hypre_assert(pos == C_i[iic+1]);
      } /* end for loop */
   }

   /* The thread must end exactly at the start of the next thread's
      range. */
   hypre_assert(pos == C_i[rownnz_C ? rownnz_C[lastrow-1] + 1 : lastrow]);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixAdd:
*
* Adds two CSR Matrices A and B and returns a CSR Matrix C = alpha*A + beta*B;
*
* Note: The routine does not check for 0-elements which might be generated
* through cancellation of elements in A and B or already contained
* in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
*--------------------------------------------------------------------------*/
hypre_CSRMatrix*
hypre_CSRMatrixAddHost ( HYPRE_Complex    alpha,
                         hypre_CSRMatrix *A,
                         HYPRE_Complex    beta,
                         hypre_CSRMatrix *B )
{
   /* Input matrix A */
   HYPRE_Int            nrows_A   = hypre_CSRMatrixNumRows(A);
   HYPRE_Int            ncols_A   = hypre_CSRMatrixNumCols(A);
   HYPRE_Int            nnzrows_A = hypre_CSRMatrixNumRownnz(A);
   HYPRE_Int           *rownnz_A  = hypre_CSRMatrixRownnz(A);

   /* Input matrix B */
   HYPRE_Int            nrows_B   = hypre_CSRMatrixNumRows(B);
   HYPRE_Int            ncols_B   = hypre_CSRMatrixNumCols(B);
   HYPRE_Int            nnzrows_B = hypre_CSRMatrixNumRownnz(B);
   HYPRE_Int           *rownnz_B  = hypre_CSRMatrixRownnz(B);

   /* Output matrix C = alpha*A + beta*B */
   hypre_CSRMatrix     *C;
   HYPRE_Int           *C_i;
   HYPRE_Int           *rownnz_C;
   HYPRE_Int            nnzrows_C;
   HYPRE_Int           *twspace;

   HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A);
   HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B);

   /* RL: TODO cannot guarantee, maybe should never assert
      hypre_assert(memory_location_A == memory_location_B); */

   /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
    * not sure if this is the right thing to do.
    * Also, need something like this in other places
    * TODO */
   HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);

   if (nrows_A != nrows_B || ncols_A != ncols_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Warning! incompatible matrix dimensions!\n");
      return NULL;
   }

   /* Thread workspace and (zero-initialized) row pointer of C */
   twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST);
   C_i = hypre_CTAlloc(HYPRE_Int, nrows_A + 1, memory_location_C);

   /* Nonzero-row pattern of C: merging the operands' rownnz arrays is
      only worthwhile when both are row-sparse; otherwise treat every
      row as potentially nonzero. */
   nnzrows_C = nrows_A;
   if ((nnzrows_A < nrows_A) && (nnzrows_B < nrows_B))
   {
      hypre_MergeOrderedArrays(nnzrows_A, rownnz_A,
                               nnzrows_B, rownnz_B,
                               &nnzrows_C, &rownnz_C);
   }
   else
   {
      rownnz_C = NULL;
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int  row_begin, row_end;
      HYPRE_Int *local_marker;

      hypre_partition1D(nnzrows_C, hypre_NumActiveThreads(), hypre_GetThreadNum(),
                        &row_begin, &row_end);
      local_marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);

      /* Pass 1 builds C_i and allocates C; pass 2 fills C_j/C_data. */
      hypre_CSRMatrixAddFirstPass(row_begin, row_end, twspace, local_marker,
                                  NULL, NULL, A, B, nrows_A, nnzrows_C,
                                  ncols_A, rownnz_C, memory_location_C,
                                  C_i, &C);
      hypre_CSRMatrixAddSecondPass(row_begin, row_end, twspace, local_marker,
                                   NULL, NULL, rownnz_C, alpha, beta, A, B, C);

      hypre_TFree(local_marker, HYPRE_MEMORY_HOST);
   } /* end of parallel region */

   /* Free memory */
   hypre_TFree(twspace, HYPRE_MEMORY_HOST);

   return C;
}
hypre_CSRMatrix*
hypre_CSRMatrixAdd( HYPRE_Complex    alpha,
                    hypre_CSRMatrix *A,
                    HYPRE_Complex    beta,
                    hypre_CSRMatrix *B)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* Dispatch to the device kernel when the combined execution policy
      of the two operands resolves to the device. */
   if (hypre_GetExecPolicy2(hypre_CSRMatrixMemoryLocation(A),
                            hypre_CSRMatrixMemoryLocation(B)) == HYPRE_EXEC_DEVICE)
   {
      return hypre_CSRMatrixAddDevice(alpha, A, beta, B);
   }
#endif

   return hypre_CSRMatrixAddHost(alpha, A, beta, B);
}
#if 0
/*--------------------------------------------------------------------------
* hypre_CSRMatrixBigAdd:
*
* RL: comment it out which was used in ams.c. Should be combined with
* above hypre_CSRMatrixAddHost whenever it is needed again
*
* Adds two CSR Matrices A and B with column indices stored as HYPRE_BigInt
* and returns a CSR Matrix C;
*
* Note: The routine does not check for 0-elements which might be generated
* through cancellation of elements in A and B or already contained
* in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
*--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_CSRMatrixBigAdd( hypre_CSRMatrix *A,
                       hypre_CSRMatrix *B )
{
   HYPRE_Complex        *A_data  = hypre_CSRMatrixData(A);
   HYPRE_Int            *A_i     = hypre_CSRMatrixI(A);
   HYPRE_BigInt         *A_j     = hypre_CSRMatrixBigJ(A);
   HYPRE_Int             nrows_A = hypre_CSRMatrixNumRows(A);
   HYPRE_Int             ncols_A = hypre_CSRMatrixNumCols(A);

   HYPRE_Complex        *B_data  = hypre_CSRMatrixData(B);
   HYPRE_Int            *B_i     = hypre_CSRMatrixI(B);
   HYPRE_BigInt         *B_j     = hypre_CSRMatrixBigJ(B);
   HYPRE_Int             nrows_B = hypre_CSRMatrixNumRows(B);
   HYPRE_Int             ncols_B = hypre_CSRMatrixNumCols(B);

   hypre_CSRMatrix      *C;
   HYPRE_Complex        *C_data;
   HYPRE_Int            *C_i;
   HYPRE_BigInt         *C_j;

   HYPRE_Int            *twspace;

   HYPRE_MemoryLocation  memory_location_A = hypre_CSRMatrixMemoryLocation(A);
   HYPRE_MemoryLocation  memory_location_B = hypre_CSRMatrixMemoryLocation(B);

   /* RL: TODO cannot guarantee, maybe should never assert
      hypre_assert(memory_location_A == memory_location_B); */

   /* In the case of A=H, B=D (or vice versa), let C = D. */
   HYPRE_MemoryLocation  memory_location_C = hypre_max(memory_location_A, memory_location_B);

   if (nrows_A != nrows_B || ncols_A != ncols_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
      return NULL;
   }

   /* Allocate thread workspace and the (zero-initialized) row pointer */
   twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST);
   C_i = hypre_CTAlloc(HYPRE_Int, nrows_A + 1, memory_location_C);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int     ia, ib, ic, num_nonzeros;
      HYPRE_Int     ns, ne, pos;
      HYPRE_BigInt  jcol;
      HYPRE_Int     ii, num_threads;
      HYPRE_Int     jj;
      HYPRE_Int    *marker = NULL;

      ii = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();
      hypre_partition1D(nrows_A, num_threads, ii, &ns, &ne);

      /* marker[jcol] = last row in which column jcol was seen */
      marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);
      for (ia = 0; ia < ncols_A; ia++)
      {
         marker[ia] = -1;
      }

      /* First pass: count nonzeros per row.  Only C_i[ic+1] is written
         so every entry of C_i has a unique writer across threads
         (C_i[0] is already zero from hypre_CTAlloc).  The previous
         version also wrote C_i[ic] at the top of the loop, which raced
         with the neighboring thread's write of the same boundary
         entry. */
      num_nonzeros = 0;
      for (ic = ns; ic < ne; ic++)
      {
         for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
         {
            jcol = A_j[ia];
            marker[jcol] = ic;
            num_nonzeros++;
         }

         for (ib = B_i[ic]; ib < B_i[ic+1]; ib++)
         {
            jcol = B_j[ib];
            if (marker[jcol] != ic)
            {
               marker[jcol] = ic;
               num_nonzeros++;
            }
         }
         C_i[ic+1] = num_nonzeros;
      }
      twspace[ii] = num_nonzeros;

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Correct row pointer: shift this thread's rows by the totals of
         all lower-numbered threads. */
      if (ii)
      {
         jj = twspace[0];
         for (ic = 1; ic < ii; ic++)
         {
            jj += twspace[ic]; /* bug fix: was twspace[ia] (stale index) */
         }

         for (ic = ns; ic < ne; ic++)
         {
            C_i[ic+1] += jj;
         }
      }
      else
      {
         /* Thread 0 computes the global count and allocates C. */
         num_nonzeros = 0;
         for (ic = 0; ic < num_threads; ic++)
         {
            num_nonzeros += twspace[ic];
         }

         C = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros);
         hypre_CSRMatrixI(C) = C_i;
         hypre_CSRMatrixInitialize_v2(C, 1, memory_location_C);
         C_j = hypre_CSRMatrixBigJ(C);
         C_data = hypre_CSRMatrixData(C);
      }

      /* Bug fix: a barrier was missing here.  The second pass reads the
         corrected C_i and the C_j/C_data arrays allocated by thread 0,
         so all threads must wait until both are ready. */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Second pass: fill C_j and C_data.  marker[jcol] now stores the
         output position of column jcol in the current row, so entries
         of B accumulate into slots opened by A. */
      for (ia = 0; ia < ncols_A; ia++)
      {
         marker[ia] = -1;
      }

      pos = C_i[ns];
      for (ic = ns; ic < ne; ic++)
      {
         for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
         {
            jcol = A_j[ia];
            C_j[pos] = jcol;
            C_data[pos] = A_data[ia];
            marker[jcol] = pos;
            pos++;
         }

         for (ib = B_i[ic]; ib < B_i[ic+1]; ib++)
         {
            jcol = B_j[ib];
            if (marker[jcol] < C_i[ic])
            {
               C_j[pos] = jcol;
               C_data[pos] = B_data[ib];
               marker[jcol] = pos;
               pos++;
            }
            else
            {
               C_data[marker[jcol]] += B_data[ib];
            }
         }
      }

      hypre_TFree(marker, HYPRE_MEMORY_HOST);
   } /* end of parallel region */

   /* Free memory */
   hypre_TFree(twspace, HYPRE_MEMORY_HOST);

   return C;
}
#endif
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMultiplyHost
*
* Multiplies two CSR Matrices A and B and returns a CSR Matrix C;
*
* Note: The routine does not check for 0-elements which might be generated
* through cancellation of elements in A and B or already contained
* in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
*--------------------------------------------------------------------------*/
hypre_CSRMatrix*
hypre_CSRMatrixMultiplyHost( hypre_CSRMatrix *A,
                             hypre_CSRMatrix *B )
{
   HYPRE_Complex        *A_data    = hypre_CSRMatrixData(A);
   HYPRE_Int            *A_i       = hypre_CSRMatrixI(A);
   HYPRE_Int            *A_j       = hypre_CSRMatrixJ(A);
   HYPRE_Int            *rownnz_A  = hypre_CSRMatrixRownnz(A);
   HYPRE_Int             nrows_A   = hypre_CSRMatrixNumRows(A);
   HYPRE_Int             ncols_A   = hypre_CSRMatrixNumCols(A);
   HYPRE_Int             nnzrows_A = hypre_CSRMatrixNumRownnz(A);
   HYPRE_Int             num_nnz_A = hypre_CSRMatrixNumNonzeros(A);

   HYPRE_Complex        *B_data    = hypre_CSRMatrixData(B);
   HYPRE_Int            *B_i       = hypre_CSRMatrixI(B);
   HYPRE_Int            *B_j       = hypre_CSRMatrixJ(B);
   HYPRE_Int             nrows_B   = hypre_CSRMatrixNumRows(B);
   HYPRE_Int             ncols_B   = hypre_CSRMatrixNumCols(B);
   HYPRE_Int             num_nnz_B = hypre_CSRMatrixNumNonzeros(B);

   HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A);
   HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B);

   hypre_CSRMatrix      *C;
   HYPRE_Complex        *C_data;
   HYPRE_Int            *C_i;
   HYPRE_Int            *C_j;

   HYPRE_Int             ia, ib, ic, ja, jb, num_nonzeros;
   HYPRE_Int             counter;
   HYPRE_Complex         a_entry, b_entry;
   HYPRE_Int             allsquare = 0;
   HYPRE_Int            *twspace;

   /* RL: TODO cannot guarantee, maybe should never assert
      hypre_assert(memory_location_A == memory_location_B); */

   /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
    * not sure if this is the right thing to do.
    * Also, need something like this in other places
    * TODO */
   HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);

   if (ncols_A != nrows_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
      return NULL;
   }

   /* For a square product the diagonal slot of every (dense) row is
      reserved up front -- see the allsquare branches below. */
   if (nrows_A == ncols_B)
   {
      allsquare = 1;
   }

   /* Trivial product: either operand empty means C is empty. */
   if ((num_nnz_A == 0) || (num_nnz_B == 0))
   {
      C = hypre_CSRMatrixCreate(nrows_A, ncols_B, 0);
      hypre_CSRMatrixNumRownnz(C) = 0;
      hypre_CSRMatrixInitialize_v2(C, 0, memory_location_C);

      return C;
   }

   /* Allocate thread workspace and (zero-initialized) row pointer */
   twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST);
   C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, memory_location_C);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(ia, ib, ic, ja, jb, num_nonzeros, counter, a_entry, b_entry)
#endif
   {
      /* B_marker[jb] records, per thread, whether column jb of C has
         been visited in the current row (first pass) or where it was
         stored (second pass). */
      HYPRE_Int  *B_marker = NULL;
      HYPRE_Int   ns, ne, ii, jj;
      HYPRE_Int   num_threads;
      HYPRE_Int   i1, iic;

      ii = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();
      hypre_partition1D(nnzrows_A, num_threads, ii, &ns, &ne);

      B_marker = hypre_CTAlloc(HYPRE_Int, ncols_B, HYPRE_MEMORY_HOST);
      for (ib = 0; ib < ncols_B; ib++)
      {
         B_marker[ib] = -1;
      }

      HYPRE_ANNOTATE_REGION_BEGIN("%s", "First pass");

      /* First pass: compute sizes of C rows.  A column jb enters row
         iic of C if some A(iic,ja) pairs with a nonzero B(ja,jb). */
      num_nonzeros = 0;
      for (ic = ns; ic < ne; ic++)
      {
         if (rownnz_A)
         {
            /* Compressed iteration: only rows of A with nonzeros. */
            iic = rownnz_A[ic];
            C_i[iic] = num_nonzeros;
         }
         else
         {
            iic = ic;
            C_i[iic] = num_nonzeros;
            if (allsquare)
            {
               /* Reserve the diagonal entry unconditionally. */
               B_marker[iic] = iic;
               num_nonzeros++;
            }
         }

         for (ia = A_i[iic]; ia < A_i[iic+1]; ia++)
         {
            ja = A_j[ia];
            for (ib = B_i[ja]; ib < B_i[ja+1]; ib++)
            {
               jb = B_j[ib];
               if (B_marker[jb] != iic)
               {
                  B_marker[jb] = iic;
                  num_nonzeros++;
               }
            }
         }
      }
      twspace[ii] = num_nonzeros;

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Correct C_i - phase 1: shift this thread's row starts by the
         totals counted by lower-numbered threads. */
      if (ii)
      {
         jj = twspace[0];
         for (i1 = 1; i1 < ii; i1++)
         {
            jj += twspace[i1];
         }

         for (i1 = ns; i1 < ne; i1++)
         {
            iic = rownnz_A ? rownnz_A[i1] : i1;
            C_i[iic] += jj;
         }
      }
      else
      {
         /* Thread 0 totals the counts and allocates C. */
         C_i[nrows_A] = 0;
         for (i1 = 0; i1 < num_threads; i1++)
         {
            C_i[nrows_A] += twspace[i1];
         }

         C = hypre_CSRMatrixCreate(nrows_A, ncols_B, C_i[nrows_A]);
         hypre_CSRMatrixI(C) = C_i;
         hypre_CSRMatrixInitialize_v2(C, 0, memory_location_C);
         C_j = hypre_CSRMatrixJ(C);
         C_data = hypre_CSRMatrixData(C);
      }

      /* Correct C_i - phase 2: rows of A absent from rownnz_A are
         empty in C; give them the pointer of the next nonzero row so
         C_i stays monotone. */
      if (rownnz_A != NULL)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
         for (ic = ns; ic < (ne-1); ic++)
         {
            for (iic = rownnz_A[ic] + 1; iic < rownnz_A[ic+1]; iic++)
            {
               C_i[iic] = C_i[rownnz_A[ic+1]];
            }
         }

         /* Tail gap: up to the next thread's first row, or to the end. */
         if (ii < (num_threads - 1))
         {
            for (iic = rownnz_A[ne-1] + 1; iic < rownnz_A[ne]; iic++)
            {
               C_i[iic] = C_i[rownnz_A[ne]];
            }
         }
         else
         {
            for (iic = rownnz_A[ne-1] + 1; iic < nrows_A; iic++)
            {
               C_i[iic] = C_i[nrows_A];
            }
         }
      }

      /* End of First Pass */
      HYPRE_ANNOTATE_REGION_END("%s", "First pass");

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Second pass: Fill in C_data and C_j. */
      HYPRE_ANNOTATE_REGION_BEGIN("%s", "Second pass");
      for (ib = 0; ib < ncols_B; ib++)
      {
         B_marker[ib] = -1;
      }

      counter = rownnz_A ? C_i[rownnz_A[ns]] : C_i[ns];
      for (ic = ns; ic < ne; ic++)
      {
         if (rownnz_A)
         {
            iic = rownnz_A[ic];
         }
         else
         {
            iic = ic;
            if (allsquare)
            {
               /* Pre-store the (possibly numerically zero) diagonal. */
               B_marker[ic] = counter;
               C_data[counter] = 0;
               C_j[counter] = ic;
               counter++;
            }
         }

         for (ia = A_i[iic]; ia < A_i[iic+1]; ia++)
         {
            ja = A_j[ia];
            a_entry = A_data[ia];
            for (ib = B_i[ja]; ib < B_i[ja+1]; ib++)
            {
               jb = B_j[ib];
               b_entry = B_data[ib];
               /* B_marker < row start means "column not yet in this
                  row": open a new slot; otherwise accumulate. */
               if (B_marker[jb] < C_i[iic])
               {
                  B_marker[jb] = counter;
                  C_j[B_marker[jb]] = jb;
                  C_data[B_marker[jb]] = a_entry*b_entry;
                  counter++;
               }
               else
               {
                  C_data[B_marker[jb]] += a_entry*b_entry;
               }
            }
         }
      }
      HYPRE_ANNOTATE_REGION_END("%s", "Second pass");

      /* End of Second Pass */
      hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
   } /*end parallel region */

#ifdef HYPRE_DEBUG
   for (ic = 0; ic < nrows_A; ic++)
   {
      hypre_assert(C_i[ic] <= C_i[ic+1]);
   }
#endif

   // Set rownnz and num_rownnz
   hypre_CSRMatrixSetRownnz(C);

   /* Free memory */
   hypre_TFree(twspace, HYPRE_MEMORY_HOST);

   return C;
}
hypre_CSRMatrix*
hypre_CSRMatrixMultiply( hypre_CSRMatrix *A,
                         hypre_CSRMatrix *B)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* Dispatch to the device kernel when the combined execution policy
      of the two operands resolves to the device. */
   if (hypre_GetExecPolicy2(hypre_CSRMatrixMemoryLocation(A),
                            hypre_CSRMatrixMemoryLocation(B)) == HYPRE_EXEC_DEVICE)
   {
      return hypre_CSRMatrixMultiplyDevice(A, B);
   }
#endif

   return hypre_CSRMatrixMultiplyHost(A, B);
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixDeleteZeros
*--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_CSRMatrixDeleteZeros( hypre_CSRMatrix *A,
                            HYPRE_Real       tol )
{
   HYPRE_Complex   *A_data       = hypre_CSRMatrixData(A);
   HYPRE_Int       *A_i          = hypre_CSRMatrixI(A);
   HYPRE_Int       *A_j          = hypre_CSRMatrixJ(A);
   HYPRE_Int        nrows_A      = hypre_CSRMatrixNumRows(A);
   HYPRE_Int        ncols_A      = hypre_CSRMatrixNumCols(A);
   HYPRE_Int        num_nonzeros = hypre_CSRMatrixNumNonzeros(A);

   hypre_CSRMatrix *B;
   HYPRE_Complex   *B_data;
   HYPRE_Int       *B_i;
   HYPRE_Int       *B_j;

   HYPRE_Int        i, j, pos_B, zeros;

   /* Count the entries whose magnitude is within the drop tolerance. */
   zeros = 0;
   for (i = 0; i < num_nonzeros; i++)
   {
      if (hypre_cabs(A_data[i]) <= tol)
      {
         zeros++;
      }
   }

   /* Nothing to drop: signal "no new matrix needed" to the caller. */
   if (!zeros)
   {
      return NULL;
   }

   B = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros - zeros);
   hypre_CSRMatrixInitialize(B);
   B_i    = hypre_CSRMatrixI(B);
   B_j    = hypre_CSRMatrixJ(B);
   B_data = hypre_CSRMatrixData(B);

   /* Copy the surviving entries row by row, rebuilding the row
      pointer as we go. */
   B_i[0] = 0;
   pos_B  = 0;
   for (i = 0; i < nrows_A; i++)
   {
      for (j = A_i[i]; j < A_i[i+1]; j++)
      {
         if (hypre_cabs(A_data[j]) > tol)
         {
            B_data[pos_B] = A_data[j];
            B_j[pos_B]    = A_j[j];
            pos_B++;
         }
      }
      B_i[i+1] = pos_B;
   }

   return B;
}
/******************************************************************************
*
* Finds transpose of a hypre_CSRMatrix
*
*****************************************************************************/
/**
* idx = idx2*dim1 + idx1
* -> ret = idx1*dim2 + idx2
* = (idx%dim1)*dim2 + idx/dim1
*/
static inline HYPRE_Int
transpose_idx (HYPRE_Int idx, HYPRE_Int dim1, HYPRE_Int dim2)
{
   /* Map a linear index of a dim1 x dim2 array (idx = idx2*dim1 + idx1)
      to the linear index of its transpose (idx1*dim2 + idx2). */
   HYPRE_Int idx1 = idx % dim1;
   HYPRE_Int idx2 = idx / dim1;

   return idx1 * dim2 + idx2;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixTransposeHost
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixTransposeHost(hypre_CSRMatrix  *A,
                             hypre_CSRMatrix **AT,
                             HYPRE_Int         data)
{
   HYPRE_Complex      *A_data     = hypre_CSRMatrixData(A);
   HYPRE_Int          *A_i        = hypre_CSRMatrixI(A);
   HYPRE_Int          *A_j        = hypre_CSRMatrixJ(A);
   HYPRE_Int          *rownnz_A   = hypre_CSRMatrixRownnz(A);
   HYPRE_Int           nnzrows_A  = hypre_CSRMatrixNumRownnz(A);
   HYPRE_Int           num_rows_A = hypre_CSRMatrixNumRows(A);
   HYPRE_Int           num_cols_A = hypre_CSRMatrixNumCols(A);
   HYPRE_Int           num_nnzs_A = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_MemoryLocation memory_location = hypre_CSRMatrixMemoryLocation(A);

   HYPRE_Complex      *AT_data;
   HYPRE_Int          *AT_j;
   HYPRE_Int           num_rows_AT;
   HYPRE_Int           num_cols_AT;
   HYPRE_Int           num_nnzs_AT;

   HYPRE_Int           max_col;
   HYPRE_Int           i, j;

   /*--------------------------------------------------------------
    * First, ascertain that num_cols and num_nonzeros has been set.
    * If not, set them.
    *--------------------------------------------------------------*/
   HYPRE_ANNOTATE_FUNC_BEGIN;

   if (!num_nnzs_A)
   {
      num_nnzs_A = A_i[num_rows_A];
   }

   if (num_rows_A && num_nnzs_A && ! num_cols_A)
   {
      /* Column count unset: scan for the largest column index. */
      max_col = -1;
      for (i = 0; i < num_rows_A; ++i)
      {
         for (j = A_i[i]; j < A_i[i+1]; j++)
         {
            if (A_j[j] > max_col)
            {
               max_col = A_j[j];
            }
         }
      }
      num_cols_A = max_col + 1;
   }

   num_rows_AT = num_cols_A;
   num_cols_AT = num_rows_A;
   num_nnzs_AT = num_nnzs_A;

   *AT = hypre_CSRMatrixCreate(num_rows_AT, num_cols_AT, num_nnzs_AT);
   hypre_CSRMatrixMemoryLocation(*AT) = memory_location;

   if (num_cols_A == 0)
   {
      // JSP: parallel counting sorting breaks down
      // when A has no columns
      hypre_CSRMatrixInitialize(*AT);
      HYPRE_ANNOTATE_FUNC_END;

      return hypre_error_flag;
   }

   AT_j = hypre_CTAlloc(HYPRE_Int, num_nnzs_AT, memory_location);
   hypre_CSRMatrixJ(*AT) = AT_j;
   if (data)
   {
      AT_data = hypre_CTAlloc(HYPRE_Complex, num_nnzs_AT, memory_location);
      hypre_CSRMatrixData(*AT) = AT_data;
   }

   /*-----------------------------------------------------------------
    * Parallel count sort
    *-----------------------------------------------------------------*/
   HYPRE_Int *bucket = hypre_CTAlloc(HYPRE_Int, (num_cols_A + 1)*hypre_NumThreads(),
                                     HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int   ii, num_threads, ns, ne;
      HYPRE_Int   i, j, j0, j1, ir;
      HYPRE_Int   idx, offset;
      HYPRE_Int   transpose_i;
      HYPRE_Int   transpose_i_minus_1;
      HYPRE_Int   transpose_i0;
      HYPRE_Int   transpose_j0;
      HYPRE_Int   transpose_j1;

      ii = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();
      hypre_partition1D(nnzrows_A, num_threads, ii, &ns, &ne);

      /*-----------------------------------------------------------------
       * Count the number of entries that will go into each bucket
       * bucket is used as HYPRE_Int[num_threads][num_colsA] 2D array
       *-----------------------------------------------------------------*/
      if (rownnz_A == NULL)
      {
         /* Dense row iteration: A_i[ns]..A_i[ne] covers exactly this
            thread's entries. */
         for (j = A_i[ns]; j < A_i[ne]; ++j)
         {
            bucket[ii*num_cols_A + A_j[j]]++;
         }
      }
      else
      {
         /* Compressed iteration over nonzero rows only. */
         for (i = ns; i < ne; i++)
         {
            ir = rownnz_A[i];
            for (j = A_i[ir]; j < A_i[ir+1]; ++j)
            {
               bucket[ii*num_cols_A + A_j[j]]++;
            }
         }
      }

      /*-----------------------------------------------------------------
       * Parallel prefix sum of bucket with length num_colsA * num_threads
       * accessed as if it is transposed as HYPRE_Int[num_colsA][num_threads]
       *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* Each thread prefix-sums a contiguous chunk of the transposed
         view (all threads' counts for a range of columns). */
      for (i = ii*num_cols_A + 1; i < (ii + 1)*num_cols_A; ++i)
      {
         transpose_i = transpose_idx(i, num_threads, num_cols_A);
         transpose_i_minus_1 = transpose_idx(i - 1, num_threads, num_cols_A);

         bucket[transpose_i] += bucket[transpose_i_minus_1];
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#pragma omp master
#endif
      {
         /* Serial step: chain the per-chunk partial sums together. */
         for (i = 1; i < num_threads; ++i)
         {
            j0 = num_cols_A*i - 1;
            j1 = num_cols_A*(i + 1) - 1;
            transpose_j0 = transpose_idx(j0, num_threads, num_cols_A);
            transpose_j1 = transpose_idx(j1, num_threads, num_cols_A);

            bucket[transpose_j1] += bucket[transpose_j0];
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* Propagate the carried offset into every non-first chunk. */
      if (ii > 0)
      {
         transpose_i0 = transpose_idx(num_cols_A*ii - 1, num_threads, num_cols_A);
         offset = bucket[transpose_i0];
         for (i = ii*num_cols_A; i < (ii + 1)*num_cols_A - 1; ++i)
         {
            transpose_i = transpose_idx(i, num_threads, num_cols_A);

            bucket[transpose_i] += offset;
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /*----------------------------------------------------------------
       * Load the data and column numbers of AT
       *----------------------------------------------------------------*/
      /* Walk backwards so that pre-decrementing each bucket yields the
         entries of AT in (row, original-order) position; after this
         loop the buckets hold the start offsets of AT's rows. */
      if (data)
      {
         for (i = ne - 1; i >= ns; --i)
         {
            ir = rownnz_A ? rownnz_A[i] : i;
            for (j = A_i[ir + 1] - 1; j >= A_i[ir]; --j)
            {
               idx = A_j[j];
               --bucket[ii*num_cols_A + idx];

               offset = bucket[ii*num_cols_A + idx];
               AT_data[offset] = A_data[j];
               AT_j[offset] = ir;
            }
         }
      }
      else
      {
         for (i = ne - 1; i >= ns; --i)
         {
            ir = rownnz_A ? rownnz_A[i] : i;
            for (j = A_i[ir + 1] - 1; j >= A_i[ir]; --j)
            {
               idx = A_j[j];
               --bucket[ii*num_cols_A + idx];

               offset = bucket[ii*num_cols_A + idx];
               AT_j[offset] = ir;
            }
         }
      }
   } /* end parallel region */

   /* Thread 0's bucket row now holds the row-start offsets of AT, so it
      becomes AT's row pointer; the final entry is set explicitly. */
   hypre_CSRMatrixI(*AT) = hypre_TAlloc(HYPRE_Int, num_cols_A + 1, memory_location);
   hypre_TMemcpy(hypre_CSRMatrixI(*AT), bucket, HYPRE_Int, num_cols_A + 1, memory_location, HYPRE_MEMORY_HOST);
   hypre_CSRMatrixI(*AT)[num_cols_A] = num_nnzs_A;
   hypre_TFree(bucket, HYPRE_MEMORY_HOST);

   // Set rownnz and num_rownnz
   if (hypre_CSRMatrixNumRownnz(A) < num_rows_A)
   {
      hypre_CSRMatrixSetRownnz(*AT);
   }

   HYPRE_ANNOTATE_FUNC_END;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixTranspose
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixTranspose(hypre_CSRMatrix  *A,
                         hypre_CSRMatrix **AT,
                         HYPRE_Int         data)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* Dispatch to the device kernel when A's memory location resolves
      to device execution. */
   if (hypre_GetExecPolicy1(hypre_CSRMatrixMemoryLocation(A)) == HYPRE_EXEC_DEVICE)
   {
      return hypre_CSRMatrixTransposeDevice(A, AT, data);
   }
#endif

   return hypre_CSRMatrixTransposeHost(A, AT, data);
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixSplit
*--------------------------------------------------------------------------*/
/* RL: TODO add memory locations */
/* Split Bs_ext (a CSR matrix whose column indices Bs_ext_j are GLOBAL column
 * numbers) into two local CSR matrices:
 *   - Bext_diag: entries whose global column lies in
 *     [first_col_diag_B, last_col_diag_B], columns shifted to start at 0;
 *   - Bext_offd: all remaining entries, columns remapped to positions in the
 *     sorted, de-duplicated map col_map_offd_C (built from the off-diagonal
 *     columns found in Bs_ext merged with col_map_offd_B).
 * Outputs: *num_cols_offd_C_ptr, *col_map_offd_C_ptr, *Bext_diag_ptr,
 * *Bext_offd_ptr.  All arrays are allocated on the host (see RL's TODO). */
HYPRE_Int hypre_CSRMatrixSplit(hypre_CSRMatrix *Bs_ext,
HYPRE_BigInt first_col_diag_B,
HYPRE_BigInt last_col_diag_B,
HYPRE_Int num_cols_offd_B,
HYPRE_BigInt *col_map_offd_B,
HYPRE_Int *num_cols_offd_C_ptr,
HYPRE_BigInt **col_map_offd_C_ptr,
hypre_CSRMatrix **Bext_diag_ptr,
hypre_CSRMatrix **Bext_offd_ptr)
{
HYPRE_Complex *Bs_ext_data = hypre_CSRMatrixData(Bs_ext);
HYPRE_Int *Bs_ext_i = hypre_CSRMatrixI(Bs_ext);
HYPRE_BigInt *Bs_ext_j = hypre_CSRMatrixBigJ(Bs_ext);
HYPRE_Int num_rows_Bext = hypre_CSRMatrixNumRows(Bs_ext);
HYPRE_Int B_ext_diag_size = 0;
HYPRE_Int B_ext_offd_size = 0;
HYPRE_Int *B_ext_diag_i = NULL;
HYPRE_Int *B_ext_diag_j = NULL;
HYPRE_Complex *B_ext_diag_data = NULL;
HYPRE_Int *B_ext_offd_i = NULL;
HYPRE_Int *B_ext_offd_j = NULL;
HYPRE_Complex *B_ext_offd_data = NULL;
/* per-thread diag/offd entry counts, used for the cross-thread prefix sum */
HYPRE_Int *my_diag_array;
HYPRE_Int *my_offd_array;
/* scratch shared by all threads: off-diag global columns + col_map_offd_B;
 * allocated by thread 0 inside the parallel region, freed by thread 0 too */
HYPRE_BigInt *temp;
HYPRE_Int max_num_threads;
HYPRE_Int cnt = 0;
hypre_CSRMatrix *Bext_diag = NULL;
hypre_CSRMatrix *Bext_offd = NULL;
HYPRE_BigInt *col_map_offd_C = NULL;
HYPRE_Int num_cols_offd_C = 0;
B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_Bext+1, HYPRE_MEMORY_HOST);
B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_Bext+1, HYPRE_MEMORY_HOST);
max_num_threads = hypre_NumThreads();
my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int ns, ne, ii, num_threads;
HYPRE_Int i1, i, j;
HYPRE_Int my_offd_size, my_diag_size;
HYPRE_Int cnt_offd, cnt_diag;
ii = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
/* each thread owns the contiguous row range [ns, ne) */
hypre_partition1D(num_rows_Bext, num_threads, ii, &ns, &ne);
/* Pass 1: count diag/offd entries; the row pointers temporarily hold
 * thread-local offsets and are globalized after the barrier below. */
my_diag_size = 0;
my_offd_size = 0;
for (i=ns; i < ne; i++)
{
B_ext_diag_i[i] = my_diag_size;
B_ext_offd_i[i] = my_offd_size;
for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
{
/* a global column outside the local diag range belongs to offd */
if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
{
my_offd_size++;
}
else
{
my_diag_size++;
}
}
}
my_diag_array[ii] = my_diag_size;
my_offd_array[ii] = my_offd_size;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (ii)
{
/* shift this thread's row pointers by the totals of all lower-ranked threads */
my_diag_size = my_diag_array[0];
my_offd_size = my_offd_array[0];
for (i1 = 1; i1 < ii; i1++)
{
my_diag_size += my_diag_array[i1];
my_offd_size += my_offd_array[i1];
}
for (i1 = ns; i1 < ne; i1++)
{
B_ext_diag_i[i1] += my_diag_size;
B_ext_offd_i[i1] += my_offd_size;
}
}
else
{
/* thread 0: compute global totals, close the row pointers, allocate */
B_ext_diag_size = 0;
B_ext_offd_size = 0;
for (i1 = 0; i1 < num_threads; i1++)
{
B_ext_diag_size += my_diag_array[i1];
B_ext_offd_size += my_offd_array[i1];
}
B_ext_diag_i[num_rows_Bext] = B_ext_diag_size;
B_ext_offd_i[num_rows_Bext] = B_ext_offd_size;
if (B_ext_diag_size)
{
B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST);
B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST);
}
if (B_ext_offd_size)
{
B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST);
B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST);
}
/* temp holds all offd global columns plus col_map_offd_B; if both are
 * empty it is never allocated (and never touched below) */
if (B_ext_offd_size || num_cols_offd_B)
{
temp = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size + num_cols_offd_B, HYPRE_MEMORY_HOST);
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Pass 2: fill diag/offd using the now-global row pointers; offd keeps
 * GLOBAL column indices for now (remapped in the last pass) */
cnt_offd = B_ext_offd_i[ns];
cnt_diag = B_ext_diag_i[ns];
for (i = ns; i < ne; i++)
{
for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
{
if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
{
temp[cnt_offd] = Bs_ext_j[j];
B_ext_offd_j[cnt_offd] = Bs_ext_j[j];
B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
}
else
{
/* diag columns are shifted to be zero-based locally */
B_ext_diag_j[cnt_diag] = Bs_ext_j[j] - first_col_diag_B;
B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
}
}
}
/* This computes the mappings */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* thread 0 alone builds col_map_offd_C: append col_map_offd_B to the
 * collected offd columns, sort, then compact to unique values in place */
if (ii == 0)
{
cnt = 0;
if (B_ext_offd_size || num_cols_offd_B)
{
cnt = B_ext_offd_size;
for (i=0; i < num_cols_offd_B; i++)
{
temp[cnt++] = col_map_offd_B[i];
}
if (cnt)
{
hypre_BigQsort0(temp, 0, cnt-1);
num_cols_offd_C = 1;
HYPRE_BigInt value = temp[0];
for (i = 1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_C++] = value;
}
}
}
if (num_cols_offd_C)
{
col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
}
for (i = 0; i < num_cols_offd_C; i++)
{
col_map_offd_C[i] = temp[i];
}
hypre_TFree(temp, HYPRE_MEMORY_HOST);
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Pass 3: remap offd columns from global indices to positions in
 * col_map_offd_C (every global column is present in the map by construction) */
for (i = ns; i < ne; i++)
{
for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++)
{
B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, B_ext_offd_j[j], num_cols_offd_C);
}
}
} /* end parallel region */
hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST);
/* wrap the filled arrays in CSR matrix shells; ownership transfers to them */
Bext_diag = hypre_CSRMatrixCreate(num_rows_Bext, last_col_diag_B-first_col_diag_B+1, B_ext_diag_size);
hypre_CSRMatrixMemoryLocation(Bext_diag) = HYPRE_MEMORY_HOST;
Bext_offd = hypre_CSRMatrixCreate(num_rows_Bext, num_cols_offd_C, B_ext_offd_size);
hypre_CSRMatrixMemoryLocation(Bext_offd) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixI(Bext_diag) = B_ext_diag_i;
hypre_CSRMatrixJ(Bext_diag) = B_ext_diag_j;
hypre_CSRMatrixData(Bext_diag) = B_ext_diag_data;
hypre_CSRMatrixI(Bext_offd) = B_ext_offd_i;
hypre_CSRMatrixJ(Bext_offd) = B_ext_offd_j;
hypre_CSRMatrixData(Bext_offd) = B_ext_offd_data;
*col_map_offd_C_ptr = col_map_offd_C;
*Bext_diag_ptr = Bext_diag;
*Bext_offd_ptr = Bext_offd;
*num_cols_offd_C_ptr = num_cols_offd_C;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixReorderHost
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixReorderHost(hypre_CSRMatrix *A)
{
   HYPRE_Complex *A_data     = hypre_CSRMatrixData(A);
   HYPRE_Int     *A_i        = hypre_CSRMatrixI(A);
   HYPRE_Int     *A_j        = hypre_CSRMatrixJ(A);
   HYPRE_Int     *rownnz_A   = hypre_CSRMatrixRownnz(A);
   HYPRE_Int      nnzrows_A  = hypre_CSRMatrixNumRownnz(A);
   HYPRE_Int      num_rows_A = hypre_CSRMatrixNumRows(A);
   HYPRE_Int      num_cols_A = hypre_CSRMatrixNumCols(A);
   HYPRE_Int      i, ii, j;

   /* Moving the diagonal entry first only makes sense for square matrices */
   if (num_rows_A != num_cols_A)
   {
      return -1;
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i, ii, j) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < nnzrows_A; i++)
   {
      /* iterate only over rows that have nonzeros when a rownnz list exists */
      ii = rownnz_A ? rownnz_A[i] : i;
      for (j = A_i[ii]; j < A_i[ii+1]; j++)
      {
         if (A_j[j] != ii)
         {
            continue;
         }
         /* found the diagonal entry: swap it into the row's first slot */
         if (j != A_i[ii])
         {
            hypre_swap(A_j, A_i[ii], j);
            hypre_swap_c(A_data, A_i[ii], j);
         }
         break;
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixReorder:
*
* Reorders the column and data arrays of a square CSR matrix, such that the
* first entry in each row is the diagonal one.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixReorder(hypre_CSRMatrix *A)
{
   /* Route to the device kernel when A lives in device memory,
    * otherwise run the host implementation. */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (hypre_GetExecPolicy1(hypre_CSRMatrixMemoryLocation(A)) == HYPRE_EXEC_DEVICE)
   {
      return hypre_CSRMatrixMoveDiagFirstDevice(A);
   }
#endif
   return hypre_CSRMatrixReorderHost(A);
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixAddPartial:
* adds matrix rows in the CSR matrix B to the CSR Matrix A, where row_nums[i]
* defines to which row of A the i-th row of B is added, and returns a CSR Matrix C;
* Note: The routine does not check for 0-elements which might be generated
* through cancellation of elements in A and B or already contained
* in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
*--------------------------------------------------------------------------*/
/* Returns C = A + P(B), where P scatters row i of B onto row row_nums[i] of A.
 * Several B rows may target the same A row.  Two passes: a symbolic pass that
 * counts the nonzeros per row of C, and a numeric pass that fills C_j/C_data.
 * `marker` tracks which columns have already been seen for the current row;
 * `map`/`temp` hold B's row order sorted by destination row so B rows can be
 * consumed in the same sweep over A's rows.
 * NOTE(review): row_nums values are assumed to lie in [0, nrows_A) — confirm
 * with callers; out-of-range values would be silently skipped or misplace rows. */
hypre_CSRMatrix *
hypre_CSRMatrixAddPartial( hypre_CSRMatrix *A,
hypre_CSRMatrix *B,
HYPRE_Int *row_nums)
{
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A);
HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A);
HYPRE_Complex *B_data = hypre_CSRMatrixData(B);
HYPRE_Int *B_i = hypre_CSRMatrixI(B);
HYPRE_Int *B_j = hypre_CSRMatrixJ(B);
HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B);
HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B);
hypre_CSRMatrix *C;
HYPRE_Complex *C_data;
HYPRE_Int *C_i;
HYPRE_Int *C_j;
HYPRE_Int ia, ib, ic, jcol, num_nonzeros;
HYPRE_Int pos, i, i2, j, cnt;
HYPRE_Int *marker;
HYPRE_Int *map;
HYPRE_Int *temp;
HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A);
HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B);
/* RL: TODO cannot guarantee, maybe should never assert
hypre_assert(memory_location_A == memory_location_B);
*/
/* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
* not sure if this is the right thing to do.
* Also, need something like this in other places
* TODO */
HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);
if (ncols_A != ncols_B)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
return NULL;
}
/* Sort B's rows by destination row: after the qsort, temp[] is the sorted
 * destination rows and map[] the corresponding original B row indices. */
map = hypre_CTAlloc(HYPRE_Int, nrows_B, HYPRE_MEMORY_HOST);
temp = hypre_CTAlloc(HYPRE_Int, nrows_B, HYPRE_MEMORY_HOST);
for (i=0; i < nrows_B; i++)
{
map[i] = i;
temp[i] = row_nums[i];
}
hypre_qsort2i(temp,map,0,nrows_B-1);
marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);
C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, memory_location_C);
for (ia = 0; ia < ncols_A; ia++)
{
marker[ia] = -1;
}
/* Pass 1 (symbolic): count nonzeros of each row of C.  marker[jcol] == ic
 * means column jcol is already counted for row ic. */
num_nonzeros = 0;
C_i[0] = 0;
cnt = 0;
for (ic = 0; ic < nrows_A; ic++)
{
for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
{
jcol = A_j[ia];
marker[jcol] = ic;
num_nonzeros++;
}
/* fold in every B row whose destination (temp, sorted) is this row */
if (cnt < nrows_B && temp[cnt] == ic)
{
for (j = cnt; j < nrows_B; j++)
{
if (temp[j] == ic)
{
i2 = map[cnt++];
for (ib = B_i[i2]; ib < B_i[i2+1]; ib++)
{
jcol = B_j[ib];
if (marker[jcol] != ic)
{
marker[jcol] = ic;
num_nonzeros++;
}
}
}
else
{
break;
}
}
}
C_i[ic+1] = num_nonzeros;
}
C = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros);
hypre_CSRMatrixI(C) = C_i;
hypre_CSRMatrixInitialize_v2(C, 0, memory_location_C);
C_j = hypre_CSRMatrixJ(C);
C_data = hypre_CSRMatrixData(C);
for (ia = 0; ia < ncols_A; ia++)
{
marker[ia] = -1;
}
/* Pass 2 (numeric): fill C.  Here marker[jcol] stores the position of
 * column jcol inside C for the current row; marker[jcol] < C_i[ic] means
 * the column has not yet been emitted for row ic. */
cnt = 0;
pos = 0;
for (ic = 0; ic < nrows_A; ic++)
{
for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
{
jcol = A_j[ia];
C_j[pos] = jcol;
C_data[pos] = A_data[ia];
marker[jcol] = pos;
pos++;
}
if (cnt < nrows_B && temp[cnt] == ic)
{
for (j = cnt; j < nrows_B; j++)
{
if (temp[j] == ic)
{
i2 = map[cnt++];
for (ib = B_i[i2]; ib < B_i[i2+1]; ib++)
{
jcol = B_j[ib];
if (marker[jcol] < C_i[ic])
{
/* first time this column appears in row ic: new entry */
C_j[pos] = jcol;
C_data[pos] = B_data[ib];
marker[jcol] = pos;
pos++;
}
else
{
/* column already present: accumulate into the existing slot */
C_data[marker[jcol]] += B_data[ib];
}
}
}
else
{
break;
}
}
}
}
hypre_TFree(marker, HYPRE_MEMORY_HOST);
hypre_TFree(map, HYPRE_MEMORY_HOST);
hypre_TFree(temp, HYPRE_MEMORY_HOST);
return C;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixSumElts:
* Returns the sum of all matrix elements.
*--------------------------------------------------------------------------*/
HYPRE_Complex
hypre_CSRMatrixSumElts( hypre_CSRMatrix *A )
{
   /* Sum of every stored entry of A (OpenMP reduction when enabled). */
   HYPRE_Complex *data = hypre_CSRMatrixData(A);
   HYPRE_Int      nnz  = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_Complex  sum  = 0;
   HYPRE_Int      i;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < nnz; i++)
   {
      sum += data[i];
   }

   return sum;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixFnorm
*--------------------------------------------------------------------------*/
HYPRE_Real
hypre_CSRMatrixFnorm( hypre_CSRMatrix *A )
{
   /* Frobenius norm: sqrt of the sum of squared entries. */
   HYPRE_Int      num_nonzeros = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_Complex *A_data       = hypre_CSRMatrixData(A);
   HYPRE_Complex  sum          = 0;
   HYPRE_Int      i;

   /* sanity check: the stored nnz count must agree with the row pointer */
   hypre_assert(num_nonzeros == hypre_CSRMatrixI(A)[hypre_CSRMatrixNumRows(A)]);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_nonzeros; ++i)
   {
      sum += A_data[i] * A_data[i];
   }

   return sqrt(sum);
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixComputeRowSumHost
*
* type == 0, sum,
* 1, abs sum
* 2, square sum
*--------------------------------------------------------------------------*/
void
hypre_CSRMatrixComputeRowSumHost( hypre_CSRMatrix *A,
                                  HYPRE_Int *CF_i,
                                  HYPRE_Int *CF_j,
                                  HYPRE_Complex *row_sum,
                                  HYPRE_Int type,
                                  HYPRE_Complex scal,
                                  const char *set_or_add)
{
   /* Row sums of A scaled by `scal`:
    *   type 0: plain sum, 1: sum of |a_ij|, 2: sum of a_ij^2.
    * set_or_add starting with 's' overwrites row_sum; anything else adds to it.
    * When CF_i/CF_j are given, only entries whose row and column carry the
    * same CF marker contribute. */
   HYPRE_Int      nrows  = hypre_CSRMatrixNumRows(A);
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int     *A_i    = hypre_CSRMatrixI(A);
   HYPRE_Int     *A_j    = hypre_CSRMatrixJ(A);
   HYPRE_Int      i, j;

   for (i = 0; i < nrows; i++)
   {
      HYPRE_Complex accum = (set_or_add[0] == 's') ? 0.0 : row_sum[i];

      for (j = A_i[i]; j < A_i[i+1]; j++)
      {
         /* skip entries whose CF markers disagree (C/F splitting filter) */
         if (CF_i && CF_j && CF_i[i] != CF_j[A_j[j]])
         {
            continue;
         }

         switch (type)
         {
            case 0:
               accum += scal * A_data[j];
               break;
            case 1:
               accum += scal * fabs(A_data[j]);
               break;
            case 2:
               accum += scal * A_data[j] * A_data[j];
               break;
         }
      }

      row_sum[i] = accum;
   }
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixComputeRowSum
*--------------------------------------------------------------------------*/
void
hypre_CSRMatrixComputeRowSum( hypre_CSRMatrix *A,
                              HYPRE_Int *CF_i,
                              HYPRE_Int *CF_j,
                              HYPRE_Complex *row_sum,
                              HYPRE_Int type,
                              HYPRE_Complex scal,
                              const char *set_or_add)
{
   /* CF_i and CF_j must be supplied together or both omitted. */
   hypre_assert( (CF_i && CF_j) || (!CF_i && !CF_j) );

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (hypre_GetExecPolicy1(hypre_CSRMatrixMemoryLocation(A)) == HYPRE_EXEC_DEVICE)
   {
      hypre_CSRMatrixComputeRowSumDevice(A, CF_i, CF_j, row_sum, type, scal, set_or_add);
      return;
   }
#endif
   hypre_CSRMatrixComputeRowSumHost(A, CF_i, CF_j, row_sum, type, scal, set_or_add);
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixExtractDiagonalHost
* type 0: diag
* 1: abs diag
* 2: diag inverse
* 3: diag inverse sqrt
* 4: abs diag inverse sqrt
*--------------------------------------------------------------------------*/
void
hypre_CSRMatrixExtractDiagonalHost( hypre_CSRMatrix *A,
                                    HYPRE_Complex *d,
                                    HYPRE_Int type)
{
   /* d[i] receives a transform of the diagonal entry of row i:
    *   type 0: a_ii, 1: |a_ii|, 2: 1/a_ii, 3: 1/sqrt(a_ii), 4: 1/sqrt(|a_ii|).
    * Rows with no stored diagonal entry get d[i] = 0. */
   HYPRE_Int      nrows  = hypre_CSRMatrixNumRows(A);
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int     *A_i    = hypre_CSRMatrixI(A);
   HYPRE_Int     *A_j    = hypre_CSRMatrixJ(A);
   HYPRE_Int      i, j;

   for (i = 0; i < nrows; i++)
   {
      HYPRE_Complex diag = 0.0;

      for (j = A_i[i]; j < A_i[i+1]; j++)
      {
         if (A_j[j] != i)
         {
            continue;
         }
         switch (type)
         {
            case 0:
               diag = A_data[j];
               break;
            case 1:
               diag = fabs(A_data[j]);
               break;
            case 2:
               diag = 1.0 / (A_data[j]);
               break;
            case 3:
               diag = 1.0 / (sqrt(A_data[j]));
               break;
            case 4:
               diag = 1.0 / (sqrt(fabs(A_data[j])));
               break;
         }
         break; /* diagonal found; stop scanning this row */
      }

      d[i] = diag;
   }
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixExtractDiagonal
*
* type 0: diag
* 1: abs diag
* 2: diag inverse
* 3: diag inverse sqrt
*--------------------------------------------------------------------------*/
void
hypre_CSRMatrixExtractDiagonal( hypre_CSRMatrix *A,
                                HYPRE_Complex *d,
                                HYPRE_Int type)
{
   /* Device/host dispatch for the diagonal extraction above. */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (hypre_GetExecPolicy1(hypre_CSRMatrixMemoryLocation(A)) == HYPRE_EXEC_DEVICE)
   {
      hypre_CSRMatrixExtractDiagonalDevice(A, d, type);
      return;
   }
#endif
   hypre_CSRMatrixExtractDiagonalHost(A, d, type);
}
/* Scale CSR matrix A = scalar * A
*/
/* Scale CSR matrix in place: A = scalar * A. */
HYPRE_Int
hypre_CSRMatrixScale( hypre_CSRMatrix *A,
                      HYPRE_Complex scalar)
{
   HYPRE_Complex *data = hypre_CSRMatrixData(A);
   HYPRE_Int      nnz  = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_Int      i;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (hypre_GetExecPolicy1(hypre_CSRMatrixMemoryLocation(A)) == HYPRE_EXEC_DEVICE)
   {
      hypreDevice_Scalen(data, nnz, scalar);
      return hypre_error_flag;
   }
#endif
   for (i = 0; i < nnz; i++)
   {
      data[i] *= scalar;
   }

   return hypre_error_flag;
}
/* Set every stored entry of A to `value`, allocating the data array first
 * if the matrix does not have one yet. */
HYPRE_Int
hypre_CSRMatrixSetConstantValues( hypre_CSRMatrix *A,
                                  HYPRE_Complex value)
{
   HYPRE_Int      nnz = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_Complex *data;
   HYPRE_Int      i;

   /* lazy allocation in the matrix's own memory location */
   if (!hypre_CSRMatrixData(A))
   {
      hypre_CSRMatrixData(A) = hypre_TAlloc(HYPRE_Complex, nnz, hypre_CSRMatrixMemoryLocation(A));
   }
   data = hypre_CSRMatrixData(A);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (hypre_GetExecPolicy1(hypre_CSRMatrixMemoryLocation(A)) == HYPRE_EXEC_DEVICE)
   {
      hypreDevice_Filln(data, nnz, value);
      return hypre_error_flag;
   }
#endif
   for (i = 0; i < nnz; i++)
   {
      data[i] = value;
   }

   return hypre_error_flag;
}
|
fast_ops.c | #include <stdio.h>
#include <blis.h>
// porting to mobile information
// https://stackoverflow.com/questions/11228855/header-files-for-x86-simd-intrinsics
// https://github.com/DLTcollab/sse2neon
#include <immintrin.h>
#include "fast_ops.h"
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
/* Debug helper: print the four float lanes of an SSE register. */
void printv(__m128 v){
    float lanes[4];
    _mm_storeu_ps(lanes, v);
    printf("fvec = [ %f, %f, %f, %f ]\n", lanes[0], lanes[1], lanes[2], lanes[3]);
}
/* Debug helper: print the four 32-bit integer lanes of an SSE register. */
void printvi(__m128i v){
    int lanes[4];
    _mm_storeu_si128((__m128i*)lanes, v);
    printf("ivec = [ %d, %d, %d, %d ]\n", lanes[0], lanes[1], lanes[2], lanes[3]);
}
/* Returns the minimum value of buff[0..n).  Assumes n >= 1.
 * Bulk of the array is reduced 4 floats at a time with SSE; the remainder
 * is handled by a scalar tail loop.
 * Fix: the original unconditionally executed _mm_loadu_ps(&buff[0]),
 * reading 16 bytes even when n < 4 — an out-of-bounds read.  The vector
 * path is now entered only when at least one full 4-lane block exists. */
float find_min(float* buff, int n)
{
    const int K = 4;
    int i = 0;
    float vmin = buff[0];

    if (n >= K) {
        /* seed with the first block, fold in the remaining full blocks */
        __m128 minval = _mm_loadu_ps(&buff[0]);
        for (i = K; i + K <= n; i += K) {
            minval = _mm_min_ps(minval, _mm_loadu_ps(&buff[i]));
        }
        /* horizontal reduction of the 4 lanes */
        float lanes[4];
        _mm_storeu_ps(lanes, minval);
        for (int k = 0; k < K; ++k) {
            if (lanes[k] < vmin) {
                vmin = lanes[k];
            }
        }
    }

    /* scalar tail (or the whole array when n < 4) */
    for (; i < n; ++i) {
        if (buff[i] < vmin) {
            vmin = buff[i];
        }
    }
    return vmin;
}
/* Returns an index of the minimum value of x[0..n) and writes the minimum
 * itself to *min_value.  Strategy: compute the min with find_min(), then
 * locate an index holding it — scalar tail first, SSE-recorded candidates
 * as a fallback.
 * NOTE(review): the vector pass encodes matching indices as negatives via
 * _mm_madd_epi16(vIndex, mask).  madd treats each 32-bit index as two
 * 16-bit halves, so -(lo+hi) equals -index only while index < 2^16 —
 * confirm n stays below 65536 for callers of this routine.
 * When several entries equal the minimum, the scalar tail keeps the LAST
 * match in its range, and the vector fallback picks the largest recorded
 * index. */
int argmin_vector(float *x, int n, float* min_value){
int ret_val;
float smin;
int i, k;
ret_val = 0;
smin = find_min(x, n);
const int K = 4;
const __m128i vIndexInc = _mm_set1_epi32(K);
const __m128 vMinVal = _mm_set1_ps(smin);
__m128i vMinIndex = _mm_setr_epi32(0, 1, 2, 3);
__m128i vIndex = vMinIndex;
for (i = 0; i + 4 < n; i+=4) {
/* lanes equal to the min produce an all-ones mask */
__m128 vcmp = _mm_cmpeq_ps(_mm_loadu_ps(&x[i]), vMinVal);
__m128i mask = _mm_castps_si128(vcmp);
/* madd(index, all-ones) ~ -index (see 16-bit caveat above); min() keeps
 * the most negative, i.e. the largest matching index seen so far */
vMinIndex = _mm_min_epi32(_mm_madd_epi16(vIndex, mask), vMinIndex);
vIndex = _mm_add_epi32(vIndex, vIndexInc);
}
/* scalar tail: remember the last index holding the min, if any */
k = -1;
for (; i < n; ++i) {
k = (x[i] == smin) ? i : k;
}
if ( k < 0){
/* no tail match: recover the index from the negated vector candidates */
k = MAX(-_mm_extract_epi32(vMinIndex, 0), k);
k = MAX(-_mm_extract_epi32(vMinIndex, 1), k);
k = MAX(-_mm_extract_epi32(vMinIndex, 2), k);
k = MAX(-_mm_extract_epi32(vMinIndex, 3), k);
}
ret_val = k;
*min_value = smin;
return ret_val;
}
/* y[i] = sum_j X[i][j]^2 for each row i of the row-major
 * num_rows x num_cols matrix X.
 * Fix: `row_ptr` and `sum` were declared outside the omp-parallel loop,
 * and `row_ptr` was missing from the private() clause, so all threads
 * raced on one shared pointer.  Both now live inside the loop body,
 * which makes them implicitly private per thread. */
void sum_square_cols(float* X, float *y, int num_rows, int num_cols) {
    int i, j;
    #pragma omp parallel for private(i, j)
    for (i = 0; i < num_rows; ++i)
    {
        const float *row_ptr = X + i * num_cols;
        float sum = 0.0f;
        for (j = 0; j < num_cols; ++j){
            sum += row_ptr[j] * row_ptr[j];
        }
        y[i] = sum;
    }
}
/* Mutual ("cross check") nearest-neighbour matching on the row-major
 * distance matrix X (num_rows x num_cols).  On return, irow[i] is the
 * best column for row i, or -1 when that match is not also its column's
 * minimum; vrow[i] holds the row minimum and vcol[j] the column minimum. */
void fast_cross_check_match(int *irow, float *vrow, float *vcol, float* X, int num_rows, int num_cols) {
    int i, j;
    float min_value;
    const float *row_ptr;

    /* best match per row */
    #pragma omp parallel for private(i, min_value)
    for (i = 0; i < num_rows; ++i){
        irow[i] = argmin_vector(X + i * num_cols, num_cols, &min_value);
        vrow[i] = min_value;
    }

    /* column minima: seed with row 0, then fold in every row */
    #pragma GCC ivdep
    for (j = 0; j < num_cols; ++j){
        vcol[j] = X[j];
    }
    for (i = 0; i < num_rows; ++i){
        row_ptr = X + i * num_cols;
        #pragma GCC ivdep
        for (j = 0; j < num_cols; ++j){
            if (row_ptr[j] < vcol[j]){
                vcol[j] = row_ptr[j];
            }
        }
    }

    /* apply cross check condition: drop non-mutual matches */
    #pragma GCC ivdep
    for (i = 0; i < num_rows; ++i){
        if (irow[i] > -1 && vrow[i] != vcol[irow[i]]){
            irow[i] = -1;
        }
    }
}
/* Writes the outer sum X[i][j] = row[i] + col[j] into the row-major
 * num_rows x num_cols matrix X, one row per parallel iteration. */
void sum_row_and_col_vectors(float* row, float *col, float* X, int num_rows, int num_cols) {
    int i, j;
    float *out;
    float r;
    #pragma omp parallel for private(i, j, r, out)
    for (i = 0; i < num_rows; ++i){
        out = X + i * num_cols;
        r = row[i];
        #pragma GCC ivdep
        for (j = 0; j < num_cols; ++j){
            out[j] = col[j] + r;
        }
    }
}
void fast_ratio_test_match(int *irow, float *vrow, float* X, int num_rows, int num_cols, float ratio) {
    // finds two nearest neighbours for Lowe's ratio test, returns -1 if match does not
    // satisfy the ratio test
    int i, min_index;
    float min_value, second_min_value;
    #pragma omp parallel for private(i, min_value, min_index, second_min_value)
    for (i = 0; i < num_rows; ++i){
        float *row = X + i * num_cols;
        min_index = argmin_vector(row, num_cols, &min_value);
        /* mask the best match with a large sentinel, find the runner-up,
         * then restore the row (each thread only touches its own row) */
        float tmp_value = row[min_index];
        X[min_index + i * num_cols] = 1000000.0;
        argmin_vector(row, num_cols, &second_min_value);
        X[min_index + i * num_cols] = tmp_value;
        if (min_value / second_min_value > ratio){
            /* ambiguous match: best is not distinctly better than second */
            irow[i] = -1;
            vrow[i] = 0;
        }
        else {
            irow[i] = min_index;
            vrow[i] = min_value;
        }
    }
}
/* Combined matcher: per-row Lowe's ratio test followed by a cross check
 * against the column minima of X (num_rows x num_cols, row-major). */
void fast_ratio_test_cross_check_match(int *irow, float *vrow, float *vcol, float* X, int num_rows, int num_cols, float ratio) {
    int i, j;
    const float *row_ptr;

    /* step 1: ratio test fills irow/vrow (-1 / 0 for rejected rows) */
    fast_ratio_test_match(irow, vrow, X, num_rows, num_cols, ratio);

    /* step 2: column minima, seeded from row 0 then folded over all rows */
    #pragma GCC ivdep
    for (j = 0; j < num_cols; ++j){
        vcol[j] = X[j];
    }
    for (i = 0; i < num_rows; ++i){
        row_ptr = X + i * num_cols;
        #pragma GCC ivdep
        for (j = 0; j < num_cols; ++j){
            if (row_ptr[j] < vcol[j]){
                vcol[j] = row_ptr[j];
            }
        }
    }

    /* step 3: apply cross check condition — a surviving match must also
     * be the minimum of its column */
    #pragma GCC ivdep
    for (i = 0; i < num_rows; ++i){
        if (irow[i] > -1 && vrow[i] != vcol[irow[i]]){
            irow[i] = -1;
            vrow[i] = 0;
        }
    }
}
layer.h | // == mojo ====================================================================
//
// Copyright (c) gnawice@gnawice.com. All rights reserved.
// See LICENSE in root folder
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files(the "Software"),
// to deal in the Software without restriction, including without
// limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to
// whom the Software is furnished to do so, subject to the following
// conditions :
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
// OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
// THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// ============================================================================
// layer.h: defines layers for neural network
// ==================================================================== mojo ==
#pragma once
#include <string>
#include <sstream>
#include "core_math.h"
#include "activation.h"
namespace mojo
{
#include <windows.h>
/*
double PCFreq = 0.0;
__int64 CounterStart = 0;
void StartCounter()
{
LARGE_INTEGER li;
if (!QueryPerformanceFrequency(&li)) return;
PCFreq = double(li.QuadPart) / 1000.0;
QueryPerformanceCounter(&li);
CounterStart = li.QuadPart;
}
double GetCounter()
{
LARGE_INTEGER li;
QueryPerformanceCounter(&li);
return double(li.QuadPart - CounterStart) / PCFreq;
}
*/
#define int2str(a) std::to_string((long long)a)
#define float2str(a) std::to_string((long double)a)
#define bail(txt) {std::cerr << txt; throw;}
//----------------------------------------------------------------------------------------------------------
// B A S E L A Y E R
//
// all other layers derived from this
// Abstract base for all network layers: owns the activation node matrix,
// optional bias, activation function, and the forward/backward link lists
// that wire layers together.  Derived classes implement the signal
// accumulation and (when training is enabled) the delta/weight-gradient
// propagation.
class base_layer
{
protected:
bool _has_weights;     // whether new_connection() should allocate a weight matrix
bool _use_bias;        // whether bias is allocated/applied in activate_nodes()
float _learning_factor;
int _thread_count;     // clamped to >= 1 by set_threading()
public:
activation_function *p_act;  // owned; deleted in the destructor
bool has_weights() {return _has_weights;}
bool use_bias() { return _use_bias; }
// NOTE(review): the parameter f is ignored — _learning_factor is always
// reset to 1.f here.  Looks like a bug ('= f' intended?); confirm intent
// before changing, callers may rely on the current no-op.
void set_learning_factor(float f=1.0f) {_learning_factor = 1.f;}
void set_threading(int thread_count) {_thread_count=thread_count; if(_thread_count<1) _thread_count=1;}
int pad_cols, pad_rows;
matrix node;  // activations (output of this layer)
matrix bias; // this is something that maybe should be in the same class as the weights... but whatever. handled differently for different layers
std::string name;
// index of W matrix, index of connected layer
std::vector<std::pair<int,base_layer*>> forward_linked_layers;
#ifndef MOJO_NO_TRAINING
matrix delta;  // error signal for backprop; same shape as node
std::vector<std::pair<int,base_layer*>> backward_linked_layers;
// push this layer's delta back to `top` through weights w
virtual void distribute_delta(base_layer &top, const matrix &w, const int train = 1) =0;
// accumulate weight gradients into dw from this layer's delta and top's activations
virtual void calculate_dw(const base_layer &top_layer, matrix &dw, const int train =1)=0;
virtual void update_bias(const matrix &newbias, float alpha) {};
#endif
// forward pass: fold top's activations through w into this layer's node
virtual void accumulate_signal(const base_layer &top_node, const matrix &w, const int train =0) =0;
base_layer(const char* layer_name, int _w, int _h=1, int _c=1) : node(_w, _h, _c), p_act(NULL), name(layer_name), _has_weights(true), pad_cols(0), pad_rows(0), _learning_factor(1.f), _use_bias(false), _thread_count(1)
#ifndef MOJO_NO_TRAINING
,delta(_w,_h,_c,NULL,false)
#endif
{
}
// resize node/bias/delta; dimensions are clamped to at least 1
virtual void resize(int _w, int _h=1, int _c=1)
{
if (_w<1) _w = 1; if (_h<1) _h = 1; if (_c<1) _c = 1;
node =matrix(_w,_h,_c);
if (_use_bias) { bias = matrix(_w, _h, _c); bias.fill(0.); }
#ifndef MOJO_NO_TRAINING
delta =matrix(_w,_h,_c,NULL,false);
#endif
}
virtual ~base_layer(){if(p_act) delete p_act;}
// total number of nodes; used for weight-initialization scaling
virtual int fan_size() {return node.chans*node.rows*node.cols;}
// apply the activation function (with bias when enabled) in place over node
virtual void activate_nodes()
{
if (p_act)
{
if(_use_bias)
//for (int c=0; c<node.chans; c++)
{
//const float b = bias.x[c];
//float *x= &node.x[c*node.chan_stride];
p_act->f(node.x,node.size(),bias.x);
}
else
p_act->f(node.x, node.size(), 0);
}
}
// Register a forward link on `top` (and the mirror backward link when
// training); returns a newly allocated weight matrix sized
// [top nodes x this-layer nodes], or NULL for weightless layers.
// Caller takes ownership of the returned matrix.
virtual matrix * new_connection(base_layer &top, int weight_mat_index)
{
top.forward_linked_layers.push_back(std::make_pair((int)weight_mat_index,this));
#ifndef MOJO_NO_TRAINING
backward_linked_layers.push_back(std::make_pair((int)weight_mat_index,&top));
#endif
if (_has_weights)
{
int rows = node.cols*node.rows*node.chans;
int cols = top.node.cols*top.node.rows*top.node.chans;
return new matrix(cols, rows, 1);
}
else
return NULL;
}
//inline float f(float *in, int i, int size, float bias) {return p_act->f(in, i, size, bias);};
// derivative of the activation; identity (1) when no activation is set
inline float df(float *in, int i, int size) { if (p_act) return p_act->df(in, i, size); else return 1.f; };
// serialized one-line description used when saving the model
virtual std::string get_config_string() =0;
};
//----------------------------------------------------------------------------------------------------------
// I N P U T L A Y E R
//
// input layer class - can be 1D, 2D (c=1), or stacked 2D (c>1)
// Input layer: holds the raw input in `node` with an identity activation.
// All forward/backward operations are no-ops since nothing feeds into it.
class input_layer : public base_layer
{
public:
input_layer(const char *layer_name, int _w, int _h=1, int _c=1) : base_layer(layer_name,_w,_h,_c) {p_act=new_activation_function("identity"); }
virtual ~input_layer(){}
virtual void activate_nodes() { /*node.reset_empty_chans(); */}
virtual void distribute_delta(base_layer &top, const matrix &w, const int train =1) {}
virtual void calculate_dw(const base_layer &top_layer, matrix &dw, const int train =1) {}
virtual void accumulate_signal(const base_layer &top_node, const matrix &w, const int train =0) {}
virtual std::string get_config_string() {std::string str="input "+int2str(node.cols)+" "+int2str(node.rows)+" "+int2str(node.chans)+ " "+p_act->name+"\n"; return str;}
};
//----------------------------------------------------------------------------------------------------------
// F U L L Y C O N N E C T E D
//
// fully connected layer
// Dense layer: every node connects to every node of the top (input-side)
// layer.  Weights are stored with the top layer's nodes along columns and
// this layer's nodes along rows; bias is always enabled.
class fully_connected_layer : public base_layer
{
public:
fully_connected_layer(const char *layer_name, int _size, activation_function *p) : base_layer(layer_name, _size, 1, 1)
{
p_act = p; _use_bias = true;
bias = matrix(node.cols, node.rows, node.chans);
bias.fill(0.);
}//layer_type=fully_connected_type;}
virtual std::string get_config_string() {std::string str="fully_connected "+int2str(node.size())+ " "+p_act->name+"\n"; return str;}
// Forward pass: node[j] += dot(top activations, row j of w).  Flattens the
// top layer regardless of its shape; the padded-channel path handles tops
// whose chan_stride exceeds cols*rows (channel data not contiguous).
virtual void accumulate_signal( const base_layer &top,const matrix &w, const int train =0)
{
// doesn't care if shape is not 1D
// here weights are formated in matrix, top node in cols, bottom node along rows. (note that my top is opposite of traditional understanding)
// node += top.node.dot_1dx2d(w);
const int s = w.rows;
const int ts = top.node.size();
const int ts2 = top.node.cols*top.node.rows;
// this can be sped up a little with SSE.
if(top.node.chan_stride!=ts2)
{
// padded/non-contiguous channels: dot each channel block separately
//std::cout << "here: " << top.node.chan_stride << ","<< ts2 << ","<< top.node.chans << ":";
MOJO_THREAD_THIS_LOOP(_thread_count)
for (int j = 0; j < s; j++)
{
for (int i = 0; i < top.node.chans; i++)
{
node.x[j] += dot(top.node.x+top.node.chan_stride*i, w.x+j*w.cols+ts2*i, ts2);
//float *f=top.node.x+top.node.chan_stride*i;
//if(node.x[j]!=node.x[j])
// NOTE(review): debug NaN trap (x != x detects NaN) that dumps the top
// layer's activations and hard-exits the process — probably should not
// ship in production builds; confirm before removing.
if(node.x[j]!=node.x[j])
{
//std::cout << "stuff" << top.name << " " << name << " " << top.node.x[top.node.chan_stride*i] << " " << w.x[j*w.cols+ts2*i] << " | " ;
for (int k=0; k<top.node.size(); k++)
{
std::cout << k<< ","<< top.node.x[k] <<",";
}
exit(1);
}
}
}
}
else
{
// contiguous top layer: one dot product per output node
MOJO_THREAD_THIS_LOOP(_thread_count)
for (int j = 0; j < s; j++) node.x[j] += dot(top.node.x, w.x+j*w.cols, ts);
}
}
#ifndef MOJO_NO_TRAINING
// gradient-descent step on the bias vector
virtual void update_bias(const matrix &newbias, float alpha) {
for (int j = 0; j < bias.size(); j++) bias.x[j] -= newbias.x[j] * alpha;
}
// Backward pass: top.delta += w^T * this->delta, with a separate path for
// tops whose channels are padded (chan_stride != cols*rows).
virtual void distribute_delta(base_layer &top, const matrix &w, const int train =1)
{
if(top.delta.cols*top.delta.rows==top.delta.chan_stride)
{
const int w_cols = w.cols;
for (int b = 0; b < delta.size(); b++)
{
const float cb = delta.x[b];
for (int t = 0; t < top.delta.size(); t++)
top.delta.x[t] += cb*w.x[t + b*w_cols];
}
}
else
{
const int w_cols = w.cols;
const int chan_size=top.delta.cols*top.delta.rows;
for (int b = 0; b < delta.size(); b++)
{
const float cb = delta.x[b];
for (int tc = 0; tc < top.delta.chans; tc++)
for (int t = 0; t < chan_size; t++)
top.delta.x[t+tc*top.delta.chan_stride] += cb*w.x[t + tc*chan_size + b*w_cols];
}
}
}
// Weight gradient: dw[b][t] = top activation[t] * this delta[b] (outer
// product), again handling padded top channels separately.
virtual void calculate_dw(const base_layer &top_layer, matrix &dw, const int train = 1)
{
const float *bottom = delta.x; const int sizeb = delta.size();
const float *top = top_layer.node.x; const int sizet = top_layer.node.cols*top_layer.node.rows*top_layer.node.chans;
dw.resize(sizet, sizeb, 1);
for (int b = 0; b < sizeb; b++)
{
const float cb = bottom[b];
const int chan_size = top_layer.node.cols*top_layer.node.rows;
if(sizet!=top_layer.node.size())
{
//std::cout << "calculate_dw - odd size";
for (int tc = 0; tc < top_layer.node.chans; tc++)
for (int t = 0; t < chan_size; t++)
{
dw.x[t+tc*chan_size + b*sizet] = top[t+tc*top_layer.node.chan_stride] * cb;
//std::cout << dw.x[t+tc*chan_size + b*sizet] <<",";
}
}
else
{
for (int t = 0; t < sizet; t++) dw.x[t + b*sizet] = top[t] * cb;
}
}
}
#endif
};
//----------------------------------------------------------------------------------------------------------
// M A X P O O L I N G
//
// may split to max and ave pool class derived from pooling layer.. but i never use ave pool anymore
// Max pooling: downsamples each channel by taking the maximum over a
// _pool_size x _pool_size window stepped by _stride. The winning input index of
// each window is remembered in _max_map so the backward pass can route deltas.
class max_pooling_layer : public base_layer
{
protected:
	int _pool_size;   // window is _pool_size x _pool_size
	int _stride;      // window step; equals _pool_size for non-overlapping pooling
	// uses a map to connect pooled result to top layer
	std::vector<int> _max_map;
public:
	max_pooling_layer(const char *layer_name, int pool_size) : base_layer(layer_name, 1)
	{
		_stride = pool_size; _pool_size = pool_size; //layer_type=pool_type;
		_has_weights = false;
	}
	max_pooling_layer(const char *layer_name, int pool_size, int stride ) : base_layer(layer_name, 1)
	{
		_stride= stride; _pool_size=pool_size; //layer_type=pool_type;
		_has_weights = false;
	}
	virtual ~max_pooling_layer(){}
	// serialized as "max_pool <pool_size> <stride>"
	virtual std::string get_config_string() {std::string str="max_pool "+int2str(_pool_size) +" "+ int2str(_stride) +"\n"; return str;}
	// ToDo would like delayed activation of conv layer if available
	//	virtual void activate_nodes(){ return;}
	virtual void resize(int _w, int _h=1, int _c=1)
	{
		if(_w<1) _w=1; if(_h<1) _h=1; if(_c<1) _c=1;
		// one map entry per output node
		_max_map.resize(_w*_h*_c);
		base_layer::resize(_w, _h, _c);
	}
	// no weights
	virtual void calculate_dw(const base_layer &top_layer, matrix &dw, const int train =1) {}
	virtual matrix * new_connection(base_layer &top, int weight_mat_index)
	{
		// need to set the size of this layer
		// can really only handle one connection comming in to this
		int pool_size = _pool_size;
		int w = (top.node.cols) / pool_size;
		int h = (top.node.rows) / pool_size;
		if (_stride != _pool_size)
		{
			// overlapping pooling: standard output-size formula
			w = 1 + ((top.node.cols - _pool_size) / _stride);
			h = 1 + ((top.node.rows - _pool_size) / _stride);
		}
		resize(w, h, top.node.chans);
		return base_layer::new_connection(top, weight_mat_index);
	}
	// this is downsampling
	// the pool size must fit correctly in the image map (use resize prior to call if this isn't the case)
	virtual void accumulate_signal(const base_layer &top,const matrix &w,const int train =0)
	{
		int kstep = top.node.chan_stride; // top.node.cols*top.node.rows;
		int jstep=top.node.cols;
		int output_index=0;
		int *p_map = _max_map.data();
		// degenerate 1-row/1-column inputs collapse the window in that dimension
		int pool_y=_pool_size; if(top.node.rows==1) pool_y=1; //-top.pad_rows*2==1) pool_y=1;
		int pool_x=_pool_size; if(top.node.cols==1) pool_x=1;//-top.pad_cols*2==1) pool_x=1;
		const float *top_node = top.node.x;
		for(int k=0; k<top.node.chans; k++)
		{
			for(int j=0; j<=top.node.rows- _pool_size; j+= _stride)
			{
				for(int i=0; i<=top.node.cols- _pool_size; i+= _stride)
				{
					const int base_index=i+(j)*jstep+k*kstep;
					int max_i=base_index;
					float max=top_node[base_index];
					// hand-unrolled windows for the common 2x2 / 3x3 / 4x4 sizes
					if(pool_x==2)
					{
						const float *n=top_node+base_index;
						//if(max<n[0]) { max = n[0]; max_i=max_i;}
						if(max<n[1]) { max = n[1]; max_i=base_index+1;}
						n+=jstep;
						if(max<n[0]) { max = n[0]; max_i=base_index+jstep;}
						if(max<n[1]) { max = n[1]; max_i=base_index+jstep+1;}
					}
					else if(pool_x==3)
					{
						const float *n=top_node+base_index;
						//if(max<n[0]) { max = n[0]; max_i=max_i;}
						if(max<n[1]) { max = n[1]; max_i=base_index+1;}
						if(max<n[2]) { max = n[2]; max_i=base_index+2;}
						n+=jstep;
						if(max<n[0]) { max = n[0]; max_i=base_index+jstep;}
						if(max<n[1]) { max = n[1]; max_i=base_index+jstep+1;}
						if(max<n[2]) { max = n[2]; max_i=base_index+jstep+2;}
						n+=jstep;
						if(max<n[0]) { max = n[0]; max_i=base_index+2*jstep;}
						if(max<n[1]) { max = n[1]; max_i=base_index+2*jstep+1;}
						if(max<n[2]) { max = n[2]; max_i=base_index+2*jstep+2;}
					}
					else if(pool_x==4)
					{
						const float *n=top_node+base_index;
						//if(max<n[0]) { max = n[0]; max_i=max_i;}
						if(max<n[1]) { max = n[1]; max_i=base_index+1;}
						if(max<n[2]) { max = n[2]; max_i=base_index+2;}
						if(max<n[3]) { max = n[3]; max_i=base_index+3;}
						n+=jstep;
						if(max<n[0]) { max = n[0]; max_i=base_index+jstep;}
						if(max<n[1]) { max = n[1]; max_i=base_index+jstep+1;}
						if(max<n[2]) { max = n[2]; max_i=base_index+jstep+2;}
						if(max<n[3]) { max = n[3]; max_i=base_index+jstep+3;}
						n+=jstep;
						if(max<n[0]) { max = n[0]; max_i=base_index+2*jstep;}
						if(max<n[1]) { max = n[1]; max_i=base_index+2*jstep+1;}
						if(max<n[2]) { max = n[2]; max_i=base_index+2*jstep+2;}
						if(max<n[3]) { max = n[3]; max_i=base_index+2*jstep+3;}
						n+=jstep;
						if(max<n[0]) { max = n[0]; max_i=base_index+3*jstep;}
						if(max<n[1]) { max = n[1]; max_i=base_index+3*jstep+1;}
						if(max<n[2]) { max = n[2]; max_i=base_index+3*jstep+2;}
						if(max<n[3]) { max = n[3]; max_i=base_index+3*jstep+3;}
					}
					else
					{
						// speed up with optimized size version
						for(int jj=0; jj<pool_y; jj+= 1)
						{
							for(int ii=0; ii<pool_x; ii+= 1)
							{
								int index=i+ii+(j+jj)*jstep+k*kstep;
								if((max)<(top_node[index]))
								{
									max = top_node[index];
									max_i=index;
								}
							}
						}
					}
					//if (max<1e-5) node.empty_chan[k] = 1;
					//else node.empty_chan[k] = 0;
					// record the winner's value and its index for backprop
					node.x[output_index] = top_node[max_i];
					p_map[output_index] = max_i;
					output_index++;
				}
			}
		}
	}
#ifndef MOJO_NO_TRAINING
	// this is upsampling
	// route each pooled delta back to the single input that won the forward max
	virtual void distribute_delta(base_layer &top, const matrix &w, const int train =1)
	{
		int *p_map = _max_map.data();
		const int s = (int)_max_map.size();
		for(int k=0; k<s; k++) top.delta.x[p_map[k]]+=delta.x[k];
	}
#endif
};
//----------------------------------------------------------------------------------------------------------
// S E M I S T O C H A S T I C P O O L I N G
// concept similar to stochastic pooling but only slects 'max' based on top 2 candidates
// Semi-stochastic pooling: like max pooling, but during training the pooled
// value is chosen between the top-2 candidates of each window with probability
// proportional to their relative magnitude. At test time the max always wins.
// Backward pass is inherited from max_pooling_layer (routed via _max_map).
class semi_stochastic_pooling_layer : public max_pooling_layer
{
public:
	semi_stochastic_pooling_layer(const char *layer_name, int pool_size) : max_pooling_layer(layer_name, pool_size) {}
	semi_stochastic_pooling_layer(const char *layer_name, int pool_size, int stride) : max_pooling_layer(layer_name, pool_size, stride){}
	virtual std::string get_config_string() { std::string str = "semi_stochastic_pool " + int2str(_pool_size) + " " + int2str(_stride) + "\n"; return str; }
	virtual void accumulate_signal(const base_layer &top, const matrix &w, const int train = 0)
	{
		// FIX: step between input channels with chan_stride, not cols*rows.
		// They differ when the top layer's node matrix is channel-aligned
		// (e.g. a convolution output); max_pooling_layer already uses chan_stride,
		// and the inherited distribute_delta indexes top.delta with _max_map,
		// so the strides must agree or deltas are routed to the wrong nodes.
		int kstep = top.node.chan_stride;
		int jstep = top.node.cols;
		int output_index = 0;
		int *p_map = _max_map.data();
		// degenerate 1-row/1-column inputs collapse the window in that dimension
		int pool_y = _pool_size; if (top.node.rows == 1) pool_y = 1;
		int pool_x = _pool_size; if (top.node.cols == 1) pool_x = 1;
		const float *top_node = top.node.x;
		for (int k = 0; k<top.node.chans; k++)
		{
			for (int j = 0; j <= top.node.rows - _pool_size; j += _stride)
			{
				for (int i = 0; i <= top.node.cols - _pool_size; i += _stride)
				{
					const int base_index = i + (j)*jstep + k*kstep;
					int max_i = base_index;        // index of the largest value
					float max = top_node[base_index];
					int max2_i = base_index;       // index of the runner-up
					float max2 = max;
					for (int jj = 0; jj < pool_y; jj += 1)
					{
						for (int ii = 0; ii < pool_x; ii += 1)
						{
							int index = i + ii + (j + jj)*jstep + k*kstep;
							if ((max) < (top_node[index]))
							{
								max2 = max;
								max2_i = max_i;
								max = top_node[index];
								max_i = index;
							}
							else if ((max2) < (top_node[index]))
							{
								max2 = top_node[index];
								max2_i = index;
							}
						}
					}
					int r = rand() % 100;
					float denom = (max + max2);
					if (denom == 0)
					{
						// degenerate window: both candidates sum to zero, take the max
						node.x[output_index] = top_node[max_i];
						p_map[output_index] = max_i;
					}
					else
					{
						// select the max with probability max/(max+max2); always at test time
						int t1 = (int)(100 * max / denom);
						if (r <= t1 || train == 0)
						{
							node.x[output_index] = top_node[max_i];
							p_map[output_index] = max_i;
						}
						else
						{
							node.x[output_index] = top_node[max2_i];
							p_map[output_index] = max2_i;
						}
					}
					output_index++;
				}
			}
		}
	}
};
//----------------------------------------------------------------------------------------------------------
// D R O P O U T
//
// Dropout: copies the input through and, during training, randomly marks nodes
// as dropped in drop_mask (1 = keep, 0 = dropped). The mask gates the deltas in
// the backward pass.
// NOTE(review): a dropped node is scaled by 0.5f rather than zeroed here —
// looks intentional (soft dropout), preserved as-is; verify against training code.
class dropout_layer : public base_layer
{
	float _dropout_rate;   // probability in [0,1] that a node is dropped during training
	//std::map<const base_layer*, matrix> drop_mask;
	matrix drop_mask;      // per-node keep/drop mask reused by distribute_delta
public:
	dropout_layer(const char *layer_name, float dropout_rate) : base_layer(layer_name, 1)
	{
		_has_weights = false;
		_dropout_rate = dropout_rate;
		p_act = NULL;// new_activation_function("identity");
	}
	virtual ~dropout_layer() {}
	virtual std::string get_config_string() { std::string str = "dropout " + float2str(_dropout_rate)+"\n"; return str; }
	virtual void resize(int _w, int _h = 1, int _c = 1)
	{
		if (_w<1) _w = 1; if (_h<1) _h = 1; if (_c<1) _c = 1;
		drop_mask.resize(_w, _h, _c);
		base_layer::resize(_w, _h, _c);
	}
	// no weights
	virtual void calculate_dw(const base_layer &top_layer, matrix &dw, const int train = 1) {}
	virtual matrix * new_connection(base_layer &top, int weight_mat_index)
	{
		// dropout is a pass-through: same shape as the layer feeding it
		resize(top.node.cols, top.node.rows, top.node.chans);
		return base_layer::new_connection(top, weight_mat_index);
	}
	// for dropout...
	// we know this is called first in the backward pass, and the train will be set to 1
	// when that happens the dropouts will be set.
	// different dropouts for each mininbatch... don't know if that matters...
	virtual void accumulate_signal(const base_layer &top, const matrix &w, const int train = 0)
	{
		const float *top_node = top.node.x;
		const int size = top.node.chans*top.node.rows*top.node.cols;
		memcpy(node.x, top_node, sizeof(float)*size);
		// matrix *pmask = &(drop_mask[&top]);
		matrix *pmask = &drop_mask;
		if (train)
		{
			pmask->fill(1);
			int k;
			// FIX: the unrolled loop must stop while a full group of 4 remains
			// (k + 3 < size). The previous bound (k < size) read/wrote up to 3
			// elements past the end when size was not a multiple of 4, and the
			// old tail restart at k - 4 re-processed (double-dropped/double-scaled)
			// the final group even when size was aligned.
			for (k = 0; k + 3 < size; k += 4) // do 4 at a time
			{
				int r = rand();
				if ((r % 100) <= (_dropout_rate*100.f)) { pmask->x[k] = 0.0; node.x[k] *= 0.5f; };
				if (((r >> 1) % 100) <= (_dropout_rate*100.f)) { pmask->x[k + 1] = 0.0; node.x[k + 1] *= 0.5f; }
				if (((r >> 2) % 100) <= (_dropout_rate*100.f)) { pmask->x[k + 2] = 0.0; node.x[k + 2] *= 0.5f; }
				if (((r >> 3) % 100) <= (_dropout_rate*100.f)) { pmask->x[k + 3] = 0.0; node.x[k + 3] *= 0.5f; }
			}
			// handle the remaining (size % 4) nodes one at a time
			for (; k < size; k++)
			{
				int r = rand();
				if ((r % 100) <= (_dropout_rate*100.f)) { pmask->x[k] = 0.0; node.x[k] *= 0.5f; };
			}
		}
	}
#ifndef MOJO_NO_TRAINING
	virtual void distribute_delta(base_layer &top, const matrix &w, const int train = 1)
	{
		// zero the deltas of dropped nodes, then pass the rest upstream
		// delta *= drop_mask[&top];
		delta *= drop_mask;
		top.delta += delta;
	}
#endif
};
//----------------------------------------------------------------------------------------------------------
// M F M - M a x F e a t u r e M a p
// (A Lightened CNN for Deep Face Representation) http://arxiv.org/pdf/1511.02683.pdf
// the parameter passed in is the number of maps pooled
// Max-Feature-Map (MFM): pools _pool input channels element-wise by max,
// shrinking the channel count by a factor of _pool. max_map remembers which
// input element won so the backward pass can route deltas.
class maxout_layer : public base_layer
{
	int _pool;        // number of input channels pooled into one output channel
	matrix max_map;   // per output element: flat index of the winning input element
public:
	maxout_layer(const char *layer_name, int pool_chans) : base_layer(layer_name, 1)
	{
		_pool = pool_chans;
		if (_pool < 2) _pool = 2;
		p_act = new_activation_function("identity");
		_has_weights = false;
	}
	virtual ~maxout_layer() {}
	// NOTE(review): other layers separate the name and parameter with a space
	// ("max_pool 2 2"); "mfm"+int2str(_pool) yields e.g. "mfm2" — verify this
	// round-trips through the model-file parser before changing it.
	virtual std::string get_config_string() { std::string str = "mfm" + int2str(_pool) + "\n"; return str; }
	virtual void resize(int _w, int _h = 1, int _c = 1)
	{
		// output has 1/_pool of the input channels
		_c /= _pool;
		if (_w<1) _w = 1; if (_h<1) _h = 1; if (_c<1) _c = 1;
		max_map.resize(_w, _h, _c);
		base_layer::resize(_w, _h, _c);
	}
	// pooling passes gradients straight through (identity derivative)
	inline float df(float *in, int i, int size)
	{
		return 1.;
	};
	virtual void activate_nodes() { return; }
	// no weights
	virtual void calculate_dw(const base_layer &top_layer, matrix &dw, const int train = 1) {}
	virtual matrix * new_connection(base_layer &top, int weight_mat_index)
	{
		// wasteful to add weight matrix (1x1x1), but makes other parts of code more OO
		// bad will happen if try to put more than one pool layer
		top.forward_linked_layers.push_back(std::make_pair(weight_mat_index, this));
		int w = (top.node.cols) / 1;
		int h = (top.node.rows) / 1;
		resize(w, h, top.node.chans);
#ifndef MOJO_NO_TRAINING
		backward_linked_layers.push_back(std::make_pair(weight_mat_index, &top));
#endif
		return NULL;
		//return new matrix(1, 1, 1);
	}
	// for maxout
	// we know this is called first in the backward pass, and the train will be set to 1
	// when that happens the dropouts will be set.
	// different dropouts for each mininbatch... don't know if that matters...
	virtual void accumulate_signal(const base_layer &top, const matrix &w, const int train = 0)
	{
		const float *top_node = top.node.x;
		const int chan_size = top.node.rows*top.node.cols;
		//const int pool_offset = top.node.chans / _pool;
		// s = number of output elements; input is treated as _pool stacked groups
		// of s contiguous elements (assumes contiguous storage — TODO confirm the
		// top node is not channel-aligned/padded here)
		const int s = chan_size*top.node.chans / _pool;
		if((top.node.chans % _pool) !=0)
			bail("mfm layer has pool size that is not a multiple of the input channels");
		for (int i = 0; i < s; i++)
		{
			float max = top.node.x[i];
			int maxk = i;
			for (int k = 1; k < _pool; k++)
			{
				if (top.node.x[i + (k*s)] > max)
				{
					//node.x[i + c / 2 * chan_size] = max;
					max = top.node.x[i + (k*s)];
					maxk = i + (k*s);
					// max_map tells which map 0 or 1 when pooling
					//max_map.x[i + c / 2 * chan_size] = 0;
				}
			}
			node.x[i] = max;
			// winner index stored as float in the matrix; cast back on backprop
			max_map.x[i] = (float)maxk;
		}
	}
#ifndef MOJO_NO_TRAINING
	// route each delta back to the input element that won the forward max
	virtual void distribute_delta(base_layer &top, const matrix &w, const int train = 1)
	{
		//		const int chan_size = node.cols*node.rows;
		//		const int pool_offset = top.node.chans / _pool;
		const int chan_size = top.node.rows*top.node.cols;
		//const int pool_offset = top.node.chans / _pool;
		const int s = chan_size*top.node.chans / _pool;
		for (int c = 0; c < s; c++)
		{
			//		for (int k = 0; k < node.cols*node.rows; k++)
			//		{
			int maxmap = (int)max_map.x[c];
			top.delta.x[maxmap] += delta.x[c];
			//		}
		}
	}
#endif
};
//----------------------------------------------------------------------------------------------------------
// C O N V O L U T I O N
//
// 2D convolution layer. Square kernels (kernel_rows == kernel_cols == _w),
// `maps` output channels, shared bias per map. Hot paths for kernel sizes
// 1..5 use unwrapped/SIMD-friendly helpers (unwrap_aligned_NxN +
// dotsum_unwrapped_NxN); other sizes fall back to generic 2D dot products.
class convolution_layer : public base_layer
{
	int _stride;
public:
	int kernel_rows;
	int kernel_cols;
	int maps;
	//int maps_per_kernel;
	int kernels_per_map;
	convolution_layer(const char *layer_name, int _w, int _c, int _s, activation_function *p ) : base_layer(layer_name, _w, _w, _c)
	{
		// pad by kernel-1 so the backward pass can do a "full" correlation
		p_act=p; _stride =_s; kernel_rows=_w; kernel_cols=_w; maps=_c;kernels_per_map=0; pad_cols = kernel_cols-1; pad_rows = kernel_rows-1;
		_use_bias = true;
	}
	virtual ~convolution_layer() {
	}
	// serialized as "convolution <kernel> <maps> <stride> <activation>"
	virtual std::string get_config_string() {std::string str="convolution "+int2str(kernel_cols)+" "+int2str(maps)+" " + int2str(_stride) + " " +p_act->name+"\n"; return str;}
	// fan-in used for weight initialization scaling
	virtual int fan_size() { return kernel_rows*kernel_cols*maps *kernels_per_map; }
	virtual void resize(int _w, int _h=1, int _c=1) // special resize nodes because bias handled differently with shared wts
	{
		// kernels > 1x1 use channel-aligned matrices (padded chan_stride) for SIMD
		if(kernel_rows*kernel_cols==1) node =matrix(_w,_h,_c); /// use special channel aligned matrix object
		else node =matrix(_w,_h,_c,NULL,true); /// use special channel aligned matrix object
		bias =matrix(1,1,_c);
		bias.fill(0.);
#ifndef MOJO_NO_TRAINING
		if(kernel_rows*kernel_cols==1) delta =matrix(_w,_h,_c); /// use special channel aligned matrix object
		else delta =matrix(_w,_h,_c,NULL,true); /// use special channel aligned matrix object
#endif
	}
	// this connection work won't work with multiple top layers (yet)
	virtual matrix * new_connection(base_layer &top, int weight_mat_index)
	{
		top.forward_linked_layers.push_back(std::make_pair(weight_mat_index,this));
#ifndef MOJO_NO_TRAINING
		backward_linked_layers.push_back(std::make_pair(weight_mat_index,&top));
#endif
		// re-shuffle these things so weights of size kernel w,h,kerns - node of size see below
		//int total_kernels=top.node.chans*node.chans;
		kernels_per_map += top.node.chans;
		// standard "valid" convolution output size
		resize((top.node.cols-kernel_cols)/_stride+1, (top.node.rows-kernel_rows)/_stride+1, maps);
		return new matrix(kernel_cols,kernel_rows, maps*kernels_per_map);
	}
	// activate_nodes
	// apply the activation function (and per-map shared bias) to each output map
	virtual void activate_nodes()
	{
		const int map_size = node.rows*node.cols;
		const int map_stride = node.chan_stride;
		const int _maps = maps;
		MOJO_THREAD_THIS_LOOP(_thread_count)
		for (int c=0; c<_maps; c++)
		{
			p_act->fc(&node.x[c*map_stride],map_size,bias.x[c]);
			//if(node.x[c*map_stride]!=node.x[c*map_stride]) bail("activate");
		}
	}
	// forward pass: node += conv(top.node, w); assumes node was zeroed beforehand
	virtual void accumulate_signal( const base_layer &top, const matrix &w, const int train =0)
	{
		const int kstep = top.node.chan_stride;// NOT the same as top.node.cols*top.node.rows;
		const int jstep=top.node.cols;
		//int output_index=0;
		const int kernel_size=kernel_cols*kernel_rows;
		const int kernel_map_step = kernel_size*kernels_per_map;
		const int map_size=node.cols*node.rows;
		const int map_stride = node.chan_stride;
		const float *_w = w.x;
		const int top_chans = top.node.chans;
		const int map_cnt=maps;
		const int w_size = kernel_cols;
		const int stride = _stride;
		const int node_size= node.cols;
		const int top_node_size = top.node.cols;
		const int outsize = node_size*node_size;
		if(kernel_rows>=2 && (kernel_rows<=5))
		{
			// fast path: unwrap the input once per channel into kernel-aligned
			// columns, then each output map is a flat dot-product accumulate
			matrix img_ptr(node_size, node_size, kernel_rows*kernel_rows, NULL, true);
			for (int k = 0; k < top_chans; k++) // input channels --- same as kernels_per_map - kern for each input
			{
				unwrap_aligned_NxN(kernel_rows, img_ptr.x, &top.node.x[k*kstep], jstep, stride);
				float *ww = &w.x[(0 + k*maps)*kernel_size];
				if(kernel_rows==2)
				{
					MOJO_THREAD_THIS_LOOP_DYNAMIC(_thread_count)
					for (int map = 0; map < map_cnt; map+=1)  dotsum_unwrapped_2x2(img_ptr.x, ww+map*kernel_size, node.x + map_stride*map, outsize);
				}
				else if(kernel_rows==3)
				{
					MOJO_THREAD_THIS_LOOP_DYNAMIC(_thread_count)
					for (int map = 0; map < map_cnt; map+=1)  dotsum_unwrapped_3x3(img_ptr.x, ww+map*kernel_size, node.x + map_stride*map, outsize);
				}
				else if(kernel_rows==4)
				{
					MOJO_THREAD_THIS_LOOP_DYNAMIC(_thread_count)
					for (int map = 0; map < map_cnt; map+=1)  dotsum_unwrapped_4x4(img_ptr.x, ww+map*kernel_size, node.x + map_stride*map, outsize);
				}
				else //(kernel_rows==5)
				{
					MOJO_THREAD_THIS_LOOP_DYNAMIC(_thread_count)
					for (int map = 0; map < map_cnt; map+=1)  dotsum_unwrapped_5x5(img_ptr.x, ww+map*kernel_size, node.x + map_stride*map, outsize);
				}
			}
		}
		else if (kernel_rows == 1)
		{
			// 1x1 convolution: per-map scaled accumulate (node is contiguous
			// here — resize() uses an unaligned matrix for 1x1 kernels, so
			// map_size equals chan_stride)
			for (int k = 0; k < top_chans; k++) // input channels --- same as kernels_per_map - kern for each input
			{
				const float *_top_node = &top.node.x[k*kstep];
				//MOJO_THREAD_THIS_LOOP_DYNAMIC(_thread_count)
				for (int map = 0; map < map_cnt; map++)
				{
					const float cw = w.x[(map + k*maps)*kernel_size];
					const int mapoff = map_size*map;
					for (int j = 0; j < node_size*node_size; j += stride) node.x[j + mapoff] += _top_node[j] * cw;
				}
			}
		}
		else
		{
			// generic fallback for kernel sizes > 5
			for(int map=0; map<maps; map++) // how many maps  maps= node.chans
			{
				for(int k=0; k<top_chans; k++) // input channels --- same as kernels_per_map - kern for each input
				{
					MOJO_THREAD_THIS_LOOP_DYNAMIC(_thread_count)
					for(int j=0; j<node_size; j+= stride) // input h
						for(int i=0; i<node_size; i+= stride) // intput w
							node.x[i+(j)*node.cols +map_stride*map]+=
								unwrap_2d_dot(
									&top.node.x[(i)+(j)*jstep + k*kstep],
									&w.x[(map+k*maps)*kernel_size],
									kernel_cols,
									jstep,kernel_cols);
				} // k
			} // all maps=chans
		}
	}
#ifndef MOJO_NO_TRAINING
	// convolution::distribute_delta
	// backward pass: top.delta += "full" convolution of (padded) delta with the
	// 180-degree-rotated kernels
	virtual void distribute_delta(base_layer &top, const matrix &w, const int train=1)
	{
		// here to calculate top_delta += bottom_delta * W
		// top_delta.x[s] += bottom_delta.x[t]*w.x[s+t*w.cols];
		matrix delta_pad(delta, pad_cols, pad_rows);
		//const int kstep=top.delta.cols*top.delta.rows;
		const int kstep=top.delta.chan_stride;
		const int jstep=top.delta.cols;
		const int output_index=0;
		const int kernel_size=kernel_cols*kernel_rows;
		const int kernel_map_step = kernel_size*kernels_per_map;
		const int map_size=delta_pad.cols*delta_pad.rows;
		const int map_stride=delta_pad.chan_stride;
		const float *_w = w.x;
		const int w_size = kernel_cols;
		const int delta_size = delta_pad.cols;
		const int map_cnt=maps;
		const int top_delta_size = top.delta.rows;
		const int top_delta_chans = top.delta.chans;
		const int stride = _stride;
		// aligned scratch copy of top.delta so the unwrapped helpers can be used
		matrix delt(top.delta.cols, top.delta.rows, top.delta.chans,NULL,true);
		if (kernel_cols == 5)
		{
			//*
			matrix img_ptr(delta_size, delta_size, 25, NULL, true);
			matrix filter_ptr(28, 1);
			//matrix imgout_ptr(outsize + 7, 1);
			for (int map = 0; map < map_cnt; map+=1) // how many maps  maps= node.chans
			{
				unwrap_aligned_NxN(5, img_ptr.x, &delta_pad.x[map*map_stride], delta_size, stride);
				const int outsize = top_delta_size*top_delta_size;
				for (int k = 0; k < top_delta_chans; k++) // input channels --- same as kernels_per_map - kern for each input
				{
					_w = &w.x[(k*maps + map)*kernel_size];
					// flip-flip to make 180 version
					for (int ii = 0; ii < 25; ii++) filter_ptr.x[ii] = _w[24 - ii];
					//float *out = node.x + map_stride*map;
					//float *out = &top.delta.x[k*kstep];
					float *out = &delt.x[k*delt.chan_stride];
					// copy in, accumulate via the unwrapped dot, copy back
					memcpy(out,&top.delta.x[k*kstep],sizeof(float)*outsize);
					dotsum_unwrapped_5x5(img_ptr.x, filter_ptr.x, out, outsize);// imgout_ptr.x, outsize);
					memcpy(&top.delta.x[k*kstep],out,sizeof(float)*outsize);
				}
			}
			/*/
			matrix filter_ptr(28, 1);
			matrix img_ptr(28 * delta_size*delta_size, 1);
			matrix imgout_ptr(delta_size*delta_size, 1);
			for (int map = 0; map < map_cnt; map++) // how many maps  maps= node.chans
			{
				unwrap_aligned_5x5(img_ptr.x, &delta_pad.x[map*map_stride], delta_size, stride);
				const int outsize = top_delta_size*top_delta_size;
				for (int k = 0; k < top_delta_chans; k++) // input channels --- same as kernels_per_map - kern for each input
				{
					_w = &w.x[(k*maps + map)*kernel_size];
					// flip-flip to make 180 version
					for (int ii = 0; ii < 25; ii++) filter_ptr.x[ii] = _w[24 - ii];
					dot_unwrapped_5x5(img_ptr.x, filter_ptr.x, imgout_ptr.x, outsize);
					float *out = &top.delta.x[k*kstep];
					for (int j = 0; j < outsize; j++) out[j] += imgout_ptr.x[j];
				}
			}
			//*/
			//		return;
		}
		else if(kernel_cols==3)
		{
			matrix img_ptr(delta_size, delta_size, 9, NULL, true);
			matrix filter_ptr(9, 1);
			//matrix imgout_ptr(outsize + 7, 1);
			for (int map = 0; map < map_cnt; map+=1) // how many maps  maps= node.chans
			{
				unwrap_aligned_NxN(3, img_ptr.x, &delta_pad.x[map*map_stride], delta_size, stride);
				const int outsize = top_delta_size*top_delta_size;
				for (int k = 0; k < top_delta_chans; k++) // input channels --- same as kernels_per_map - kern for each input
				{
					_w = &w.x[(k*maps + map)*kernel_size];
					// flip-flip to make 180 version
					for (int ii = 0; ii < 9; ii++) filter_ptr.x[ii] = _w[8 - ii];
					//float *out = node.x + map_stride*map;
					//	float *out = &top.delta.x[k*kstep];
					//	dotsum_unwrapped_3x3(img_ptr.x, filter_ptr.x, out, outsize);// imgout_ptr.x, outsize);
					float *out = &delt.x[k*delt.chan_stride];
					memcpy(out,&top.delta.x[k*kstep],sizeof(float)*outsize);
					dotsum_unwrapped_3x3(img_ptr.x, filter_ptr.x, out, outsize);// imgout_ptr.x, outsize);
					memcpy(&top.delta.x[k*kstep],out,sizeof(float)*outsize);
				}
			}
		}
		else if (kernel_cols == 2)
		{
			matrix img_ptr(delta_size, delta_size, 4, NULL, true);
			matrix filter_ptr(4, 1);
			matrix out_aligned(top_delta_size,top_delta_size,1,NULL,true);
			//matrix imgout_ptr(outsize + 7, 1);
			for (int map = 0; map < map_cnt; map+=1) // how many maps  maps= node.chans
			{
				unwrap_aligned_NxN(2, img_ptr.x, &delta_pad.x[map*map_stride], delta_size, stride);
				const int outsize = top_delta_size*top_delta_size;
				for (int k = 0; k < top_delta_chans; k++) // input channels --- same as kernels_per_map - kern for each input
				{
					_w = &w.x[(k*maps + map)*kernel_size];
					// flip-flip to make 180 version
					for (int ii = 0; ii < 4; ii++) filter_ptr.x[ii] = _w[3 - ii];
					memcpy(out_aligned.x, &top.delta.x[k*kstep],outsize*sizeof(float));
					//float *out = node.x + map_stride*map;
					float *out = out_aligned.x;// &top.delta.x[k*kstep];
					dotsum_unwrapped_2x2(img_ptr.x, filter_ptr.x, out, outsize);// imgout_ptr.x, outsize);
					memcpy(&top.delta.x[k*kstep],out_aligned.x,outsize*sizeof(float));
				}
			}
		}
		else if (kernel_cols == 1)
		{
			// 1x1: each top delta gets a weighted sum over output maps
			for (int j = 0; j<top.delta.rows; j += stride) // input h
			{
				for (int i = 0; i<top.delta.cols; i += stride) // intput w
				{
					for (int k = 0; k<top.delta.chans; k++) // input channels --- same as kernels_per_map - kern for each input
					{
						int td_i = i + (j)*jstep + k*kstep;
						// NOTE: this local pointer shadows the `delt` matrix above
						float *delt = &delta_pad.x[i + (j)*delta_pad.cols + 0*map_stride];
						float *wx = &w.x[(0 + k*maps)*kernel_size];
						for (int map = 0; map<maps; map++) // how many maps  maps= node.chans
						{
							top.delta.x[td_i] += (*delt) * (*wx);
							delt += map_stride;
							wx += kernel_size;
						} // all input chans
						//output_index++;
					}
				}
			} //y
		}
		else
		{
			// generic fallback for kernel sizes > 5
			for(int j=0; j<top.delta.rows; j+=stride) // input h
			{
				for(int i=0; i<top.delta.cols; i+=stride) // intput w
				{
					for(int k=0; k<top.delta.chans; k++) // input channels --- same as kernels_per_map - kern for each input
					{
						int td_i = i+(j)*jstep + k*kstep;
						for(int map=0; map<maps; map++) // how many maps  maps= node.chans
						{
							top.delta.x[td_i] += unwrap_2d_dot_rot180(
								&delta_pad.x[i+(j)*delta_pad.cols + map*map_stride],
								&w.x[(map+k*maps)*kernel_size],
								kernel_cols,
								delta_pad.cols,kernel_cols);
						} // all input chans
						//output_index++;
					}
				}
			} //y
		} // all maps=chans
	}
	// convolution::calculate_dw
	// weight gradient: each kernel tap is the 2D correlation of the top
	// activations (shifted by the tap offset) with this layer's delta map
	virtual void calculate_dw(const base_layer &top, matrix &dw, const int train =1)
	{
		int kstep=top.delta.chan_stride;
		int jstep=top.delta.cols;
		int output_index=0;
		int kernel_size=kernel_cols*kernel_rows;
		int kernel_map_step = kernel_size*kernels_per_map;
		int map_size=delta.cols*delta.rows;
		int map_stride=delta.chan_stride;
		dw.resize(kernel_cols, kernel_rows,kernels_per_map*maps);
		dw.fill(0);
		// node x already init to 0
		output_index=0;
		const int stride = _stride;
		const int top_node_size= top.node.cols;
		const int node_size = node.rows;
		const int delta_size = delta.cols;
		const int kern_len=kernel_cols;
		const float *_top;
		if(kern_len==5)
		{
			// fully unrolled 5x5: one unwrap_2d_dot per kernel tap
			for(int map=0; map<maps; map++) // how many maps  maps= node.chans
			{
				const float *_delta =&delta.x[map*map_stride];
				for(int k=0; k<top.node.chans; k++) // input channels --- same as kernels_per_map - kern for each input
				{
					_top = &top.node.x[k*kstep];
					const int w_i = (map+k*maps)*kernel_size;
					const float *_t=_top;
					float *_w=dw.x+w_i;
					_w[0]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[1]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[2]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[3]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[4]+= unwrap_2d_dot( _t, _delta, node_size,top_node_size, delta_size);
					_t=_top+jstep;
					_w=dw.x+w_i+kern_len;
					_w[0]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[1]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[2]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[3]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[4]+= unwrap_2d_dot( _t, _delta, node_size,top_node_size, delta_size);
					_t=_top+jstep*2;
					_w=dw.x+w_i+kern_len*2;
					_w[0]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[1]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[2]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[3]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[4]+= unwrap_2d_dot( _t, _delta, node_size,top_node_size, delta_size);
					_t=_top+jstep*3;
					_w=dw.x+w_i+kern_len*3;
					_w[0]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[1]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[2]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[3]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[4]+= unwrap_2d_dot( _t, _delta, node_size,top_node_size, delta_size);
					_t=_top+jstep*4;
					_w=dw.x+w_i+kern_len*4;
					_w[0]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[1]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[2]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[3]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
					_w[4]+= unwrap_2d_dot( _t, _delta, node_size,top_node_size, delta_size);
				} //y
			} // all maps=chans
		}
		else if(kern_len==3)
		{
			// fully unrolled 3x3
			for(int map=0; map<maps; map++) // how many maps  maps= node.chans
			{
				const float *_delta =&delta.x[map*map_stride];
				for(int k=0; k<top.node.chans; k++) // input channels --- same as kernels_per_map - kern for each input
				{
					_top = &top.node.x[k*kstep];
					const int w_i = (map+k*maps)*kernel_size;
					dw.x[w_i+0+(0)*kern_len]+= unwrap_2d_dot( _top + 0+(0)*jstep, _delta, node_size,top_node_size, delta_size);
					dw.x[w_i+1+(0)*kern_len]+= unwrap_2d_dot( _top + 1+(0)*jstep, _delta, node_size,top_node_size, delta_size);
					dw.x[w_i+2+(0)*kern_len]+= unwrap_2d_dot( _top + 2+(0)*jstep, _delta, node_size,top_node_size, delta_size);
					dw.x[w_i+0+(1)*kern_len]+= unwrap_2d_dot( _top + 0+(1)*jstep, _delta, node_size,top_node_size, delta_size);
					dw.x[w_i+1+(1)*kern_len]+= unwrap_2d_dot( _top + 1+(1)*jstep, _delta, node_size,top_node_size, delta_size);
					dw.x[w_i+2+(1)*kern_len]+= unwrap_2d_dot( _top + 2+(1)*jstep, _delta, node_size,top_node_size, delta_size);
					dw.x[w_i+0+(2)*kern_len]+= unwrap_2d_dot( _top + 0+(2)*jstep, _delta, node_size,top_node_size, delta_size);
					dw.x[w_i+1+(2)*kern_len]+= unwrap_2d_dot( _top + 1+(2)*jstep, _delta, node_size,top_node_size, delta_size);
					dw.x[w_i+2+(2)*kern_len]+= unwrap_2d_dot( _top + 2+(2)*jstep, _delta, node_size,top_node_size, delta_size);
				} //y
			} // all maps=chans
		}
		else
		{
			// generic kernel size
			for(int map=0; map<maps; map++) // how many maps  maps= node.chans
			{
				const float *_delta =&delta.x[map*map_stride];
				for(int k=0; k<top.node.chans; k++) // input channels --- same as kernels_per_map - kern for each input
				{
					_top = &top.node.x[k*kstep];
					const int w_i = (map+k*maps)*kernel_size;
					for(int jj=0; jj<kern_len; jj+=1)
					{
						for(int ii=0; ii<kern_len; ii+=1)
						{
							dw.x[w_i+ii+(jj)*kern_len]+= unwrap_2d_dot( _top + ii+(jj)*jstep, _delta,
								node_size,top_node_size, delta_size);
						} // all input chans
					} // x
				} //y
			} // all maps=chans
		}
	}
#endif
};
//----------------------------------------------------------------------------------------------------------
// D E E P C N E T
// 2x2 convolution followed by 2x2 max pool
// odd number should be in-size, then -1 after convolution and divide by 2 for output size
// DeepCNet block: a fixed 2x2 convolution fused with a 2x2 max pool.
class deepcnet_layer : public base_layer
{
	int _stride;
	matrix conv_delta;           // delta buffer at the pre-pool (convolution) resolution
	std::vector<int> _max_map;   // per pooled output node: winning index into the conv output
public:
	int kernel_rows;
	int kernel_cols;
	int maps;
	//int maps_per_kernel;
	int kernels_per_map;
	static const int _pool=2;    // fixed 2x2 max pool after the 2x2 convolution
	// _c = number of output maps; kernel is fixed at 2x2, stride 1, pad 1
	deepcnet_layer(const char *layer_name, int _c, activation_function *p) : base_layer(layer_name, 2, 2, _c)
	{
		p_act = p; _stride = 1; kernel_rows = 2; kernel_cols = 2; maps = _c;
		kernels_per_map = 0; pad_cols = 1; pad_rows = 1;
		_use_bias = true;
	}
	virtual ~deepcnet_layer() {}
	// serialized as "deepcnet <maps> <activation>"
	virtual std::string get_config_string() { std::string str = "deepcnet " + int2str(maps) + " " + p_act->name + "\n"; return str; }
	// fan-in used for weight initialization scaling
	virtual int fan_size() { return kernel_rows*kernel_cols*maps *kernels_per_map; }
	virtual void resize(int _w, int _h = 1, int _c = 1) // special resize nodes because bias handled differently with shared wts
	{
		node = matrix(_w, _h, _c);
		bias = matrix(1, 1, _c);
		bias.fill(0.);
		// one max-map entry per pooled output node
		_max_map.resize(_w*_h*_c);
		// conv output is _pool times larger than the pooled node in each dimension
		conv_delta = matrix(_w*_pool, _h*_pool, maps);
#ifndef MOJO_NO_TRAINING
		delta = matrix(_w, _h, _c, NULL, true);
#endif
	}
// this connection work won't work with multiple top layers (yet)
	virtual matrix * new_connection(base_layer &top, int weight_mat_index)
	{
		top.forward_linked_layers.push_back(std::make_pair(weight_mat_index, this));
#ifndef MOJO_NO_TRAINING
		backward_linked_layers.push_back(std::make_pair(weight_mat_index, &top));
#endif
		// re-shuffle these things so weights of size kernel w,h,kerns - node of size see below
		//int total_kernels=top.node.chans*node.chans;
		kernels_per_map += top.node.chans;
		// 2x2 conv shrinks each dimension by 1, then the 2x2 pool halves it
		resize((top.node.cols - 1) / _pool, (top.node.rows - 1) / _pool, maps);
		return new matrix(kernel_cols, kernel_rows, maps*kernels_per_map);
	}
// activate_nodes
	// apply the activation function (and per-map shared bias) to each output map
	virtual void activate_nodes()
	{
		const int map_size = node.rows*node.cols;
		const int map_stride = node.chan_stride;
		const int _maps = maps;
		MOJO_THREAD_THIS_LOOP(_thread_count)
		for (int c=0; c<_maps; c++) p_act->fc(&node.x[c*map_stride],map_size,bias.x[c]);
	}
	// forward pass: 2x2 convolution of all input channels into imgsum_ptr,
	// then a 2x2 max pool into node; the winning conv index per pooled node is
	// stored in _max_map for the backward pass
	virtual void accumulate_signal(const base_layer &top, const matrix &w, const int train = 0)
	{
		const int kstep = top.node.chan_stride;
		const int jstep = top.node.cols;
		//int output_index=0;
		const int kernel_size = kernel_cols*kernel_rows;
		const int kernel_map_step = kernel_size*kernels_per_map;
		const int pool_map_size = node.cols*node.rows;
		const int pool_map_stride = node.chan_stride;
		const float *_w = w.x;
		const int top_chans = top.node.chans;
		const int map_cnt = maps;
		const int w_size = kernel_cols;
		const int stride = _stride;
		const int conv_size = node.cols * _pool;   // pre-pool (conv output) edge length
		const int pool_size = node.cols;
		const int top_node_size = top.node.cols;
		const int outsize = pool_size*pool_size;
		int *p_map = _max_map.data();
		// conv accumulation buffer: (jstep-1)^2 per map, zeroed before summing channels
		matrix imgsum_ptr(jstep-1,jstep-1,maps,NULL,true);
		imgsum_ptr.fill(0);
		matrix img_ptr( top.node.cols, top.node.cols, 2*2, NULL, true);
		//#pragma omp parallel for  schedule(guided)  num_threads(_thread_count)
		for (int k = 0; k < top_chans; k++) // input channels --- same as kernels_per_map - kern for each input
		{
			// unwrap this channel once, then dot it against every map's kernel
			unwrap_aligned_NxN(2, img_ptr.x, &top.node.x[k*kstep], jstep, 1);
			//		MOJO_THREAD_THIS_LOOP_DYNAMIC(_thread_count)
			MOJO_THREAD_THIS_LOOP(_thread_count)
			for (int map = 0; map < map_cnt; map+=1) // how many maps  maps= node.chans
			{
				//std::cout << omp_get_thread_num();
				float *out = imgsum_ptr.x + imgsum_ptr.chan_stride*map;
				dotsum_unwrapped_2x2(img_ptr.x, &w.x[(map + k*maps)*kernel_size], out, (jstep-1)*(jstep-1));
			}
		}
		int idx = 0;
		// 2x2 max pool over the conv sums
		for (int map = 0; map < map_cnt; map++) // how many maps  maps= node.chans
		{
			float *out = node.x + pool_map_stride*map;
			float *sum = imgsum_ptr.x + imgsum_ptr.chan_stride*map;
			int cnt=0;
			for (int j = 0; j < conv_size; j += _pool)
			{
				for (int i = 0; i < conv_size; i += _pool)
				{
					int maxi = i + j*conv_size;
					if (sum[maxi] < sum[i + 1 + j*conv_size])
						maxi = i + 1 + j*conv_size;
					if (sum[maxi] < sum[i + (j + 1)*conv_size])
						maxi = i + (j + 1)*conv_size;
					if (sum[maxi] < sum[i + 1 + (j + 1)*conv_size])
						maxi = i + 1 + (j + 1)*conv_size;
					//const int pool_idx = (i + j * pool_size) / _pool;
					out[cnt] = sum[maxi];
					// store the winner as a flat index into the full conv volume
					p_map[idx] = maxi+ conv_size*conv_size*map;
					idx++;
					cnt++;
				}
			}
		}
	}
#ifndef MOJO_NO_TRAINING
// convolution::distribute_delta
// Backprop step 1: routes this layer's delta back through the max-pool
// (using the argmax indices saved in _max_map by accumulate_signal) and
// then through the 2x2 convolution — a correlation with the 180-degree
// rotated kernels — accumulating the result into top.delta.
// `train` is accepted for interface compatibility but not used.
virtual void distribute_delta(base_layer &top, const matrix &w, const int train = 1)
{
	// here to calculate top_delta += bottom_delta * W
	// top_delta.x[s] += bottom_delta.x[t]*w.x[s+t*w.cols];
	const int kstep = top.delta.chan_stride;  // stride between top delta channels
	const int jstep = top.delta.cols;         // unused
	const int output_index = 0;               // unused
	const int kernel_size = kernel_cols*kernel_rows;          // 4 for the 2x2 kernels
	const int kernel_map_step = kernel_size*kernels_per_map;  // unused
	const float *_w = w.x;
	const int w_size = kernel_cols;           // unused
	const int map_cnt = maps;
	const int top_delta_size = top.delta.rows;
	const int top_delta_chans = top.delta.chans;
	const int stride = _stride;
	//mojo::matrix intermediate_delta(delta.cols * 2, delta.rows * 2, delta.chans);
	// un-pool: scatter each pooled delta back to the conv-output position
	// that won the max in the forward pass
	conv_delta.fill(0);
	int *p_map = _max_map.data();
	const int s = (int)_max_map.size();
	// put the maxpool result
	for (int k = 0; k<s; k++) conv_delta.x[p_map[k]] += delta.x[k];
	// std::cout << "deepc max";
	// for (int i = 0; i < 10; i++) std::cout << delta.x[i] << ",";
	/// std::cout << "topc max";
	// for (int i = 0; i < 10; i++) std::cout << conv_delta.x[i] << ",";
	// pad the un-pooled delta so the correlation below covers a top-sized output
	matrix delta_pad(conv_delta, pad_cols, pad_rows);
	const int map_size = delta_pad.cols*delta_pad.rows;  // unused
	const int map_stride = delta_pad.chan_stride;
	const int delta_size = delta_pad.cols;
	matrix img_ptr(delta_size, delta_size, 4, NULL, true);  // unwrapped 2x2-patch scratch
	matrix filter_ptr(4, 1);                                // holds one rotated 2x2 kernel
	matrix delt(top.delta.cols, top.delta.rows, top.delta.chans,NULL,true);  // scratch copy of top.delta
	//matrix imgout_ptr(outsize + 7, 1);
	for (int map = 0; map < map_cnt; map+=1) // how many maps maps= node.chans
	{
		unwrap_aligned_NxN(2, img_ptr.x, &delta_pad.x[map*map_stride], delta_size, stride);
		const int outsize = top_delta_size*top_delta_size;
		for (int k = 0; k < top_delta_chans; k++) // input channels --- same as kernels_per_map - kern for each input
		{
			_w = &w.x[(k*maps + map)*kernel_size];
			// flip-flip to make 180 version
			for (int ii = 0; ii < 4; ii++) filter_ptr.x[ii] = _w[3 - ii];
			//float *out = node.x + map_stride*map;
			// NOTE(review): top.delta is round-tripped through the `delt`
			// scratch (copy out, accumulate, copy back) — presumably so
			// dotsum_unwrapped_2x2 operates on aligned memory; confirm.
			float *out = &delt.x[k*delt.chan_stride];
			memcpy(out,&top.delta.x[k*kstep],sizeof(float)*outsize);
			dotsum_unwrapped_2x2(img_ptr.x, filter_ptr.x, out, outsize);// imgout_ptr.x, outsize);
			memcpy(&top.delta.x[k*kstep],out,sizeof(float)*outsize);
			// float *out = &top.delta.x[k*kstep];
			// dotsum_unwrapped_2x2(img_ptr.x, filter_ptr.x, out, outsize);// imgout_ptr.x, outsize);
		}
	}
}
// convolution::calculate_dw
// Backprop step 2: accumulates the weight gradient. Each entry of a
// (kernel_cols x kernel_rows) kernel gets the 2-D dot product of the
// correspondingly shifted top activation window with this layer's
// un-pooled convolution delta (conv_delta, filled by distribute_delta).
// `train` is accepted for interface compatibility but not used.
virtual void calculate_dw(const base_layer &top, matrix &dw, const int train = 1)
{
	// NOTE(review): kstep here is cols*rows, while accumulate_signal() steps
	// input channels by top.node.chan_stride; these differ whenever channels
	// carry alignment padding — confirm they coincide for this layer's inputs.
	int kstep = top.delta.cols*top.delta.rows;
	int jstep = top.delta.cols;      // row stride of a top activation map
	int output_index = 0;            // unused
	int kernel_size = kernel_cols*kernel_rows;
	int kernel_map_step = kernel_size*kernels_per_map;  // unused
	int map_size = conv_delta.cols*conv_delta.rows;     // elements per conv_delta plane
	// one kernel per (input channel, output map) pair
	dw.resize(kernel_cols, kernel_rows, kernels_per_map*maps);
	dw.fill(0);
	// node x already init to 0
	output_index = 0;
	const int stride = _stride;              // unused below
	const int top_node_size = top.node.cols;
	const int delta_size = conv_delta.cols;
	const int kern_len = kernel_cols;
	const float *_top;
	for (int map = 0; map<maps; map++) // how many maps maps= node.chans
	{
		const float *_delta = &conv_delta.x[map*map_size];
		for (int k = 0; k<top.node.chans; k++) // input channels --- same as kernels_per_map - kern for each input
		{
			_top = &top.node.x[k*kstep];
			// start of the (k, map) kernel inside dw
			const int w_i = (map + k*maps)*kernel_size;
			for (int jj = 0; jj<kern_len; jj += 1)
			{
				for (int ii = 0; ii<kern_len; ii += 1)
				{
					// correlate the (ii, jj)-shifted input window with the delta plane
					dw.x[w_i + ii + (jj)*kern_len] +=
						unwrap_2d_dot(_top + ii + (jj)*jstep, _delta,
							delta_size, top_node_size, delta_size);
				} // all input chans
			} // x
		} //y
	} // all maps=chans
}
#endif
};
//----------------------------------------------------------------------------------------------------------
// C O N C A T E N A T I O N | R E S I Z E | P A D
//
// puts a set of output maps together and pads to the desired size
// Concatenates the maps of one or more top layers into a single multi-channel
// node of fixed spatial size (_w x _h), padding or cropping each input as
// needed. Has no trainable weights.
class concatenation_layer : public base_layer
{
	// channel offset (within this layer's node) assigned to each connected top layer
	std::map<const base_layer*, int> layer_to_channel; // name-to-index of layer for layer management
	int _maps;                // running total of concatenated channels
	mojo::pad_type _pad_type; // padding style used when an input is smaller than this layer
public:
	concatenation_layer(const char *layer_name, int _w, int _h, mojo::pad_type p= mojo::zero) : base_layer(layer_name, _w, _h)
	{
		_maps = 0;
		_pad_type = p;
		_has_weights = false;  // pure copy/pad layer — nothing to train
		p_act = NULL;// new_activation_function("identity");
	}
	virtual ~concatenation_layer() {}
	// serializes this layer's configuration line: "concatenate <cols> <pad-style>"
	virtual std::string get_config_string()
	{
		std::string str_p = " zero\n";
		if (_pad_type == mojo::edge) str_p = " edge\n";
		else if (_pad_type == mojo::median_edge) str_p = " median_edge\n";
		std::string str = "concatenate " + int2str(node.cols) + str_p;
		return str;
	}
	// this connection work won't work with multiple top layers (yet)
	// Records the channel offset assigned to `top` and grows this node's
	// channel count by top's channels.
	virtual matrix * new_connection(base_layer &top, int weight_mat_index)
	{
		//if (layer_to_channel[&top]) bail("layer already addded to pad layer"); //already exists
		layer_to_channel[&top] = _maps;
		_maps += top.node.chans;
		resize(node.cols, node.rows, _maps);
		return base_layer::new_connection(top, weight_mat_index);
	}
	// no weights
	virtual void calculate_dw(const base_layer &top_layer, matrix &dw, const int train = 1) {}
	// Copies top's maps (padded or cropped to this layer's spatial size) into
	// the channel range reserved for it. `w` and `train` are unused.
	virtual void accumulate_signal(const base_layer &top, const matrix &w, const int train = 0)
	{
		const float *top_node = top.node.x;    // unused
		const int size = node.rows*node.cols;  // unused
		// positive difference => input needs padding; negative => cropping
		int opadx = node.cols - top.node.cols;
		int opady = node.rows - top.node.rows;
		int padx=0, pady=0, padx_ex=0, pady_ex=0;
		if (opadx > 0) padx = opadx/2;
		if (opady > 0) pady = opady/2;
		// odd difference: one extra pad unit on the second side
		// NOTE(review): a negative odd opadx also sets padx_ex=1 and routes a
		// LARGER input into the pad branch below — confirm this is intended.
		if (opadx % 2 != 0) {
			padx_ex = 1;
		}
		if (opady % 2 != 0) {
			pady_ex = 1;
		}
		int map_offset = layer_to_channel[&top];
		if (padx+ padx_ex > 0 || pady+ pady_ex > 0 )
		{
			matrix m = top.node.pad(padx, pady, padx+ padx_ex, pady+pady_ex, _pad_type, _thread_count);
			memcpy(node.x + node.chan_stride*map_offset, m.x, sizeof(float)*m.size());
		}
		else if((node.cols == top.node.cols) && (node.rows == top.node.rows))
		{
			// exact size match — straight copy
			memcpy(node.x + node.chan_stride*map_offset, top.node.x, sizeof(float)*top.node.size());
		}
		else
		{
			// crop
			// NOTE(review): in this branch opadx<=0, so padx/pady are still 0
			// and dx/dy are always 0 — the crop is taken from the top-left
			// corner, not centered. abs(opadx)/2 may have been intended; confirm.
			int dx = abs(padx) / 2;
			int dy = abs(pady) / 2;
			matrix m = top.node.crop(dx, dy, node.cols, node.rows, _thread_count);
			memcpy(node.x + node.chan_stride*map_offset, m.x, sizeof(float)*m.size());
		}
	}
#ifndef MOJO_NO_TRAINING
	// Routes this layer's delta slice back to `top`, inverting the pad/crop
	// performed by accumulate_signal. `w` and `train` are unused.
	virtual void distribute_delta(base_layer &top, const matrix &w, const int train = 1)
	{
		int map_offset = layer_to_channel[&top];
		int padx = node.cols - top.node.cols;
		int pady = node.rows - top.node.rows;
		if (padx > 0) padx /= 2;
		if (pady > 0) pady /= 2;
		if (padx > 0 || pady > 0)
		{
			// forward pass padded, so crop the delta back down
			matrix m = delta.get_chans(map_offset, top.delta.chans);
			top.delta += m.crop(padx, pady, top.delta.cols, top.delta.rows);
		}
		else if ((node.cols == top.node.cols) && (node.rows == top.node.rows))
		{
			top.delta += delta.get_chans(map_offset, top.delta.chans);
		}
		else
		{
			matrix m = delta.get_chans(map_offset, top.delta.chans);
			// pad
			// (padx/pady hold the full negative size difference here, so
			// dx/dy are half the difference per side)
			int dx = abs(padx) / 2;
			int dy = abs(pady) / 2;
			top.delta += m.pad(dx, dy);
		}
	}
#endif
};
//--------------------------------------------------
// N E W L A Y E R
//
// "input", "fully_connected","max_pool","convolution","concatination"
// Factory: parses a one-line configuration string and returns the matching,
// newly allocated layer; the caller takes ownership of the pointer.
// Recognized types: input, fully_connected, softmax, max_pool, mfm,
// semi_stochastic_pool, deepcnet, convolution, dropout, resize/concatenate.
// Unknown types bail() with an error message.
base_layer *new_layer(const char *layer_name, const char *config)
{
	std::istringstream iss(config);
	std::string str;
	iss>>str;
	// Zero-initialize so a malformed or truncated config yields deterministic
	// zero dimensions instead of reading uninitialized variables (the original
	// left w,h,c,s uninitialized, which is UB when stream extraction fails).
	int w=0, h=0, c=0, s=0;
	if(str.compare("input")==0)
	{
		iss>>w; iss>>h; iss>>c;   // width, height, channels
		return new input_layer(layer_name, w,h,c);
	}
	else if(str.compare("fully_connected")==0)
	{
		std::string act;
		iss>>c; iss>>act;         // unit count, activation name
		return new fully_connected_layer(layer_name, c, new_activation_function(act));
	}
	else if (str.compare("softmax") == 0)
	{
		// softmax is a fully connected layer with a fixed softmax activation
		iss >> c;
		return new fully_connected_layer(layer_name, c, new_activation_function("softmax"));
	}
	else if(str.compare("max_pool")==0)
	{
		iss >> c; iss >> s;       // pool size, optional stride
		// stride is only honored when valid (0 < s <= pool size)
		if(s>0 && s<=c)
			return new max_pooling_layer(layer_name, c, s);
		else
			return new max_pooling_layer(layer_name, c);
	}
	else if (str.compare("mfm") == 0)
	{
		iss >> c;
		return new maxout_layer(layer_name, c);
	}
	else if (str.compare("semi_stochastic_pool") == 0)
	{
		iss >> c; iss >> s;       // pool size, optional stride
		if (s>0 && s <= c)
			return new semi_stochastic_pooling_layer(layer_name, c, s);
		else
			return new semi_stochastic_pooling_layer(layer_name, c);
	}
	else if (str.compare("deepcnet") == 0)
	{
		std::string act;
		iss >> c; iss >> act;     // maps, activation name
		return new deepcnet_layer(layer_name, c, new_activation_function(act));
	}
	else if(str.compare("convolution")==0)
	{
		std::string act;
		iss>>w; iss>>c; iss>>s; iss>>act;   // kernel size, maps, stride, activation
		return new convolution_layer(layer_name, w,c,s, new_activation_function(act));
	}
	else if (str.compare("dropout") == 0)
	{
		float fc = 0.f;           // dropout fraction (zero-init for malformed config)
		iss >> fc;
		return new dropout_layer(layer_name, fc);
	}
	else if((str.compare("resize")==0) || (str.compare("concatenate") == 0))
	{
		std::string pad;
		iss>>w;                   // target spatial size (square)
		iss >> pad;               // pad style keyword
		mojo::pad_type p = mojo::zero;
		if (pad.compare("median") == 0) p = mojo::median_edge;
		else if (pad.compare("median_edge") == 0) p = mojo::median_edge;
		else if (pad.compare("edge") == 0) p = mojo::edge;
		return new concatenation_layer(layer_name, w,w, p);
	}
	else
	{
		bail("ERROR : layer type not valid: '" + str + "'\n");
	}
	return NULL;
}
} // namespace
|
adi.pluto_ancc.seq_par.c |
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
double X[N][N+20];
double A[N][N+20];
double B[N][N+20];
/* Deterministically seeds the global N x N arrays: each cell derives from
 * (1 + (row*col) % 1024); A gets that value halved, B and X get it thirded. */
void init_arrays()
{
	for (int r = 0; r < N; r++) {
		for (int c = 0; c < N; c++) {
			const int seed = 1 + (r * c) % 1024;
			A[r][c] = seed / 2.0;
			B[r][c] = seed / 3.0;
			X[r][c] = seed / 3.0;
		}
	}
}
/* Returns wall-clock time in seconds with microsecond resolution.
 * Fixes vs. original: the gettimeofday() status was stored in an unused
 * variable and never checked (a failure would return garbage silently);
 * now the status is checked and reported. The obsolete timezone argument
 * is passed as NULL per POSIX. */
double rtclock()
{
	struct timeval tp;
	const int stat = gettimeofday(&tp, NULL);
	if (stat != 0)
		fprintf(stderr, "Error: gettimeofday failed (return code %d)\n", stat);
	return (tp.tv_sec + tp.tv_usec * 1.0e-6);
}
int main()
{
init_arrays();
double annot_t_start=0, annot_t_end=0, annot_t_total=0;
int annot_i;
for (annot_i=0; annot_i<REPS; annot_i++)
{
annot_t_start = rtclock();
register int i,j,k,t;
register int c1t, c2t, c3t, c4t, c5t, c6t, c7t, c8t, c9t, c10t, c11t, c12t;
register int newlb_c1, newlb_c2, newlb_c3, newlb_c4, newlb_c5, newlb_c6,
newlb_c7, newlb_c8, newlb_c9, newlb_c10, newlb_c11, newlb_c12;
register int newub_c1, newub_c2, newub_c3, newub_c4, newub_c5, newub_c6,
newub_c7, newub_c8, newub_c9, newub_c10, newub_c11, newub_c12;
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
int c1, c2, c3, c4, c5, c6, c7, c8, c9, c10;
register int lb, ub, lb1, ub1, lb2, ub2;
/* Generated from PLuTo-produced CLooG file by CLooG v0.14.1 64 bits in 0.72s. */
for (c1=-1;c1<=floord(3*T+2*N-5,32);c1++) {
lb1=max(max(0,ceild(32*c1-15,48)),ceild(32*c1-T+1,32));
ub1=min(min(floord(32*c1+31,32),floord(T+N-2,16)),floord(32*c1+N+30,48));
#pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6,c7,c8,c9,c10)
for (c2=lb1; c2<=ub1; c2++) {
for (c3=max(max(ceild(16*c2-N-125,128),ceild(16*c1-16*c2-63,64)),0);c3<=min(min(floord(32*c1-32*c2+N+31,128),floord(T+N-1,128)),floord(16*c2+N+14,128));c3++) {
for (c6=max(max(max(ceild(16*c1-16*c2-7,8),8*c3),ceild(16*c2-N-13,16)),0);c6<=min(min(min(8*c3+7,floord(32*c1-32*c2+N+31,16)),floord(16*c2+N+14,16)),floord(T+N-1,16));c6++) {
if ((c1 <= floord(32*c2+16*c6-N,32)) && (c2 <= floord(16*c6-1,16)) && (c6 >= ceild(N,16))) {
for (c8=max(16*c6-N+1,16*c2);c8<=min(16*c2+15,16*c6-1);c8++) {
X[-16*c6+c8+N][N-1]=X[-16*c6+c8+N][N-1]-X[-16*c6+c8+N-1][N-1]*A[-16*c6+c8+N][N-1]/B[-16*c6+c8+N-1][N-1] ;
B[-16*c6+c8+N][N-1]=B[-16*c6+c8+N][N-1]-A[-16*c6+c8+N][N-1]*A[-16*c6+c8+N][N-1]/B[-16*c6+c8+N-1][N-1] ;
}
}
for (c7=max(max(max(0,16*c6-N+1),32*c1-32*c2),16*c2-N+1);c7<=min(min(min(32*c1-32*c2+31,T-1),16*c2-1),16*c6-N+15);c7++) {
for (c8=16*c2;c8<=min(16*c2+15,c7+N-1);c8++) {
for (c9=16*c6;c9<=c7+N-1;c9++) {
X[-c7+c8][-c7+c9]=X[-c7+c8][-c7+c9]-X[-c7+c8][-c7+c9-1]*A[-c7+c8][-c7+c9]/B[-c7+c8][-c7+c9-1] ;
B[-c7+c8][-c7+c9]=B[-c7+c8][-c7+c9]-A[-c7+c8][-c7+c9]*A[-c7+c8][-c7+c9]/B[-c7+c8][-c7+c9-1] ;
X[-c7+c8][-c7+c9-1]=X[-c7+c8][-c7+c9-1]-X[-c7+c8-1][-c7+c9-1]*A[-c7+c8][-c7+c9-1]/B[-c7+c8-1][-c7+c9-1] ;
B[-c7+c8][-c7+c9-1]=B[-c7+c8][-c7+c9-1]-A[-c7+c8][-c7+c9-1]*A[-c7+c8][-c7+c9-1]/B[-c7+c8-1][-c7+c9-1] ;
}
X[-c7+c8][N-1]=X[-c7+c8][N-1]-X[-c7+c8-1][N-1]*A[-c7+c8][N-1]/B[-c7+c8-1][N-1] ;
B[-c7+c8][N-1]=B[-c7+c8][N-1]-A[-c7+c8][N-1]*A[-c7+c8][N-1]/B[-c7+c8-1][N-1] ;
}
}
for (c7=max(max(max(16*c2,0),16*c6-N+1),32*c1-32*c2);c7<=min(min(min(32*c1-32*c2+31,16*c2+14),T-1),16*c6-N+15);c7++) {
for (c9=16*c6;c9<=c7+N-1;c9++) {
X[0][-c7+c9]=X[0][-c7+c9]-X[0][-c7+c9-1]*A[0][-c7+c9]/B[0][-c7+c9-1] ;
B[0][-c7+c9]=B[0][-c7+c9]-A[0][-c7+c9]*A[0][-c7+c9]/B[0][-c7+c9-1] ;
}
for (c8=c7+1;c8<=16*c2+15;c8++) {
for (c9=16*c6;c9<=c7+N-1;c9++) {
X[-c7+c8][-c7+c9]=X[-c7+c8][-c7+c9]-X[-c7+c8][-c7+c9-1]*A[-c7+c8][-c7+c9]/B[-c7+c8][-c7+c9-1] ;
B[-c7+c8][-c7+c9]=B[-c7+c8][-c7+c9]-A[-c7+c8][-c7+c9]*A[-c7+c8][-c7+c9]/B[-c7+c8][-c7+c9-1] ;
X[-c7+c8][-c7+c9-1]=X[-c7+c8][-c7+c9-1]-X[-c7+c8-1][-c7+c9-1]*A[-c7+c8][-c7+c9-1]/B[-c7+c8-1][-c7+c9-1] ;
B[-c7+c8][-c7+c9-1]=B[-c7+c8][-c7+c9-1]-A[-c7+c8][-c7+c9-1]*A[-c7+c8][-c7+c9-1]/B[-c7+c8-1][-c7+c9-1] ;
}
X[-c7+c8][N-1]=X[-c7+c8][N-1]-X[-c7+c8-1][N-1]*A[-c7+c8][N-1]/B[-c7+c8-1][N-1] ;
B[-c7+c8][N-1]=B[-c7+c8][N-1]-A[-c7+c8][N-1]*A[-c7+c8][N-1]/B[-c7+c8-1][N-1] ;
}
}
{
for (c7t=max(max(max(32*c1-32*c2,0),16*c2-N+1),16*c6-N+16); c7t<=min(min(min(T-1,32*c1-32*c2+31),16*c6+14),16*c2-1)-3; c7t=c7t+4) {
newlb_c9=-2147483648;
newub_c9=16*c6+15;
register int cbv_1;
cbv_1=c7t+3;
#pragma ivdep
#pragma vector always
for (c7=c7t; c7<=cbv_1; c7=c7+1) {
newlb_c9=max(newlb_c9,max(16*c6,c7+1));
}
for (c7=c7t; c7<=c7t+3; c7=c7+1) {
for (c9=max(16*c6,c7+1); c9<=newlb_c9-1; c9=c9+1) {
register int cbv_2, cbv_3;
cbv_2=16*c2;
cbv_3=min(c7+N-1,16*c2+15)-3;
#pragma ivdep
#pragma vector always
for (c8t=cbv_2; c8t<=cbv_3; c8t=c8t+4) {
double scv_1, scv_2, scv_3, scv_4, scv_5, scv_6, scv_7, scv_8;
double scv_9, scv_10, scv_11, scv_12, scv_13, scv_14, scv_15, scv_16;
double scv_17, scv_18, scv_19, scv_20, scv_21, scv_22, scv_23, scv_24;
double scv_25, scv_26, scv_27, scv_28;
scv_1=X[-c7+(c8t+2)][-c7+c9-1];
scv_2=X[-c7+(c8t+1)][-c7+c9-1];
scv_3=B[-c7+(c8t+1)][-c7+c9];
scv_4=B[-c7+c8t][-c7+c9];
scv_5=B[-c7+(c8t+3)-1][-c7+c9-1];
scv_6=A[-c7+c8t][-c7+c9];
scv_7=B[-c7+c8t-1][-c7+c9-1];
scv_8=B[-c7+(c8t+2)][-c7+c9];
scv_9=B[-c7+(c8t+2)][-c7+c9-1];
scv_10=X[-c7+(c8t+3)][-c7+c9];
scv_11=A[-c7+(c8t+1)][-c7+c9];
scv_12=X[-c7+c8t][-c7+c9];
scv_13=B[-c7+(c8t+1)][-c7+c9-1];
scv_14=X[-c7+(c8t+1)][-c7+c9];
scv_15=B[-c7+(c8t+1)-1][-c7+c9-1];
scv_16=A[-c7+(c8t+1)][-c7+c9-1];
scv_17=B[-c7+(c8t+3)][-c7+c9-1];
scv_18=X[-c7+(c8t+3)][-c7+c9-1];
scv_19=B[-c7+c8t][-c7+c9-1];
scv_20=A[-c7+(c8t+3)][-c7+c9];
scv_21=A[-c7+(c8t+3)][-c7+c9-1];
scv_22=A[-c7+c8t][-c7+c9-1];
scv_23=B[-c7+(c8t+3)][-c7+c9];
scv_24=X[-c7+c8t][-c7+c9-1];
scv_25=A[-c7+(c8t+2)][-c7+c9-1];
scv_26=B[-c7+(c8t+2)-1][-c7+c9-1];
scv_27=A[-c7+(c8t+2)][-c7+c9];
scv_28=X[-c7+(c8t+2)][-c7+c9];
scv_12=scv_12-scv_24*scv_6/scv_19;
scv_14=scv_14-scv_2*scv_11/scv_13;
scv_28=scv_28-scv_1*scv_27/scv_9;
scv_10=scv_10-scv_18*scv_20/scv_17;
scv_4=scv_4-scv_6*scv_6/scv_19;
scv_3=scv_3-scv_11*scv_11/scv_13;
scv_8=scv_8-scv_27*scv_27/scv_9;
scv_23=scv_23-scv_20*scv_20/scv_17;
scv_24=scv_24-X[-c7+c8t-1][-c7+c9-1]*scv_22/scv_7;
scv_2=scv_2-X[-c7+(c8t+1)-1][-c7+c9-1]*scv_16/scv_15;
scv_1=scv_1-X[-c7+(c8t+2)-1][-c7+c9-1]*scv_25/scv_26;
scv_18=scv_18-X[-c7+(c8t+3)-1][-c7+c9-1]*scv_21/scv_5;
scv_19=scv_19-scv_22*scv_22/scv_7;
scv_13=scv_13-scv_16*scv_16/scv_15;
scv_9=scv_9-scv_25*scv_25/scv_26;
scv_17=scv_17-scv_21*scv_21/scv_5;
X[-c7+(c8t+2)][-c7+c9-1]=scv_1;
X[-c7+(c8t+1)][-c7+c9-1]=scv_2;
B[-c7+(c8t+1)][-c7+c9]=scv_3;
B[-c7+c8t][-c7+c9]=scv_4;
B[-c7+(c8t+2)][-c7+c9]=scv_8;
B[-c7+(c8t+2)][-c7+c9-1]=scv_9;
X[-c7+(c8t+3)][-c7+c9]=scv_10;
X[-c7+c8t][-c7+c9]=scv_12;
B[-c7+(c8t+1)][-c7+c9-1]=scv_13;
X[-c7+(c8t+1)][-c7+c9]=scv_14;
B[-c7+(c8t+3)][-c7+c9-1]=scv_17;
X[-c7+(c8t+3)][-c7+c9-1]=scv_18;
B[-c7+c8t][-c7+c9-1]=scv_19;
B[-c7+(c8t+3)][-c7+c9]=scv_23;
X[-c7+c8t][-c7+c9-1]=scv_24;
X[-c7+(c8t+2)][-c7+c9]=scv_28;
}
register int cbv_4;
cbv_4=min(c7+N-1,16*c2+15);
#pragma ivdep
#pragma vector always
for (c8=c8t; c8<=cbv_4; c8=c8+1) {
double scv_29, scv_30, scv_31, scv_32, scv_33, scv_34, scv_35;
scv_29=X[-c7+c8][-c7+c9-1];
scv_30=X[-c7+c8][-c7+c9];
scv_31=B[-c7+c8][-c7+c9-1];
scv_32=B[-c7+c8][-c7+c9];
scv_33=A[-c7+c8][-c7+c9-1];
scv_34=A[-c7+c8][-c7+c9];
scv_35=B[-c7+c8-1][-c7+c9-1];
scv_30=scv_30-scv_29*scv_34/scv_31;
scv_32=scv_32-scv_34*scv_34/scv_31;
scv_29=scv_29-X[-c7+c8-1][-c7+c9-1]*scv_33/scv_35;
scv_31=scv_31-scv_33*scv_33/scv_35;
X[-c7+c8][-c7+c9-1]=scv_29;
X[-c7+c8][-c7+c9]=scv_30;
B[-c7+c8][-c7+c9-1]=scv_31;
B[-c7+c8][-c7+c9]=scv_32;
}
}
}
for (c9t=newlb_c9; c9t<=newub_c9-3; c9t=c9t+4) {
newlb_c8=16*c2;
newub_c8=2147483647;
register int cbv_5;
cbv_5=c7t+3;
#pragma ivdep
#pragma vector always
for (c7=c7t; c7<=cbv_5; c7=c7+1) {
newub_c8=min(newub_c8,min(c7+N-1,16*c2+15));
}
for (c7=c7t; c7<=c7t+3; c7=c7+1) {
register int cbv_6, cbv_7;
cbv_6=16*c2;
cbv_7=newlb_c8-1;
#pragma ivdep
#pragma vector always
for (c8=cbv_6; c8<=cbv_7; c8=c8+1) {
double scv_36, scv_37, scv_38, scv_39, scv_40, scv_41, scv_42, scv_43;
double scv_44, scv_45, scv_46, scv_47, scv_48, scv_49, scv_50, scv_51;
double scv_52, scv_53, scv_54, scv_55, scv_56, scv_57, scv_58, scv_59;
double scv_60, scv_61, scv_62, scv_63;
scv_36=B[-c7+c8-1][-c7+(c9t+2)-1];
scv_37=B[-c7+c8][-c7+(c9t+1)-1];
scv_38=X[-c7+c8][-c7+(c9t+2)];
scv_39=A[-c7+c8][-c7+(c9t+3)-1];
scv_40=X[-c7+c8][-c7+(c9t+3)-1];
scv_41=X[-c7+c8][-c7+(c9t+3)];
scv_42=B[-c7+c8-1][-c7+c9t-1];
scv_43=X[-c7+c8][-c7+(c9t+1)-1];
scv_44=B[-c7+c8][-c7+c9t-1];
scv_45=B[-c7+c8-1][-c7+(c9t+1)-1];
scv_46=X[-c7+c8][-c7+(c9t+1)];
scv_47=B[-c7+c8][-c7+(c9t+2)];
scv_48=B[-c7+c8][-c7+(c9t+1)];
scv_49=A[-c7+c8][-c7+c9t-1];
scv_50=A[-c7+c8][-c7+(c9t+3)];
scv_51=A[-c7+c8][-c7+(c9t+2)-1];
scv_52=B[-c7+c8][-c7+(c9t+3)];
scv_53=A[-c7+c8][-c7+(c9t+1)-1];
scv_54=B[-c7+c8][-c7+(c9t+3)-1];
scv_55=B[-c7+c8-1][-c7+(c9t+3)-1];
scv_56=A[-c7+c8][-c7+c9t];
scv_57=A[-c7+c8][-c7+(c9t+1)];
scv_58=B[-c7+c8][-c7+(c9t+2)-1];
scv_59=A[-c7+c8][-c7+(c9t+2)];
scv_60=X[-c7+c8][-c7+c9t];
scv_61=B[-c7+c8][-c7+c9t];
scv_62=X[-c7+c8][-c7+(c9t+2)-1];
scv_63=X[-c7+c8][-c7+c9t-1];
scv_60=scv_60-scv_63*scv_56/scv_44;
scv_46=scv_46-scv_43*scv_57/scv_37;
scv_38=scv_38-scv_62*scv_59/scv_58;
scv_41=scv_41-scv_40*scv_50/scv_54;
scv_61=scv_61-scv_56*scv_56/scv_44;
scv_48=scv_48-scv_57*scv_57/scv_37;
scv_47=scv_47-scv_59*scv_59/scv_58;
scv_52=scv_52-scv_50*scv_50/scv_54;
scv_63=scv_63-X[-c7+c8-1][-c7+c9t-1]*scv_49/scv_42;
scv_43=scv_43-X[-c7+c8-1][-c7+(c9t+1)-1]*scv_53/scv_45;
scv_62=scv_62-X[-c7+c8-1][-c7+(c9t+2)-1]*scv_51/scv_36;
scv_40=scv_40-X[-c7+c8-1][-c7+(c9t+3)-1]*scv_39/scv_55;
scv_44=scv_44-scv_49*scv_49/scv_42;
scv_37=scv_37-scv_53*scv_53/scv_45;
scv_58=scv_58-scv_51*scv_51/scv_36;
scv_54=scv_54-scv_39*scv_39/scv_55;
B[-c7+c8][-c7+(c9t+1)-1]=scv_37;
X[-c7+c8][-c7+(c9t+2)]=scv_38;
X[-c7+c8][-c7+(c9t+3)-1]=scv_40;
X[-c7+c8][-c7+(c9t+3)]=scv_41;
X[-c7+c8][-c7+(c9t+1)-1]=scv_43;
B[-c7+c8][-c7+c9t-1]=scv_44;
X[-c7+c8][-c7+(c9t+1)]=scv_46;
B[-c7+c8][-c7+(c9t+2)]=scv_47;
B[-c7+c8][-c7+(c9t+1)]=scv_48;
B[-c7+c8][-c7+(c9t+3)]=scv_52;
B[-c7+c8][-c7+(c9t+3)-1]=scv_54;
B[-c7+c8][-c7+(c9t+2)-1]=scv_58;
X[-c7+c8][-c7+c9t]=scv_60;
B[-c7+c8][-c7+c9t]=scv_61;
X[-c7+c8][-c7+(c9t+2)-1]=scv_62;
X[-c7+c8][-c7+c9t-1]=scv_63;
}
}
register int cbv_8;
cbv_8=newub_c8-3;
#pragma ivdep
#pragma vector always
for (c8t=newlb_c8; c8t<=cbv_8; c8t=c8t+4) {
double scv_64, scv_65, scv_66, scv_67, scv_68, scv_69, scv_70, scv_71;
double scv_72, scv_73, scv_74, scv_75, scv_76, scv_77, scv_78, scv_79;
double scv_80, scv_81, scv_82, scv_83, scv_84, scv_85, scv_86, scv_87;
double scv_88, scv_89, scv_90, scv_91, scv_92, scv_93, scv_94, scv_95;
double scv_96, scv_97, scv_98, scv_99, scv_100, scv_101, scv_102, scv_103;
double scv_104, scv_105, scv_106, scv_107, scv_108, scv_109, scv_110, scv_111;
double scv_112, scv_113, scv_114, scv_115, scv_116, scv_117, scv_118, scv_119;
double scv_120, scv_121, scv_122, scv_123, scv_124, scv_125, scv_126, scv_127;
double scv_128, scv_129, scv_130, scv_131, scv_132, scv_133, scv_134, scv_135;
double scv_136, scv_137, scv_138, scv_139, scv_140, scv_141, scv_142, scv_143;
double scv_144, scv_145, scv_146, scv_147, scv_148, scv_149, scv_150, scv_151;
double scv_152, scv_153, scv_154, scv_155, scv_156, scv_157, scv_158, scv_159;
double scv_160, scv_161, scv_162, scv_163, scv_164, scv_165, scv_166, scv_167;
double scv_168, scv_169, scv_170, scv_171, scv_172, scv_173, scv_174, scv_175;
double scv_176, scv_177, scv_178, scv_179, scv_180, scv_181, scv_182, scv_183;
double scv_184, scv_185, scv_186, scv_187, scv_188, scv_189, scv_190, scv_191;
double scv_192, scv_193, scv_194, scv_195, scv_196, scv_197, scv_198, scv_199;
double scv_200, scv_201, scv_202, scv_203, scv_204, scv_205, scv_206, scv_207;
double scv_208, scv_209, scv_210, scv_211, scv_212, scv_213, scv_214, scv_215;
double scv_216, scv_217, scv_218, scv_219, scv_220, scv_221, scv_222, scv_223;
double scv_224, scv_225, scv_226, scv_227, scv_228, scv_229, scv_230, scv_231;
double scv_232, scv_233, scv_234, scv_235, scv_236, scv_237, scv_238, scv_239;
double scv_240, scv_241, scv_242, scv_243, scv_244, scv_245, scv_246, scv_247;
double scv_248, scv_249, scv_250, scv_251, scv_252, scv_253, scv_254, scv_255;
double scv_256, scv_257, scv_258, scv_259, scv_260, scv_261, scv_262, scv_263;
double scv_264, scv_265, scv_266, scv_267, scv_268, scv_269, scv_270, scv_271;
double scv_272, scv_273, scv_274, scv_275, scv_276, scv_277, scv_278, scv_279;
double scv_280, scv_281, scv_282, scv_283, scv_284, scv_285, scv_286, scv_287;
double scv_288, scv_289, scv_290, scv_291, scv_292, scv_293, scv_294, scv_295;
double scv_296, scv_297, scv_298, scv_299, scv_300, scv_301, scv_302, scv_303;
double scv_304, scv_305, scv_306, scv_307, scv_308, scv_309, scv_310, scv_311;
double scv_312, scv_313, scv_314, scv_315, scv_316, scv_317, scv_318, scv_319;
double scv_320, scv_321, scv_322, scv_323, scv_324, scv_325, scv_326, scv_327;
double scv_328, scv_329, scv_330, scv_331, scv_332, scv_333, scv_334, scv_335;
double scv_336, scv_337, scv_338, scv_339, scv_340, scv_341, scv_342, scv_343;
double scv_344, scv_345, scv_346, scv_347, scv_348, scv_349, scv_350, scv_351;
double scv_352, scv_353, scv_354, scv_355, scv_356, scv_357, scv_358, scv_359;
double scv_360, scv_361, scv_362, scv_363, scv_364, scv_365, scv_366, scv_367;
double scv_368, scv_369, scv_370, scv_371, scv_372, scv_373, scv_374, scv_375;
double scv_376, scv_377, scv_378, scv_379, scv_380, scv_381, scv_382, scv_383;
double scv_384, scv_385, scv_386, scv_387, scv_388, scv_389, scv_390, scv_391;
double scv_392, scv_393, scv_394, scv_395, scv_396, scv_397, scv_398, scv_399;
double scv_400, scv_401, scv_402, scv_403, scv_404, scv_405, scv_406, scv_407;
double scv_408, scv_409, scv_410, scv_411, scv_412, scv_413, scv_414, scv_415;
double scv_416, scv_417, scv_418, scv_419, scv_420, scv_421, scv_422, scv_423;
double scv_424, scv_425, scv_426, scv_427, scv_428, scv_429, scv_430, scv_431;
double scv_432, scv_433, scv_434, scv_435, scv_436, scv_437, scv_438, scv_439;
double scv_440, scv_441, scv_442, scv_443, scv_444, scv_445, scv_446, scv_447;
double scv_448, scv_449, scv_450, scv_451, scv_452, scv_453, scv_454, scv_455;
double scv_456, scv_457, scv_458, scv_459, scv_460, scv_461, scv_462, scv_463;
double scv_464, scv_465, scv_466, scv_467, scv_468, scv_469, scv_470, scv_471;
double scv_472, scv_473, scv_474, scv_475, scv_476, scv_477, scv_478, scv_479;
double scv_480, scv_481, scv_482, scv_483, scv_484, scv_485, scv_486, scv_487;
double scv_488, scv_489, scv_490, scv_491, scv_492, scv_493, scv_494, scv_495;
double scv_496, scv_497, scv_498, scv_499, scv_500, scv_501, scv_502, scv_503;
double scv_504, scv_505, scv_506, scv_507, scv_508, scv_509, scv_510, scv_511;
scv_64=A[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+2)-1];
scv_65=B[-(c7t+2)+(c8t+2)-1][-(c7t+2)+(c9t+2)-1];
scv_66=A[-c7t+(c8t+2)][-c7t+(c9t+2)];
scv_67=X[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+3)-1];
scv_68=B[-(c7t+3)+c8t][-(c7t+3)+(c9t+2)-1];
scv_69=A[-c7t+(c8t+2)][-c7t+(c9t+2)-1];
scv_70=B[-(c7t+3)+c8t][-(c7t+3)+(c9t+1)];
scv_71=X[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+3)-1];
scv_72=X[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+1)-1];
scv_73=X[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+3)];
scv_74=A[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+3)];
scv_75=B[-(c7t+1)+(c8t+3)-1][-(c7t+1)+(c9t+3)-1];
scv_76=B[-(c7t+2)+(c8t+3)-1][-(c7t+2)+(c9t+2)-1];
scv_77=B[-(c7t+1)+c8t][-(c7t+1)+(c9t+2)];
scv_78=X[-(c7t+2)+c8t][-(c7t+2)+c9t-1];
scv_79=X[-(c7t+3)+c8t][-(c7t+3)+c9t-1];
scv_80=A[-c7t+(c8t+2)][-c7t+(c9t+1)];
scv_81=X[-c7t+(c8t+2)][-c7t+(c9t+1)-1];
scv_82=X[-c7t+c8t][-c7t+(c9t+1)-1];
scv_83=B[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+3)-1];
scv_84=A[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+1)-1];
scv_85=B[-c7t+(c8t+1)][-c7t+c9t];
scv_86=B[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+3)];
scv_87=X[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+1)-1];
scv_88=X[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+2)-1];
scv_89=A[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+2)];
scv_90=B[-(c7t+2)+c8t][-(c7t+2)+(c9t+3)-1];
scv_91=A[-c7t+(c8t+1)][-c7t+(c9t+2)-1];
scv_92=A[-(c7t+2)+(c8t+2)][-(c7t+2)+c9t];
scv_93=X[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+2)];
scv_94=A[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+1)];
scv_95=A[-(c7t+1)+c8t][-(c7t+1)+c9t];
scv_96=B[-c7t+(c8t+3)][-c7t+(c9t+1)-1];
scv_97=B[-(c7t+3)+(c8t+1)][-(c7t+3)+c9t];
scv_98=A[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+1)];
scv_99=A[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+2)-1];
scv_100=B[-c7t+(c8t+2)][-c7t+(c9t+1)-1];
scv_101=B[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+3)-1];
scv_102=X[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+3)];
scv_103=A[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+3)];
scv_104=X[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+2)-1];
scv_105=X[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+2)-1];
scv_106=B[-(c7t+1)+c8t][-(c7t+1)+(c9t+2)-1];
scv_107=B[-c7t+c8t][-c7t+c9t-1];
scv_108=X[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+1)];
scv_109=A[-c7t+c8t][-c7t+(c9t+2)-1];
scv_110=B[-(c7t+2)+(c8t+1)-1][-(c7t+2)+c9t-1];
scv_111=A[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+1)-1];
scv_112=A[-c7t+c8t][-c7t+(c9t+1)-1];
scv_113=B[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+1)-1];
scv_114=A[-(c7t+2)+c8t][-(c7t+2)+(c9t+2)-1];
scv_115=A[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+1)-1];
scv_116=A[-(c7t+3)+(c8t+3)][-(c7t+3)+c9t-1];
scv_117=A[-(c7t+2)+c8t][-(c7t+2)+c9t-1];
scv_118=X[-(c7t+1)+(c8t+2)][-(c7t+1)+c9t];
scv_119=B[-c7t+(c8t+2)][-c7t+(c9t+3)-1];
scv_120=B[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+2)-1];
scv_121=B[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+1)-1];
scv_122=B[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+3)-1];
scv_123=X[-c7t+(c8t+2)][-c7t+(c9t+1)];
scv_124=B[-(c7t+3)+(c8t+2)][-(c7t+3)+c9t];
scv_125=B[-(c7t+3)+(c8t+1)-1][-(c7t+3)+c9t-1];
scv_126=B[-c7t+(c8t+1)][-c7t+(c9t+3)-1];
scv_127=A[-(c7t+2)+c8t][-(c7t+2)+(c9t+2)];
scv_128=X[-c7t+c8t][-c7t+c9t-1];
scv_129=B[-(c7t+1)+(c8t+3)-1][-(c7t+1)+(c9t+2)-1];
scv_130=X[-(c7t+2)+c8t][-(c7t+2)+(c9t+3)];
scv_131=B[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+3)-1];
scv_132=X[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+3)];
scv_133=B[-(c7t+3)+(c8t+1)-1][-(c7t+3)+(c9t+3)-1];
scv_134=B[-(c7t+3)+(c8t+3)-1][-(c7t+3)+(c9t+2)-1];
scv_135=B[-(c7t+2)+c8t-1][-(c7t+2)+(c9t+1)-1];
scv_136=X[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+1)];
scv_137=X[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+2)];
scv_138=B[-(c7t+3)+(c8t+2)-1][-(c7t+3)+(c9t+2)-1];
scv_139=X[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+1)-1];
scv_140=X[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+3)-1];
scv_141=X[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+1)];
scv_142=B[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+1)-1];
scv_143=B[-(c7t+2)+c8t-1][-(c7t+2)+(c9t+3)-1];
scv_144=A[-c7t+(c8t+1)][-c7t+(c9t+3)];
scv_145=A[-c7t+c8t][-c7t+(c9t+3)-1];
scv_146=A[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+1)-1];
scv_147=X[-c7t+(c8t+3)][-c7t+(c9t+3)-1];
scv_148=X[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+1)];
scv_149=X[-c7t+(c8t+3)][-c7t+(c9t+1)];
scv_150=B[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+3)];
scv_151=A[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+3)];
scv_152=X[-c7t+(c8t+1)][-c7t+(c9t+3)];
scv_153=B[-(c7t+1)+(c8t+2)][-(c7t+1)+c9t-1];
scv_154=B[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+3)];
scv_155=B[-(c7t+2)+c8t-1][-(c7t+2)+c9t-1];
scv_156=A[-c7t+(c8t+1)][-c7t+c9t];
scv_157=X[-c7t+(c8t+2)][-c7t+c9t-1];
scv_158=A[-c7t+c8t][-c7t+(c9t+2)];
scv_159=A[-(c7t+2)+(c8t+3)][-(c7t+2)+c9t];
scv_160=X[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+2)];
scv_161=B[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+3)-1];
scv_162=X[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+2)-1];
scv_163=X[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+2)];
scv_164=X[-c7t+(c8t+3)][-c7t+c9t-1];
scv_165=B[-c7t+(c8t+3)-1][-c7t+(c9t+1)-1];
scv_166=X[-(c7t+2)+c8t][-(c7t+2)+(c9t+1)-1];
scv_167=B[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+1)];
scv_168=B[-(c7t+3)+c8t-1][-(c7t+3)+c9t-1];
scv_169=X[-(c7t+2)+(c8t+1)][-(c7t+2)+c9t];
scv_170=B[-c7t+(c8t+1)-1][-c7t+(c9t+3)-1];
scv_171=X[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+3)];
scv_172=B[-(c7t+1)+c8t-1][-(c7t+1)+(c9t+1)-1];
scv_173=B[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+2)];
scv_174=B[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+2)];
scv_175=A[-(c7t+1)+(c8t+3)][-(c7t+1)+c9t-1];
scv_176=X[-c7t+(c8t+3)][-c7t+(c9t+2)];
scv_177=B[-c7t+(c8t+1)][-c7t+(c9t+1)];
scv_178=B[-(c7t+3)+c8t][-(c7t+3)+c9t];
scv_179=A[-(c7t+2)+c8t][-(c7t+2)+(c9t+3)];
scv_180=B[-(c7t+1)+(c8t+1)-1][-(c7t+1)+(c9t+3)-1];
scv_181=X[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+3)-1];
scv_182=A[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+2)-1];
scv_183=B[-c7t+c8t][-c7t+(c9t+3)-1];
scv_184=A[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+1)-1];
scv_185=X[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+3)];
scv_186=B[-(c7t+1)+(c8t+1)][-(c7t+1)+c9t-1];
scv_187=B[-c7t+(c8t+1)][-c7t+(c9t+2)];
scv_188=B[-(c7t+1)+c8t][-(c7t+1)+c9t-1];
scv_189=A[-(c7t+1)+(c8t+2)][-(c7t+1)+c9t];
scv_190=X[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+3)];
scv_191=X[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+2)-1];
scv_192=B[-(c7t+2)+(c8t+1)-1][-(c7t+2)+(c9t+1)-1];
scv_193=B[-c7t+(c8t+1)][-c7t+(c9t+1)-1];
scv_194=B[-(c7t+3)+c8t-1][-(c7t+3)+(c9t+1)-1];
scv_195=B[-(c7t+3)+(c8t+3)][-(c7t+3)+c9t-1];
scv_196=A[-(c7t+2)+(c8t+1)][-(c7t+2)+c9t-1];
scv_197=X[-(c7t+1)+c8t][-(c7t+1)+(c9t+2)-1];
scv_198=B[-c7t+(c8t+2)][-c7t+c9t];
scv_199=A[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+1)-1];
scv_200=B[-(c7t+2)+c8t][-(c7t+2)+(c9t+1)-1];
scv_201=B[-c7t+(c8t+2)-1][-c7t+(c9t+3)-1];
scv_202=X[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+1)];
scv_203=B[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+1)-1];
scv_204=B[-c7t+c8t-1][-c7t+c9t-1];
scv_205=A[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+1)-1];
scv_206=A[-(c7t+3)+(c8t+1)][-(c7t+3)+c9t];
scv_207=X[-(c7t+1)+c8t][-(c7t+1)+(c9t+1)-1];
scv_208=X[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+1)];
scv_209=B[-(c7t+2)+c8t][-(c7t+2)+(c9t+1)];
scv_210=X[-c7t+(c8t+2)][-c7t+(c9t+3)];
scv_211=B[-c7t+(c8t+3)-1][-c7t+(c9t+3)-1];
scv_212=A[-(c7t+1)+c8t][-(c7t+1)+(c9t+2)];
scv_213=X[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+3)];
scv_214=X[-(c7t+1)+(c8t+2)][-(c7t+1)+c9t-1];
scv_215=X[-c7t+c8t][-c7t+(c9t+2)-1];
scv_216=X[-c7t+(c8t+1)][-c7t+(c9t+1)];
scv_217=B[-c7t+c8t][-c7t+(c9t+3)];
scv_218=B[-(c7t+3)+(c8t+1)-1][-(c7t+3)+(c9t+1)-1];
scv_219=B[-(c7t+2)+(c8t+3)][-(c7t+2)+c9t-1];
scv_220=A[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+3)];
scv_221=B[-c7t+c8t-1][-c7t+(c9t+2)-1];
scv_222=A[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+3)-1];
scv_223=B[-(c7t+1)+(c8t+2)-1][-(c7t+1)+(c9t+2)-1];
scv_224=X[-(c7t+3)+c8t][-(c7t+3)+(c9t+3)];
scv_225=A[-(c7t+1)+c8t][-(c7t+1)+(c9t+2)-1];
scv_226=B[-(c7t+3)+c8t][-(c7t+3)+(c9t+1)-1];
scv_227=A[-(c7t+2)+c8t][-(c7t+2)+(c9t+1)];
scv_228=B[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+3)];
scv_229=X[-c7t+(c8t+3)][-c7t+(c9t+3)];
scv_230=B[-(c7t+1)+c8t][-(c7t+1)+(c9t+1)-1];
scv_231=B[-(c7t+1)+(c8t+1)][-(c7t+1)+c9t];
scv_232=B[-(c7t+2)+(c8t+3)][-(c7t+2)+c9t];
scv_233=X[-c7t+(c8t+3)][-c7t+(c9t+1)-1];
scv_234=X[-(c7t+3)+(c8t+1)][-(c7t+3)+c9t-1];
scv_235=B[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+3)-1];
scv_236=A[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+1)];
scv_237=B[-c7t+(c8t+3)][-c7t+(c9t+3)-1];
scv_238=B[-(c7t+1)+(c8t+3)][-(c7t+1)+c9t];
scv_239=A[-c7t+(c8t+2)][-c7t+c9t-1];
scv_240=A[-(c7t+3)+(c8t+3)][-(c7t+3)+c9t];
scv_241=A[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+2)];
scv_242=A[-(c7t+2)+c8t][-(c7t+2)+c9t];
scv_243=B[-(c7t+2)+c8t][-(c7t+2)+c9t-1];
scv_244=B[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+1)-1];
scv_245=A[-(c7t+1)+c8t][-(c7t+1)+c9t-1];
scv_246=B[-c7t+(c8t+1)][-c7t+(c9t+3)];
scv_247=A[-(c7t+3)+(c8t+2)][-(c7t+3)+c9t-1];
scv_248=B[-(c7t+2)+(c8t+2)-1][-(c7t+2)+c9t-1];
scv_249=X[-(c7t+1)+(c8t+1)][-(c7t+1)+c9t];
scv_250=X[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+3)-1];
scv_251=B[-c7t+(c8t+3)][-c7t+c9t-1];
scv_252=B[-(c7t+1)+c8t-1][-(c7t+1)+(c9t+2)-1];
scv_253=A[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+3)-1];
scv_254=X[-(c7t+2)+c8t][-(c7t+2)+c9t];
scv_255=X[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+2)];
scv_256=B[-(c7t+3)+c8t-1][-(c7t+3)+(c9t+2)-1];
scv_257=X[-(c7t+1)+(c8t+3)][-(c7t+1)+c9t-1];
scv_258=A[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+3)-1];
scv_259=B[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+2)];
scv_260=B[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+1)];
scv_261=X[-(c7t+3)+c8t][-(c7t+3)+(c9t+3)-1];
scv_262=B[-(c7t+2)+c8t][-(c7t+2)+(c9t+3)];
scv_263=A[-(c7t+1)+c8t][-(c7t+1)+(c9t+1)-1];
scv_264=A[-c7t+(c8t+2)][-c7t+c9t];
scv_265=X[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+1)];
scv_266=A[-c7t+(c8t+3)][-c7t+(c9t+2)-1];
scv_267=X[-c7t+(c8t+2)][-c7t+(c9t+2)];
scv_268=A[-(c7t+1)+c8t][-(c7t+1)+(c9t+3)];
scv_269=A[-c7t+c8t][-c7t+c9t-1];
scv_270=A[-c7t+(c8t+3)][-c7t+c9t-1];
scv_271=A[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+2)];
scv_272=X[-(c7t+3)+c8t][-(c7t+3)+(c9t+1)];
scv_273=A[-(c7t+2)+(c8t+2)][-(c7t+2)+c9t-1];
scv_274=B[-c7t+c8t][-c7t+(c9t+2)];
scv_275=A[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+2)];
scv_276=A[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+1)];
scv_277=B[-(c7t+2)+(c8t+1)][-(c7t+2)+c9t];
scv_278=A[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+1)];
scv_279=X[-(c7t+3)+c8t][-(c7t+3)+(c9t+2)];
scv_280=A[-(c7t+2)+c8t][-(c7t+2)+(c9t+3)-1];
scv_281=X[-(c7t+2)+(c8t+3)][-(c7t+2)+c9t];
scv_282=A[-c7t+(c8t+2)][-c7t+(c9t+3)-1];
scv_283=B[-c7t+(c8t+2)][-c7t+(c9t+1)];
scv_284=B[-(c7t+1)+(c8t+3)][-(c7t+1)+c9t-1];
scv_285=B[-c7t+(c8t+3)][-c7t+c9t];
scv_286=A[-(c7t+1)+(c8t+3)][-(c7t+1)+c9t];
scv_287=A[-c7t+(c8t+3)][-c7t+(c9t+1)];
scv_288=B[-(c7t+1)+c8t][-(c7t+1)+(c9t+3)-1];
scv_289=X[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+1)];
scv_290=A[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+1)];
scv_291=X[-(c7t+1)+c8t][-(c7t+1)+c9t];
scv_292=A[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+3)];
scv_293=B[-(c7t+3)+(c8t+1)][-(c7t+3)+c9t-1];
scv_294=B[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+1)];
scv_295=X[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+2)-1];
scv_296=A[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+2)-1];
scv_297=X[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+1)-1];
scv_298=A[-c7t+(c8t+3)][-c7t+c9t];
scv_299=X[-c7t+(c8t+1)][-c7t+(c9t+2)-1];
scv_300=B[-(c7t+1)+(c8t+1)-1][-(c7t+1)+(c9t+1)-1];
scv_301=B[-(c7t+2)+(c8t+3)-1][-(c7t+2)+(c9t+3)-1];
scv_302=B[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+2)-1];
scv_303=B[-(c7t+1)+c8t-1][-(c7t+1)+(c9t+3)-1];
scv_304=B[-(c7t+2)+(c8t+1)-1][-(c7t+2)+(c9t+3)-1];
scv_305=X[-(c7t+2)+(c8t+3)][-(c7t+2)+c9t-1];
scv_306=B[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+2)-1];
scv_307=A[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+3)-1];
scv_308=X[-c7t+(c8t+1)][-c7t+c9t];
scv_309=X[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+2)-1];
scv_310=B[-c7t+(c8t+2)-1][-c7t+(c9t+1)-1];
scv_311=B[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+3)];
scv_312=X[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+1)-1];
scv_313=B[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+1)-1];
scv_314=B[-(c7t+2)+c8t][-(c7t+2)+(c9t+2)];
scv_315=A[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+3)-1];
scv_316=X[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+2)];
scv_317=X[-c7t+(c8t+1)][-c7t+(c9t+1)-1];
scv_318=B[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+1)-1];
scv_319=A[-(c7t+3)+c8t][-(c7t+3)+c9t];
scv_320=A[-(c7t+1)+c8t][-(c7t+1)+(c9t+1)];
scv_321=X[-(c7t+1)+c8t][-(c7t+1)+(c9t+1)];
scv_322=B[-c7t+(c8t+3)][-c7t+(c9t+2)];
scv_323=B[-(c7t+2)+c8t][-(c7t+2)+(c9t+2)-1];
scv_324=X[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+1)-1];
scv_325=B[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+2)-1];
scv_326=X[-(c7t+1)+c8t][-(c7t+1)+(c9t+2)];
scv_327=A[-c7t+c8t][-c7t+c9t];
scv_328=B[-c7t+(c8t+2)][-c7t+(c9t+2)];
scv_329=X[-c7t+(c8t+2)][-c7t+(c9t+2)-1];
scv_330=X[-c7t+c8t][-c7t+(c9t+3)];
scv_331=A[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+3)];
scv_332=B[-(c7t+2)+(c8t+2)-1][-(c7t+2)+(c9t+3)-1];
scv_333=A[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+3)];
scv_334=X[-c7t+(c8t+2)][-c7t+(c9t+3)-1];
scv_335=A[-(c7t+3)+c8t][-(c7t+3)+(c9t+3)];
scv_336=X[-c7t+(c8t+1)][-c7t+(c9t+2)];
scv_337=A[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+2)-1];
scv_338=X[-(c7t+2)+c8t][-(c7t+2)+(c9t+3)-1];
scv_339=A[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+2)];
scv_340=X[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+2)-1];
scv_341=B[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+2)-1];
scv_342=B[-c7t+(c8t+1)-1][-c7t+(c9t+2)-1];
scv_343=B[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+2)];
scv_344=X[-(c7t+3)+(c8t+1)][-(c7t+3)+c9t];
scv_345=B[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+2)-1];
scv_346=A[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+3)-1];
scv_347=X[-(c7t+3)+c8t][-(c7t+3)+(c9t+1)-1];
scv_348=A[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+2)];
scv_349=A[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+2)-1];
scv_350=B[-(c7t+3)+c8t][-(c7t+3)+(c9t+3)];
scv_351=B[-(c7t+3)+(c8t+2)-1][-(c7t+3)+(c9t+3)-1];
scv_352=X[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+2)];
scv_353=B[-(c7t+3)+(c8t+3)-1][-(c7t+3)+c9t-1];
scv_354=A[-(c7t+2)+(c8t+1)][-(c7t+2)+c9t];
scv_355=A[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+1)];
scv_356=B[-c7t+(c8t+2)-1][-c7t+c9t-1];
scv_357=A[-c7t+(c8t+1)][-c7t+c9t-1];
scv_358=B[-c7t+c8t][-c7t+(c9t+1)-1];
scv_359=A[-c7t+(c8t+3)][-c7t+(c9t+3)];
scv_360=B[-c7t+(c8t+3)-1][-c7t+c9t-1];
scv_361=B[-(c7t+1)+(c8t+2)-1][-(c7t+1)+c9t-1];
scv_362=B[-c7t+(c8t+2)][-c7t+c9t-1];
scv_363=X[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+3)];
scv_364=A[-(c7t+2)+c8t][-(c7t+2)+(c9t+1)-1];
scv_365=X[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+1)-1];
scv_366=X[-(c7t+1)+c8t][-(c7t+1)+(c9t+3)-1];
scv_367=X[-(c7t+2)+(c8t+2)][-(c7t+2)+c9t];
scv_368=B[-(c7t+3)+c8t-1][-(c7t+3)+(c9t+3)-1];
scv_369=A[-(c7t+1)+(c8t+1)][-(c7t+1)+c9t];
scv_370=X[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+3)-1];
scv_371=B[-c7t+c8t][-c7t+(c9t+2)-1];
scv_372=B[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+2)-1];
scv_373=B[-c7t+(c8t+3)][-c7t+(c9t+3)];
scv_374=B[-c7t+(c8t+2)][-c7t+(c9t+2)-1];
scv_375=A[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+2)-1];
scv_376=A[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+2)];
scv_377=B[-c7t+(c8t+1)][-c7t+(c9t+2)-1];
scv_378=X[-c7t+c8t][-c7t+(c9t+2)];
scv_379=A[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+3)-1];
scv_380=B[-c7t+c8t-1][-c7t+(c9t+1)-1];
scv_381=B[-(c7t+3)+c8t][-(c7t+3)+(c9t+3)-1];
scv_382=B[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+2)];
scv_383=B[-(c7t+1)+c8t][-(c7t+1)+c9t];
scv_384=B[-c7t+c8t][-c7t+(c9t+1)];
scv_385=X[-c7t+c8t][-c7t+(c9t+1)];
scv_386=B[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+3)];
scv_387=X[-c7t+(c8t+1)][-c7t+(c9t+3)-1];
scv_388=B[-(c7t+2)+c8t][-(c7t+2)+c9t];
scv_389=B[-(c7t+1)+(c8t+1)-1][-(c7t+1)+c9t-1];
scv_390=B[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+2)-1];
scv_391=X[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+3)-1];
scv_392=B[-(c7t+3)+(c8t+2)-1][-(c7t+3)+c9t-1];
scv_393=B[-c7t+c8t][-c7t+c9t];
scv_394=X[-(c7t+2)+c8t][-(c7t+2)+(c9t+2)];
scv_395=B[-c7t+(c8t+3)-1][-c7t+(c9t+2)-1];
scv_396=X[-(c7t+3)+(c8t+2)][-(c7t+3)+c9t-1];
scv_397=B[-(c7t+1)+(c8t+3)-1][-(c7t+1)+c9t-1];
scv_398=B[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+3)];
scv_399=A[-(c7t+3)+c8t][-(c7t+3)+c9t-1];
scv_400=B[-c7t+c8t-1][-c7t+(c9t+3)-1];
scv_401=B[-(c7t+3)+(c8t+2)-1][-(c7t+3)+(c9t+1)-1];
scv_402=A[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+3)];
scv_403=A[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+3)-1];
scv_404=A[-c7t+(c8t+2)][-c7t+(c9t+3)];
scv_405=B[-(c7t+3)+c8t][-(c7t+3)+(c9t+2)];
scv_406=X[-(c7t+2)+c8t][-(c7t+2)+(c9t+2)-1];
scv_407=X[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+1)-1];
scv_408=B[-(c7t+3)+(c8t+3)-1][-(c7t+3)+(c9t+3)-1];
scv_409=B[-(c7t+1)+(c8t+3)-1][-(c7t+1)+(c9t+1)-1];
scv_410=X[-(c7t+2)+c8t][-(c7t+2)+(c9t+1)];
scv_411=B[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+2)];
scv_412=B[-(c7t+3)+(c8t+2)][-(c7t+3)+c9t-1];
scv_413=A[-c7t+(c8t+3)][-c7t+(c9t+2)];
scv_414=B[-(c7t+1)+c8t][-(c7t+1)+(c9t+1)];
scv_415=B[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+1)];
scv_416=B[-(c7t+2)+(c8t+2)][-(c7t+2)+c9t];
scv_417=B[-(c7t+1)+c8t-1][-(c7t+1)+c9t-1];
scv_418=X[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+3)-1];
scv_419=X[-c7t+c8t][-c7t+(c9t+3)-1];
scv_420=B[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+2)-1];
scv_421=B[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+3)-1];
scv_422=X[-c7t+(c8t+1)][-c7t+c9t-1];
scv_423=X[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+2)];
scv_424=B[-c7t+(c8t+1)][-c7t+c9t-1];
scv_425=A[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+2)-1];
scv_426=A[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+1)-1];
scv_427=A[-(c7t+1)+(c8t+2)][-(c7t+1)+c9t-1];
scv_428=A[-(c7t+3)+c8t][-(c7t+3)+(c9t+3)-1];
scv_429=A[-c7t+(c8t+3)][-c7t+(c9t+3)-1];
scv_430=B[-(c7t+2)+(c8t+2)][-(c7t+2)+c9t-1];
scv_431=X[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+2)-1];
scv_432=B[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+1)];
scv_433=B[-c7t+(c8t+2)-1][-c7t+(c9t+2)-1];
scv_434=A[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+1)];
scv_435=B[-(c7t+3)+(c8t+3)-1][-(c7t+3)+(c9t+1)-1];
scv_436=B[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+1)];
scv_437=A[-(c7t+3)+(c8t+2)][-(c7t+3)+c9t];
scv_438=B[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+2)];
scv_439=B[-c7t+(c8t+1)-1][-c7t+c9t-1];
scv_440=B[-(c7t+1)+(c8t+2)-1][-(c7t+1)+(c9t+3)-1];
scv_441=B[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+2)];
scv_442=B[-(c7t+3)+(c8t+3)][-(c7t+3)+c9t];
scv_443=B[-c7t+(c8t+2)][-c7t+(c9t+3)];
scv_444=B[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+3)];
scv_445=X[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+3)-1];
scv_446=A[-c7t+(c8t+1)][-c7t+(c9t+1)];
scv_447=B[-c7t+(c8t+1)-1][-c7t+(c9t+1)-1];
scv_448=B[-(c7t+2)+(c8t+3)-1][-(c7t+2)+(c9t+1)-1];
scv_449=B[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+1)];
scv_450=X[-c7t+c8t][-c7t+c9t];
scv_451=A[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+3)-1];
scv_452=X[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+1)];
scv_453=B[-(c7t+1)+(c8t+2)-1][-(c7t+1)+(c9t+1)-1];
scv_454=X[-(c7t+3)+(c8t+3)][-(c7t+3)+c9t-1];
scv_455=X[-(c7t+1)+c8t][-(c7t+1)+c9t-1];
scv_456=A[-(c7t+1)+c8t][-(c7t+1)+(c9t+3)-1];
scv_457=A[-(c7t+3)+c8t][-(c7t+3)+(c9t+2)];
scv_458=B[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+1)];
scv_459=B[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+1)-1];
scv_460=A[-c7t+c8t][-c7t+(c9t+1)];
scv_461=B[-(c7t+3)+(c8t+1)-1][-(c7t+3)+(c9t+2)-1];
scv_462=X[-c7t+(c8t+3)][-c7t+(c9t+2)-1];
scv_463=X[-(c7t+3)+(c8t+3)][-(c7t+3)+c9t];
scv_464=A[-c7t+c8t][-c7t+(c9t+3)];
scv_465=A[-(c7t+3)+c8t][-(c7t+3)+(c9t+2)-1];
scv_466=A[-c7t+(c8t+1)][-c7t+(c9t+1)-1];
scv_467=X[-(c7t+3)+(c8t+2)][-(c7t+3)+c9t];
scv_468=X[-c7t+(c8t+2)][-c7t+c9t];
scv_469=A[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+1)-1];
scv_470=X[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+2)];
scv_471=B[-c7t+(c8t+3)][-c7t+(c9t+1)];
scv_472=B[-(c7t+1)+c8t][-(c7t+1)+(c9t+3)];
scv_473=X[-(c7t+3)+c8t][-(c7t+3)+c9t];
scv_474=A[-(c7t+3)+c8t][-(c7t+3)+(c9t+1)];
scv_475=B[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+1)-1];
scv_476=A[-(c7t+2)+(c8t+3)][-(c7t+2)+c9t-1];
scv_477=A[-(c7t+3)+(c8t+1)][-(c7t+3)+c9t-1];
scv_478=B[-c7t+(c8t+3)][-c7t+(c9t+2)-1];
scv_479=B[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+3)];
scv_480=B[-(c7t+3)+c8t][-(c7t+3)+c9t-1];
scv_481=B[-(c7t+1)+(c8t+2)][-(c7t+1)+c9t];
scv_482=B[-(c7t+2)+c8t-1][-(c7t+2)+(c9t+2)-1];
scv_483=X[-(c7t+1)+(c8t+1)][-(c7t+1)+c9t-1];
scv_484=X[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+1)-1];
scv_485=X[-(c7t+2)+(c8t+2)][-(c7t+2)+c9t-1];
scv_486=B[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+2)];
scv_487=A[-c7t+(c8t+2)][-c7t+(c9t+1)-1];
scv_488=X[-(c7t+2)+(c8t+1)][-(c7t+2)+c9t-1];
scv_489=A[-c7t+(c8t+1)][-c7t+(c9t+3)-1];
scv_490=B[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+3)-1];
scv_491=X[-(c7t+1)+c8t][-(c7t+1)+(c9t+3)];
scv_492=A[-(c7t+1)+(c8t+1)][-(c7t+1)+c9t-1];
scv_493=B[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+1)];
scv_494=B[-(c7t+1)+(c8t+1)-1][-(c7t+1)+(c9t+2)-1];
scv_495=A[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+3)];
scv_496=A[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+2)];
scv_497=X[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+3)];
scv_498=X[-(c7t+1)+(c8t+3)][-(c7t+1)+c9t];
scv_499=B[-(c7t+2)+(c8t+1)-1][-(c7t+2)+(c9t+2)-1];
scv_500=B[-(c7t+2)+(c8t+3)-1][-(c7t+2)+c9t-1];
scv_501=X[-c7t+(c8t+3)][-c7t+c9t];
scv_502=X[-(c7t+3)+c8t][-(c7t+3)+(c9t+2)-1];
scv_503=A[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+2)];
scv_504=B[-(c7t+2)+(c8t+2)-1][-(c7t+2)+(c9t+1)-1];
scv_505=A[-c7t+(c8t+3)][-c7t+(c9t+1)-1];
scv_506=A[-(c7t+3)+c8t][-(c7t+3)+(c9t+1)-1];
scv_507=A[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+2)-1];
scv_508=B[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+3)-1];
scv_509=B[-(c7t+2)+(c8t+1)][-(c7t+2)+c9t-1];
scv_510=A[-c7t+(c8t+1)][-c7t+(c9t+2)];
scv_511=A[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+1)];
scv_450=scv_450-scv_128*scv_327/scv_107;
scv_308=scv_308-scv_422*scv_156/scv_424;
scv_468=scv_468-scv_157*scv_264/scv_362;
scv_501=scv_501-scv_164*scv_298/scv_251;
scv_385=scv_385-scv_82*scv_460/scv_358;
scv_216=scv_216-scv_317*scv_446/scv_193;
scv_123=scv_123-scv_81*scv_80/scv_100;
scv_149=scv_149-scv_233*scv_287/scv_96;
scv_378=scv_378-scv_215*scv_158/scv_371;
scv_336=scv_336-scv_299*scv_510/scv_377;
scv_267=scv_267-scv_329*scv_66/scv_374;
scv_176=scv_176-scv_462*scv_413/scv_478;
scv_330=scv_330-scv_419*scv_464/scv_183;
scv_152=scv_152-scv_387*scv_144/scv_126;
scv_210=scv_210-scv_334*scv_404/scv_119;
scv_229=scv_229-scv_147*scv_359/scv_237;
scv_291=scv_291-scv_455*scv_95/scv_188;
scv_249=scv_249-scv_483*scv_369/scv_186;
scv_118=scv_118-scv_214*scv_189/scv_153;
scv_498=scv_498-scv_257*scv_286/scv_284;
scv_321=scv_321-scv_207*scv_320/scv_230;
scv_148=scv_148-scv_407*scv_236/scv_121;
scv_141=scv_141-scv_297*scv_94/scv_313;
scv_136=scv_136-scv_324*scv_290/scv_475;
scv_326=scv_326-scv_197*scv_212/scv_106;
scv_255=scv_255-scv_340*scv_241/scv_420;
scv_423=scv_423-scv_295*scv_271/scv_372;
scv_163=scv_163-scv_104*scv_348/scv_120;
scv_491=scv_491-scv_366*scv_268/scv_288;
scv_132=scv_132-scv_418*scv_402/scv_235;
scv_363=scv_363-scv_71*scv_151/scv_101;
scv_213=scv_213-scv_250*scv_292/scv_508;
scv_254=scv_254-scv_78*scv_242/scv_243;
scv_169=scv_169-scv_488*scv_354/scv_509;
scv_367=scv_367-scv_485*scv_92/scv_430;
scv_281=scv_281-scv_305*scv_159/scv_219;
scv_410=scv_410-scv_166*scv_227/scv_200;
scv_208=scv_208-scv_139*scv_511/scv_142;
scv_202=scv_202-scv_87*scv_278/scv_318;
scv_452=scv_452-scv_365*scv_434/scv_203;
scv_394=scv_394-scv_406*scv_127/scv_323;
scv_316=scv_316-scv_431*scv_496/scv_345;
scv_93=scv_93-scv_191*scv_275/scv_306;
scv_470=scv_470-scv_88*scv_89/scv_390;
scv_130=scv_130-scv_338*scv_179/scv_90;
scv_190=scv_190-scv_67*scv_220/scv_131;
scv_497=scv_497-scv_370*scv_333/scv_490;
scv_73=scv_73-scv_391*scv_495/scv_122;
scv_473=scv_473-scv_79*scv_319/scv_480;
scv_344=scv_344-scv_234*scv_206/scv_293;
scv_467=scv_467-scv_396*scv_437/scv_412;
scv_463=scv_463-scv_454*scv_240/scv_195;
scv_272=scv_272-scv_347*scv_474/scv_226;
scv_289=scv_289-scv_484*scv_355/scv_113;
scv_265=scv_265-scv_312*scv_98/scv_244;
scv_108=scv_108-scv_72*scv_276/scv_459;
scv_279=scv_279-scv_502*scv_457/scv_68;
scv_352=scv_352-scv_105*scv_339/scv_341;
scv_160=scv_160-scv_309*scv_503/scv_302;
scv_137=scv_137-scv_162*scv_376/scv_325;
scv_224=scv_224-scv_261*scv_335/scv_381;
scv_171=scv_171-scv_181*scv_74/scv_161;
scv_102=scv_102-scv_140*scv_103/scv_421;
scv_185=scv_185-scv_445*scv_331/scv_83;
scv_393=scv_393-scv_327*scv_327/scv_107;
scv_85=scv_85-scv_156*scv_156/scv_424;
scv_198=scv_198-scv_264*scv_264/scv_362;
scv_285=scv_285-scv_298*scv_298/scv_251;
scv_384=scv_384-scv_460*scv_460/scv_358;
scv_177=scv_177-scv_446*scv_446/scv_193;
scv_283=scv_283-scv_80*scv_80/scv_100;
scv_471=scv_471-scv_287*scv_287/scv_96;
scv_274=scv_274-scv_158*scv_158/scv_371;
scv_187=scv_187-scv_510*scv_510/scv_377;
scv_328=scv_328-scv_66*scv_66/scv_374;
scv_322=scv_322-scv_413*scv_413/scv_478;
scv_217=scv_217-scv_464*scv_464/scv_183;
scv_246=scv_246-scv_144*scv_144/scv_126;
scv_443=scv_443-scv_404*scv_404/scv_119;
scv_373=scv_373-scv_359*scv_359/scv_237;
scv_383=scv_383-scv_95*scv_95/scv_188;
scv_231=scv_231-scv_369*scv_369/scv_186;
scv_481=scv_481-scv_189*scv_189/scv_153;
scv_238=scv_238-scv_286*scv_286/scv_284;
scv_414=scv_414-scv_320*scv_320/scv_230;
scv_415=scv_415-scv_236*scv_236/scv_121;
scv_167=scv_167-scv_94*scv_94/scv_313;
scv_458=scv_458-scv_290*scv_290/scv_475;
scv_77=scv_77-scv_212*scv_212/scv_106;
scv_411=scv_411-scv_241*scv_241/scv_420;
scv_173=scv_173-scv_271*scv_271/scv_372;
scv_382=scv_382-scv_348*scv_348/scv_120;
scv_472=scv_472-scv_268*scv_268/scv_288;
scv_479=scv_479-scv_402*scv_402/scv_235;
scv_386=scv_386-scv_151*scv_151/scv_101;
scv_444=scv_444-scv_292*scv_292/scv_508;
scv_388=scv_388-scv_242*scv_242/scv_243;
scv_277=scv_277-scv_354*scv_354/scv_509;
scv_416=scv_416-scv_92*scv_92/scv_430;
scv_232=scv_232-scv_159*scv_159/scv_219;
scv_209=scv_209-scv_227*scv_227/scv_200;
scv_294=scv_294-scv_511*scv_511/scv_142;
scv_432=scv_432-scv_278*scv_278/scv_318;
scv_449=scv_449-scv_434*scv_434/scv_203;
scv_314=scv_314-scv_127*scv_127/scv_323;
scv_259=scv_259-scv_496*scv_496/scv_345;
scv_438=scv_438-scv_275*scv_275/scv_306;
scv_343=scv_343-scv_89*scv_89/scv_390;
scv_262=scv_262-scv_179*scv_179/scv_90;
scv_311=scv_311-scv_220*scv_220/scv_131;
scv_150=scv_150-scv_333*scv_333/scv_490;
scv_398=scv_398-scv_495*scv_495/scv_122;
scv_178=scv_178-scv_319*scv_319/scv_480;
scv_97=scv_97-scv_206*scv_206/scv_293;
scv_124=scv_124-scv_437*scv_437/scv_412;
scv_442=scv_442-scv_240*scv_240/scv_195;
scv_70=scv_70-scv_474*scv_474/scv_226;
scv_493=scv_493-scv_355*scv_355/scv_113;
scv_436=scv_436-scv_98*scv_98/scv_244;
scv_260=scv_260-scv_276*scv_276/scv_459;
scv_405=scv_405-scv_457*scv_457/scv_68;
scv_486=scv_486-scv_339*scv_339/scv_341;
scv_441=scv_441-scv_503*scv_503/scv_302;
scv_174=scv_174-scv_376*scv_376/scv_325;
scv_350=scv_350-scv_335*scv_335/scv_381;
scv_86=scv_86-scv_74*scv_74/scv_161;
scv_154=scv_154-scv_103*scv_103/scv_421;
scv_228=scv_228-scv_331*scv_331/scv_83;
scv_128=scv_128-X[-c7t+c8t-1][-c7t+c9t-1]*scv_269/scv_204;
scv_422=scv_422-X[-c7t+(c8t+1)-1][-c7t+c9t-1]*scv_357/scv_439;
scv_157=scv_157-X[-c7t+(c8t+2)-1][-c7t+c9t-1]*scv_239/scv_356;
scv_164=scv_164-X[-c7t+(c8t+3)-1][-c7t+c9t-1]*scv_270/scv_360;
scv_82=scv_82-X[-c7t+c8t-1][-c7t+(c9t+1)-1]*scv_112/scv_380;
scv_317=scv_317-X[-c7t+(c8t+1)-1][-c7t+(c9t+1)-1]*scv_466/scv_447;
scv_81=scv_81-X[-c7t+(c8t+2)-1][-c7t+(c9t+1)-1]*scv_487/scv_310;
scv_233=scv_233-X[-c7t+(c8t+3)-1][-c7t+(c9t+1)-1]*scv_505/scv_165;
scv_215=scv_215-X[-c7t+c8t-1][-c7t+(c9t+2)-1]*scv_109/scv_221;
scv_299=scv_299-X[-c7t+(c8t+1)-1][-c7t+(c9t+2)-1]*scv_91/scv_342;
scv_329=scv_329-X[-c7t+(c8t+2)-1][-c7t+(c9t+2)-1]*scv_69/scv_433;
scv_462=scv_462-X[-c7t+(c8t+3)-1][-c7t+(c9t+2)-1]*scv_266/scv_395;
scv_419=scv_419-X[-c7t+c8t-1][-c7t+(c9t+3)-1]*scv_145/scv_400;
scv_387=scv_387-X[-c7t+(c8t+1)-1][-c7t+(c9t+3)-1]*scv_489/scv_170;
scv_334=scv_334-X[-c7t+(c8t+2)-1][-c7t+(c9t+3)-1]*scv_282/scv_201;
scv_147=scv_147-X[-c7t+(c8t+3)-1][-c7t+(c9t+3)-1]*scv_429/scv_211;
scv_455=scv_455-X[-(c7t+1)+c8t-1][-(c7t+1)+c9t-1]*scv_245/scv_417;
scv_483=scv_483-X[-(c7t+1)+(c8t+1)-1][-(c7t+1)+c9t-1]*scv_492/scv_389;
scv_214=scv_214-X[-(c7t+1)+(c8t+2)-1][-(c7t+1)+c9t-1]*scv_427/scv_361;
scv_257=scv_257-X[-(c7t+1)+(c8t+3)-1][-(c7t+1)+c9t-1]*scv_175/scv_397;
scv_207=scv_207-X[-(c7t+1)+c8t-1][-(c7t+1)+(c9t+1)-1]*scv_263/scv_172;
scv_407=scv_407-X[-(c7t+1)+(c8t+1)-1][-(c7t+1)+(c9t+1)-1]*scv_469/scv_300;
scv_297=scv_297-X[-(c7t+1)+(c8t+2)-1][-(c7t+1)+(c9t+1)-1]*scv_184/scv_453;
scv_324=scv_324-X[-(c7t+1)+(c8t+3)-1][-(c7t+1)+(c9t+1)-1]*scv_111/scv_409;
scv_197=scv_197-X[-(c7t+1)+c8t-1][-(c7t+1)+(c9t+2)-1]*scv_225/scv_252;
scv_340=scv_340-X[-(c7t+1)+(c8t+1)-1][-(c7t+1)+(c9t+2)-1]*scv_337/scv_494;
scv_295=scv_295-X[-(c7t+1)+(c8t+2)-1][-(c7t+1)+(c9t+2)-1]*scv_425/scv_223;
scv_104=scv_104-X[-(c7t+1)+(c8t+3)-1][-(c7t+1)+(c9t+2)-1]*scv_375/scv_129;
scv_366=scv_366-X[-(c7t+1)+c8t-1][-(c7t+1)+(c9t+3)-1]*scv_456/scv_303;
scv_418=scv_418-X[-(c7t+1)+(c8t+1)-1][-(c7t+1)+(c9t+3)-1]*scv_451/scv_180;
scv_71=scv_71-X[-(c7t+1)+(c8t+2)-1][-(c7t+1)+(c9t+3)-1]*scv_253/scv_440;
scv_250=scv_250-X[-(c7t+1)+(c8t+3)-1][-(c7t+1)+(c9t+3)-1]*scv_346/scv_75;
scv_78=scv_78-X[-(c7t+2)+c8t-1][-(c7t+2)+c9t-1]*scv_117/scv_155;
scv_488=scv_488-X[-(c7t+2)+(c8t+1)-1][-(c7t+2)+c9t-1]*scv_196/scv_110;
scv_485=scv_485-X[-(c7t+2)+(c8t+2)-1][-(c7t+2)+c9t-1]*scv_273/scv_248;
scv_305=scv_305-X[-(c7t+2)+(c8t+3)-1][-(c7t+2)+c9t-1]*scv_476/scv_500;
scv_166=scv_166-X[-(c7t+2)+c8t-1][-(c7t+2)+(c9t+1)-1]*scv_364/scv_135;
scv_139=scv_139-X[-(c7t+2)+(c8t+1)-1][-(c7t+2)+(c9t+1)-1]*scv_84/scv_192;
scv_87=scv_87-X[-(c7t+2)+(c8t+2)-1][-(c7t+2)+(c9t+1)-1]*scv_199/scv_504;
scv_365=scv_365-X[-(c7t+2)+(c8t+3)-1][-(c7t+2)+(c9t+1)-1]*scv_426/scv_448;
scv_406=scv_406-X[-(c7t+2)+c8t-1][-(c7t+2)+(c9t+2)-1]*scv_114/scv_482;
scv_431=scv_431-X[-(c7t+2)+(c8t+1)-1][-(c7t+2)+(c9t+2)-1]*scv_296/scv_499;
scv_191=scv_191-X[-(c7t+2)+(c8t+2)-1][-(c7t+2)+(c9t+2)-1]*scv_64/scv_65;
scv_88=scv_88-X[-(c7t+2)+(c8t+3)-1][-(c7t+2)+(c9t+2)-1]*scv_99/scv_76;
scv_338=scv_338-X[-(c7t+2)+c8t-1][-(c7t+2)+(c9t+3)-1]*scv_280/scv_143;
scv_67=scv_67-X[-(c7t+2)+(c8t+1)-1][-(c7t+2)+(c9t+3)-1]*scv_222/scv_304;
scv_370=scv_370-X[-(c7t+2)+(c8t+2)-1][-(c7t+2)+(c9t+3)-1]*scv_307/scv_332;
scv_391=scv_391-X[-(c7t+2)+(c8t+3)-1][-(c7t+2)+(c9t+3)-1]*scv_315/scv_301;
scv_79=scv_79-X[-(c7t+3)+c8t-1][-(c7t+3)+c9t-1]*scv_399/scv_168;
scv_234=scv_234-X[-(c7t+3)+(c8t+1)-1][-(c7t+3)+c9t-1]*scv_477/scv_125;
scv_396=scv_396-X[-(c7t+3)+(c8t+2)-1][-(c7t+3)+c9t-1]*scv_247/scv_392;
scv_454=scv_454-X[-(c7t+3)+(c8t+3)-1][-(c7t+3)+c9t-1]*scv_116/scv_353;
scv_347=scv_347-X[-(c7t+3)+c8t-1][-(c7t+3)+(c9t+1)-1]*scv_506/scv_194;
scv_484=scv_484-X[-(c7t+3)+(c8t+1)-1][-(c7t+3)+(c9t+1)-1]*scv_115/scv_218;
scv_312=scv_312-X[-(c7t+3)+(c8t+2)-1][-(c7t+3)+(c9t+1)-1]*scv_146/scv_401;
scv_72=scv_72-X[-(c7t+3)+(c8t+3)-1][-(c7t+3)+(c9t+1)-1]*scv_205/scv_435;
scv_502=scv_502-X[-(c7t+3)+c8t-1][-(c7t+3)+(c9t+2)-1]*scv_465/scv_256;
scv_105=scv_105-X[-(c7t+3)+(c8t+1)-1][-(c7t+3)+(c9t+2)-1]*scv_349/scv_461;
scv_309=scv_309-X[-(c7t+3)+(c8t+2)-1][-(c7t+3)+(c9t+2)-1]*scv_507/scv_138;
scv_162=scv_162-X[-(c7t+3)+(c8t+3)-1][-(c7t+3)+(c9t+2)-1]*scv_182/scv_134;
scv_261=scv_261-X[-(c7t+3)+c8t-1][-(c7t+3)+(c9t+3)-1]*scv_428/scv_368;
scv_181=scv_181-X[-(c7t+3)+(c8t+1)-1][-(c7t+3)+(c9t+3)-1]*scv_379/scv_133;
scv_140=scv_140-X[-(c7t+3)+(c8t+2)-1][-(c7t+3)+(c9t+3)-1]*scv_258/scv_351;
scv_445=scv_445-X[-(c7t+3)+(c8t+3)-1][-(c7t+3)+(c9t+3)-1]*scv_403/scv_408;
scv_107=scv_107-scv_269*scv_269/scv_204;
scv_424=scv_424-scv_357*scv_357/scv_439;
scv_362=scv_362-scv_239*scv_239/scv_356;
scv_251=scv_251-scv_270*scv_270/scv_360;
scv_358=scv_358-scv_112*scv_112/scv_380;
scv_193=scv_193-scv_466*scv_466/scv_447;
scv_100=scv_100-scv_487*scv_487/scv_310;
scv_96=scv_96-scv_505*scv_505/scv_165;
// NOTE(review): auto-generated code — this appears to be the output of a
// polyhedral/tiling source-to-source optimizer (unrolled 4x over c7t/c8t tiles).
// The scv_* variables are scalar-replacement temporaries holding elements of the
// X[][], A[][] and B[][] arrays that were loaded earlier in this (truncated) body.
// Do not hand-edit: statement order encodes the dependence chain of the kernel.
//
// Phase: pivot-style updates on the scalar copies, all of the literal form
//   b = b - a*a / b_prev
// (forward elimination of a tridiagonal-style recurrence — presumably an
// ADI/Thomas-algorithm sweep; TODO confirm against the generator's input kernel).
scv_371=scv_371-scv_109*scv_109/scv_221;
scv_377=scv_377-scv_91*scv_91/scv_342;
scv_374=scv_374-scv_69*scv_69/scv_433;
scv_478=scv_478-scv_266*scv_266/scv_395;
scv_183=scv_183-scv_145*scv_145/scv_400;
scv_126=scv_126-scv_489*scv_489/scv_170;
scv_119=scv_119-scv_282*scv_282/scv_201;
scv_237=scv_237-scv_429*scv_429/scv_211;
scv_188=scv_188-scv_245*scv_245/scv_417;
scv_186=scv_186-scv_492*scv_492/scv_389;
scv_153=scv_153-scv_427*scv_427/scv_361;
scv_284=scv_284-scv_175*scv_175/scv_397;
scv_230=scv_230-scv_263*scv_263/scv_172;
scv_121=scv_121-scv_469*scv_469/scv_300;
scv_313=scv_313-scv_184*scv_184/scv_453;
scv_475=scv_475-scv_111*scv_111/scv_409;
scv_106=scv_106-scv_225*scv_225/scv_252;
scv_420=scv_420-scv_337*scv_337/scv_494;
scv_372=scv_372-scv_425*scv_425/scv_223;
scv_120=scv_120-scv_375*scv_375/scv_129;
scv_288=scv_288-scv_456*scv_456/scv_303;
scv_235=scv_235-scv_451*scv_451/scv_180;
scv_101=scv_101-scv_253*scv_253/scv_440;
scv_508=scv_508-scv_346*scv_346/scv_75;
scv_243=scv_243-scv_117*scv_117/scv_155;
scv_509=scv_509-scv_196*scv_196/scv_110;
scv_430=scv_430-scv_273*scv_273/scv_248;
scv_219=scv_219-scv_476*scv_476/scv_500;
scv_200=scv_200-scv_364*scv_364/scv_135;
scv_142=scv_142-scv_84*scv_84/scv_192;
scv_318=scv_318-scv_199*scv_199/scv_504;
scv_203=scv_203-scv_426*scv_426/scv_448;
scv_323=scv_323-scv_114*scv_114/scv_482;
scv_345=scv_345-scv_296*scv_296/scv_499;
scv_306=scv_306-scv_64*scv_64/scv_65;
scv_390=scv_390-scv_99*scv_99/scv_76;
scv_90=scv_90-scv_280*scv_280/scv_143;
scv_131=scv_131-scv_222*scv_222/scv_304;
scv_490=scv_490-scv_307*scv_307/scv_332;
scv_122=scv_122-scv_315*scv_315/scv_301;
scv_480=scv_480-scv_399*scv_399/scv_168;
scv_293=scv_293-scv_477*scv_477/scv_125;
scv_412=scv_412-scv_247*scv_247/scv_392;
scv_195=scv_195-scv_116*scv_116/scv_353;
scv_226=scv_226-scv_506*scv_506/scv_194;
scv_113=scv_113-scv_115*scv_115/scv_218;
scv_244=scv_244-scv_146*scv_146/scv_401;
scv_459=scv_459-scv_205*scv_205/scv_435;
scv_68=scv_68-scv_465*scv_465/scv_256;
scv_341=scv_341-scv_349*scv_349/scv_461;
scv_302=scv_302-scv_507*scv_507/scv_138;
scv_325=scv_325-scv_182*scv_182/scv_134;
scv_381=scv_381-scv_428*scv_428/scv_368;
scv_161=scv_161-scv_379*scv_379/scv_133;
scv_421=scv_421-scv_258*scv_258/scv_351;
scv_83=scv_83-scv_403*scv_403/scv_408;
// Phase: write-back — store the updated scalar copies into X[][] and B[][].
// The index expressions -(c7t+k)+(c8t+m) / -(c7t+k)+(c9t+p) are the
// generator's time-skewed tile coordinates; each store targets the same
// element its scv_* temporary was loaded from.
X[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+3)-1]=scv_67;
B[-(c7t+3)+c8t][-(c7t+3)+(c9t+2)-1]=scv_68;
B[-(c7t+3)+c8t][-(c7t+3)+(c9t+1)]=scv_70;
X[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+3)-1]=scv_71;
X[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+1)-1]=scv_72;
X[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+3)]=scv_73;
B[-(c7t+1)+c8t][-(c7t+1)+(c9t+2)]=scv_77;
X[-(c7t+2)+c8t][-(c7t+2)+c9t-1]=scv_78;
X[-(c7t+3)+c8t][-(c7t+3)+c9t-1]=scv_79;
X[-c7t+(c8t+2)][-c7t+(c9t+1)-1]=scv_81;
X[-c7t+c8t][-c7t+(c9t+1)-1]=scv_82;
B[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+3)-1]=scv_83;
B[-c7t+(c8t+1)][-c7t+c9t]=scv_85;
B[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+3)]=scv_86;
X[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+1)-1]=scv_87;
X[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+2)-1]=scv_88;
B[-(c7t+2)+c8t][-(c7t+2)+(c9t+3)-1]=scv_90;
X[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+2)]=scv_93;
B[-c7t+(c8t+3)][-c7t+(c9t+1)-1]=scv_96;
B[-(c7t+3)+(c8t+1)][-(c7t+3)+c9t]=scv_97;
B[-c7t+(c8t+2)][-c7t+(c9t+1)-1]=scv_100;
B[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+3)-1]=scv_101;
X[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+3)]=scv_102;
X[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+2)-1]=scv_104;
X[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+2)-1]=scv_105;
B[-(c7t+1)+c8t][-(c7t+1)+(c9t+2)-1]=scv_106;
B[-c7t+c8t][-c7t+c9t-1]=scv_107;
X[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+1)]=scv_108;
B[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+1)-1]=scv_113;
X[-(c7t+1)+(c8t+2)][-(c7t+1)+c9t]=scv_118;
B[-c7t+(c8t+2)][-c7t+(c9t+3)-1]=scv_119;
B[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+2)-1]=scv_120;
B[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+1)-1]=scv_121;
B[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+3)-1]=scv_122;
X[-c7t+(c8t+2)][-c7t+(c9t+1)]=scv_123;
B[-(c7t+3)+(c8t+2)][-(c7t+3)+c9t]=scv_124;
B[-c7t+(c8t+1)][-c7t+(c9t+3)-1]=scv_126;
X[-c7t+c8t][-c7t+c9t-1]=scv_128;
X[-(c7t+2)+c8t][-(c7t+2)+(c9t+3)]=scv_130;
B[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+3)-1]=scv_131;
X[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+3)]=scv_132;
X[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+1)]=scv_136;
X[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+2)]=scv_137;
X[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+1)-1]=scv_139;
X[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+3)-1]=scv_140;
X[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+1)]=scv_141;
B[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+1)-1]=scv_142;
X[-c7t+(c8t+3)][-c7t+(c9t+3)-1]=scv_147;
X[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+1)]=scv_148;
X[-c7t+(c8t+3)][-c7t+(c9t+1)]=scv_149;
B[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+3)]=scv_150;
X[-c7t+(c8t+1)][-c7t+(c9t+3)]=scv_152;
B[-(c7t+1)+(c8t+2)][-(c7t+1)+c9t-1]=scv_153;
B[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+3)]=scv_154;
X[-c7t+(c8t+2)][-c7t+c9t-1]=scv_157;
X[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+2)]=scv_160;
B[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+3)-1]=scv_161;
X[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+2)-1]=scv_162;
X[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+2)]=scv_163;
X[-c7t+(c8t+3)][-c7t+c9t-1]=scv_164;
X[-(c7t+2)+c8t][-(c7t+2)+(c9t+1)-1]=scv_166;
B[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+1)]=scv_167;
X[-(c7t+2)+(c8t+1)][-(c7t+2)+c9t]=scv_169;
X[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+3)]=scv_171;
B[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+2)]=scv_173;
B[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+2)]=scv_174;
X[-c7t+(c8t+3)][-c7t+(c9t+2)]=scv_176;
B[-c7t+(c8t+1)][-c7t+(c9t+1)]=scv_177;
B[-(c7t+3)+c8t][-(c7t+3)+c9t]=scv_178;
X[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+3)-1]=scv_181;
B[-c7t+c8t][-c7t+(c9t+3)-1]=scv_183;
X[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+3)]=scv_185;
B[-(c7t+1)+(c8t+1)][-(c7t+1)+c9t-1]=scv_186;
B[-c7t+(c8t+1)][-c7t+(c9t+2)]=scv_187;
B[-(c7t+1)+c8t][-(c7t+1)+c9t-1]=scv_188;
X[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+3)]=scv_190;
X[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+2)-1]=scv_191;
B[-c7t+(c8t+1)][-c7t+(c9t+1)-1]=scv_193;
B[-(c7t+3)+(c8t+3)][-(c7t+3)+c9t-1]=scv_195;
X[-(c7t+1)+c8t][-(c7t+1)+(c9t+2)-1]=scv_197;
B[-c7t+(c8t+2)][-c7t+c9t]=scv_198;
B[-(c7t+2)+c8t][-(c7t+2)+(c9t+1)-1]=scv_200;
X[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+1)]=scv_202;
B[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+1)-1]=scv_203;
X[-(c7t+1)+c8t][-(c7t+1)+(c9t+1)-1]=scv_207;
X[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+1)]=scv_208;
B[-(c7t+2)+c8t][-(c7t+2)+(c9t+1)]=scv_209;
X[-c7t+(c8t+2)][-c7t+(c9t+3)]=scv_210;
X[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+3)]=scv_213;
X[-(c7t+1)+(c8t+2)][-(c7t+1)+c9t-1]=scv_214;
X[-c7t+c8t][-c7t+(c9t+2)-1]=scv_215;
X[-c7t+(c8t+1)][-c7t+(c9t+1)]=scv_216;
B[-c7t+c8t][-c7t+(c9t+3)]=scv_217;
B[-(c7t+2)+(c8t+3)][-(c7t+2)+c9t-1]=scv_219;
X[-(c7t+3)+c8t][-(c7t+3)+(c9t+3)]=scv_224;
B[-(c7t+3)+c8t][-(c7t+3)+(c9t+1)-1]=scv_226;
B[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+3)]=scv_228;
X[-c7t+(c8t+3)][-c7t+(c9t+3)]=scv_229;
B[-(c7t+1)+c8t][-(c7t+1)+(c9t+1)-1]=scv_230;
B[-(c7t+1)+(c8t+1)][-(c7t+1)+c9t]=scv_231;
B[-(c7t+2)+(c8t+3)][-(c7t+2)+c9t]=scv_232;
X[-c7t+(c8t+3)][-c7t+(c9t+1)-1]=scv_233;
X[-(c7t+3)+(c8t+1)][-(c7t+3)+c9t-1]=scv_234;
B[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+3)-1]=scv_235;
B[-c7t+(c8t+3)][-c7t+(c9t+3)-1]=scv_237;
B[-(c7t+1)+(c8t+3)][-(c7t+1)+c9t]=scv_238;
B[-(c7t+2)+c8t][-(c7t+2)+c9t-1]=scv_243;
B[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+1)-1]=scv_244;
B[-c7t+(c8t+1)][-c7t+(c9t+3)]=scv_246;
X[-(c7t+1)+(c8t+1)][-(c7t+1)+c9t]=scv_249;
X[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+3)-1]=scv_250;
B[-c7t+(c8t+3)][-c7t+c9t-1]=scv_251;
X[-(c7t+2)+c8t][-(c7t+2)+c9t]=scv_254;
X[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+2)]=scv_255;
X[-(c7t+1)+(c8t+3)][-(c7t+1)+c9t-1]=scv_257;
B[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+2)]=scv_259;
B[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+1)]=scv_260;
X[-(c7t+3)+c8t][-(c7t+3)+(c9t+3)-1]=scv_261;
B[-(c7t+2)+c8t][-(c7t+2)+(c9t+3)]=scv_262;
X[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+1)]=scv_265;
X[-c7t+(c8t+2)][-c7t+(c9t+2)]=scv_267;
X[-(c7t+3)+c8t][-(c7t+3)+(c9t+1)]=scv_272;
B[-c7t+c8t][-c7t+(c9t+2)]=scv_274;
B[-(c7t+2)+(c8t+1)][-(c7t+2)+c9t]=scv_277;
X[-(c7t+3)+c8t][-(c7t+3)+(c9t+2)]=scv_279;
X[-(c7t+2)+(c8t+3)][-(c7t+2)+c9t]=scv_281;
B[-c7t+(c8t+2)][-c7t+(c9t+1)]=scv_283;
B[-(c7t+1)+(c8t+3)][-(c7t+1)+c9t-1]=scv_284;
B[-c7t+(c8t+3)][-c7t+c9t]=scv_285;
B[-(c7t+1)+c8t][-(c7t+1)+(c9t+3)-1]=scv_288;
X[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+1)]=scv_289;
X[-(c7t+1)+c8t][-(c7t+1)+c9t]=scv_291;
B[-(c7t+3)+(c8t+1)][-(c7t+3)+c9t-1]=scv_293;
B[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+1)]=scv_294;
X[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+2)-1]=scv_295;
X[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+1)-1]=scv_297;
X[-c7t+(c8t+1)][-c7t+(c9t+2)-1]=scv_299;
B[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+2)-1]=scv_302;
X[-(c7t+2)+(c8t+3)][-(c7t+2)+c9t-1]=scv_305;
B[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+2)-1]=scv_306;
X[-c7t+(c8t+1)][-c7t+c9t]=scv_308;
X[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+2)-1]=scv_309;
B[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+3)]=scv_311;
X[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+1)-1]=scv_312;
B[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+1)-1]=scv_313;
B[-(c7t+2)+c8t][-(c7t+2)+(c9t+2)]=scv_314;
X[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+2)]=scv_316;
X[-c7t+(c8t+1)][-c7t+(c9t+1)-1]=scv_317;
B[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+1)-1]=scv_318;
X[-(c7t+1)+c8t][-(c7t+1)+(c9t+1)]=scv_321;
B[-c7t+(c8t+3)][-c7t+(c9t+2)]=scv_322;
B[-(c7t+2)+c8t][-(c7t+2)+(c9t+2)-1]=scv_323;
X[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+1)-1]=scv_324;
B[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+2)-1]=scv_325;
X[-(c7t+1)+c8t][-(c7t+1)+(c9t+2)]=scv_326;
B[-c7t+(c8t+2)][-c7t+(c9t+2)]=scv_328;
X[-c7t+(c8t+2)][-c7t+(c9t+2)-1]=scv_329;
X[-c7t+c8t][-c7t+(c9t+3)]=scv_330;
X[-c7t+(c8t+2)][-c7t+(c9t+3)-1]=scv_334;
X[-c7t+(c8t+1)][-c7t+(c9t+2)]=scv_336;
X[-(c7t+2)+c8t][-(c7t+2)+(c9t+3)-1]=scv_338;
X[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+2)-1]=scv_340;
B[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+2)-1]=scv_341;
B[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+2)]=scv_343;
X[-(c7t+3)+(c8t+1)][-(c7t+3)+c9t]=scv_344;
B[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+2)-1]=scv_345;
X[-(c7t+3)+c8t][-(c7t+3)+(c9t+1)-1]=scv_347;
B[-(c7t+3)+c8t][-(c7t+3)+(c9t+3)]=scv_350;
X[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+2)]=scv_352;
B[-c7t+c8t][-c7t+(c9t+1)-1]=scv_358;
B[-c7t+(c8t+2)][-c7t+c9t-1]=scv_362;
X[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+3)]=scv_363;
X[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+1)-1]=scv_365;
X[-(c7t+1)+c8t][-(c7t+1)+(c9t+3)-1]=scv_366;
X[-(c7t+2)+(c8t+2)][-(c7t+2)+c9t]=scv_367;
X[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+3)-1]=scv_370;
B[-c7t+c8t][-c7t+(c9t+2)-1]=scv_371;
B[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+2)-1]=scv_372;
B[-c7t+(c8t+3)][-c7t+(c9t+3)]=scv_373;
B[-c7t+(c8t+2)][-c7t+(c9t+2)-1]=scv_374;
B[-c7t+(c8t+1)][-c7t+(c9t+2)-1]=scv_377;
X[-c7t+c8t][-c7t+(c9t+2)]=scv_378;
B[-(c7t+3)+c8t][-(c7t+3)+(c9t+3)-1]=scv_381;
B[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+2)]=scv_382;
B[-(c7t+1)+c8t][-(c7t+1)+c9t]=scv_383;
B[-c7t+c8t][-c7t+(c9t+1)]=scv_384;
X[-c7t+c8t][-c7t+(c9t+1)]=scv_385;
B[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+3)]=scv_386;
X[-c7t+(c8t+1)][-c7t+(c9t+3)-1]=scv_387;
B[-(c7t+2)+c8t][-(c7t+2)+c9t]=scv_388;
B[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+2)-1]=scv_390;
X[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+3)-1]=scv_391;
B[-c7t+c8t][-c7t+c9t]=scv_393;
X[-(c7t+2)+c8t][-(c7t+2)+(c9t+2)]=scv_394;
X[-(c7t+3)+(c8t+2)][-(c7t+3)+c9t-1]=scv_396;
B[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+3)]=scv_398;
B[-(c7t+3)+c8t][-(c7t+3)+(c9t+2)]=scv_405;
X[-(c7t+2)+c8t][-(c7t+2)+(c9t+2)-1]=scv_406;
X[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+1)-1]=scv_407;
X[-(c7t+2)+c8t][-(c7t+2)+(c9t+1)]=scv_410;
B[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+2)]=scv_411;
B[-(c7t+3)+(c8t+2)][-(c7t+3)+c9t-1]=scv_412;
B[-(c7t+1)+c8t][-(c7t+1)+(c9t+1)]=scv_414;
B[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+1)]=scv_415;
B[-(c7t+2)+(c8t+2)][-(c7t+2)+c9t]=scv_416;
X[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+3)-1]=scv_418;
X[-c7t+c8t][-c7t+(c9t+3)-1]=scv_419;
B[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+2)-1]=scv_420;
B[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+3)-1]=scv_421;
X[-c7t+(c8t+1)][-c7t+c9t-1]=scv_422;
X[-(c7t+1)+(c8t+2)][-(c7t+1)+(c9t+2)]=scv_423;
B[-c7t+(c8t+1)][-c7t+c9t-1]=scv_424;
B[-(c7t+2)+(c8t+2)][-(c7t+2)+c9t-1]=scv_430;
X[-(c7t+2)+(c8t+1)][-(c7t+2)+(c9t+2)-1]=scv_431;
B[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+1)]=scv_432;
B[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+1)]=scv_436;
B[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+2)]=scv_438;
B[-(c7t+3)+(c8t+2)][-(c7t+3)+(c9t+2)]=scv_441;
B[-(c7t+3)+(c8t+3)][-(c7t+3)+c9t]=scv_442;
B[-c7t+(c8t+2)][-c7t+(c9t+3)]=scv_443;
B[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+3)]=scv_444;
X[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+3)-1]=scv_445;
B[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+1)]=scv_449;
X[-c7t+c8t][-c7t+c9t]=scv_450;
X[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+1)]=scv_452;
X[-(c7t+3)+(c8t+3)][-(c7t+3)+c9t-1]=scv_454;
X[-(c7t+1)+c8t][-(c7t+1)+c9t-1]=scv_455;
B[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+1)]=scv_458;
B[-(c7t+3)+(c8t+3)][-(c7t+3)+(c9t+1)-1]=scv_459;
X[-c7t+(c8t+3)][-c7t+(c9t+2)-1]=scv_462;
X[-(c7t+3)+(c8t+3)][-(c7t+3)+c9t]=scv_463;
X[-(c7t+3)+(c8t+2)][-(c7t+3)+c9t]=scv_467;
X[-c7t+(c8t+2)][-c7t+c9t]=scv_468;
X[-(c7t+2)+(c8t+3)][-(c7t+2)+(c9t+2)]=scv_470;
B[-c7t+(c8t+3)][-c7t+(c9t+1)]=scv_471;
B[-(c7t+1)+c8t][-(c7t+1)+(c9t+3)]=scv_472;
X[-(c7t+3)+c8t][-(c7t+3)+c9t]=scv_473;
B[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+1)-1]=scv_475;
B[-c7t+(c8t+3)][-c7t+(c9t+2)-1]=scv_478;
B[-(c7t+1)+(c8t+1)][-(c7t+1)+(c9t+3)]=scv_479;
B[-(c7t+3)+c8t][-(c7t+3)+c9t-1]=scv_480;
B[-(c7t+1)+(c8t+2)][-(c7t+1)+c9t]=scv_481;
X[-(c7t+1)+(c8t+1)][-(c7t+1)+c9t-1]=scv_483;
X[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+1)-1]=scv_484;
X[-(c7t+2)+(c8t+2)][-(c7t+2)+c9t-1]=scv_485;
B[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+2)]=scv_486;
X[-(c7t+2)+(c8t+1)][-(c7t+2)+c9t-1]=scv_488;
B[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+3)-1]=scv_490;
X[-(c7t+1)+c8t][-(c7t+1)+(c9t+3)]=scv_491;
B[-(c7t+3)+(c8t+1)][-(c7t+3)+(c9t+1)]=scv_493;
X[-(c7t+2)+(c8t+2)][-(c7t+2)+(c9t+3)]=scv_497;
X[-(c7t+1)+(c8t+3)][-(c7t+1)+c9t]=scv_498;
X[-c7t+(c8t+3)][-c7t+c9t]=scv_501;
X[-(c7t+3)+c8t][-(c7t+3)+(c9t+2)-1]=scv_502;
B[-(c7t+1)+(c8t+3)][-(c7t+1)+(c9t+3)-1]=scv_508;
B[-(c7t+2)+(c8t+1)][-(c7t+2)+c9t-1]=scv_509;
}
// Cleanup loop over c8 for the remainder of the c8t tile (unrolled 4x over
// c7t/c9t, but not over c8). Auto-generated; statement order is significant.
#pragma ivdep
#pragma vector always
for (c8=c8t; c8<=newub_c8; c8=c8+1) {
double scv_512, scv_513, scv_514, scv_515, scv_516, scv_517, scv_518, scv_519;
double scv_520, scv_521, scv_522, scv_523, scv_524, scv_525, scv_526, scv_527;
double scv_528, scv_529, scv_530, scv_531, scv_532, scv_533, scv_534, scv_535;
double scv_536, scv_537, scv_538, scv_539, scv_540, scv_541, scv_542, scv_543;
double scv_544, scv_545, scv_546, scv_547, scv_548, scv_549, scv_550, scv_551;
double scv_552, scv_553, scv_554, scv_555, scv_556, scv_557, scv_558, scv_559;
double scv_560, scv_561, scv_562, scv_563, scv_564, scv_565, scv_566, scv_567;
double scv_568, scv_569, scv_570, scv_571, scv_572, scv_573, scv_574, scv_575;
double scv_576, scv_577, scv_578, scv_579, scv_580, scv_581, scv_582, scv_583;
double scv_584, scv_585, scv_586, scv_587, scv_588, scv_589, scv_590, scv_591;
double scv_592, scv_593, scv_594, scv_595, scv_596, scv_597, scv_598, scv_599;
double scv_600, scv_601, scv_602, scv_603, scv_604, scv_605, scv_606, scv_607;
double scv_608, scv_609, scv_610, scv_611, scv_612, scv_613, scv_614, scv_615;
double scv_616, scv_617, scv_618, scv_619, scv_620, scv_621, scv_622, scv_623;
// Load phase: scalar-replace the X/A/B elements this iteration touches.
scv_512=X[-c7t+c8][-c7t+c9t];
scv_513=X[-(c7t+3)+c8][-(c7t+3)+(c9t+2)];
scv_514=A[-(c7t+1)+c8][-(c7t+1)+(c9t+3)-1];
scv_515=X[-(c7t+1)+c8][-(c7t+1)+(c9t+1)];
scv_516=A[-c7t+c8][-c7t+c9t];
scv_517=A[-(c7t+3)+c8][-(c7t+3)+(c9t+2)-1];
scv_518=B[-(c7t+1)+c8][-(c7t+1)+(c9t+2)];
scv_519=B[-(c7t+3)+c8][-(c7t+3)+c9t-1];
scv_520=A[-c7t+c8][-c7t+c9t-1];
scv_521=X[-(c7t+3)+c8][-(c7t+3)+c9t-1];
scv_522=X[-(c7t+2)+c8][-(c7t+2)+(c9t+3)-1];
scv_523=A[-(c7t+1)+c8][-(c7t+1)+(c9t+3)];
scv_524=A[-(c7t+2)+c8][-(c7t+2)+(c9t+2)];
scv_525=A[-(c7t+3)+c8][-(c7t+3)+(c9t+1)-1];
scv_526=B[-c7t+c8-1][-c7t+(c9t+2)-1];
scv_527=B[-(c7t+2)+c8][-(c7t+2)+(c9t+3)-1];
scv_528=B[-(c7t+1)+c8][-(c7t+1)+(c9t+2)-1];
scv_529=A[-(c7t+3)+c8][-(c7t+3)+(c9t+3)];
scv_530=X[-(c7t+1)+c8][-(c7t+1)+(c9t+2)];
scv_531=X[-c7t+c8][-c7t+(c9t+1)-1];
scv_532=B[-c7t+c8][-c7t+(c9t+2)-1];
scv_533=B[-(c7t+1)+c8][-(c7t+1)+c9t-1];
scv_534=A[-c7t+c8][-c7t+(c9t+2)];
scv_535=B[-(c7t+1)+c8][-(c7t+1)+(c9t+1)];
scv_536=B[-(c7t+2)+c8][-(c7t+2)+(c9t+2)-1];
scv_537=B[-(c7t+3)+c8][-(c7t+3)+(c9t+3)-1];
scv_538=B[-c7t+c8][-c7t+(c9t+3)-1];
scv_539=A[-(c7t+2)+c8][-(c7t+2)+(c9t+1)];
scv_540=B[-c7t+c8][-c7t+(c9t+3)];
scv_541=A[-(c7t+2)+c8][-(c7t+2)+c9t];
scv_542=X[-(c7t+2)+c8][-(c7t+2)+c9t-1];
scv_543=A[-(c7t+3)+c8][-(c7t+3)+(c9t+1)];
scv_544=X[-(c7t+1)+c8][-(c7t+1)+(c9t+3)-1];
scv_545=B[-(c7t+3)+c8-1][-(c7t+3)+(c9t+3)-1];
scv_546=X[-c7t+c8][-c7t+(c9t+2)];
scv_547=B[-(c7t+3)+c8][-(c7t+3)+(c9t+3)];
scv_548=B[-(c7t+2)+c8][-(c7t+2)+(c9t+1)-1];
scv_549=B[-(c7t+2)+c8][-(c7t+2)+(c9t+2)];
scv_550=B[-(c7t+3)+c8-1][-(c7t+3)+(c9t+1)-1];
scv_551=B[-c7t+c8-1][-c7t+(c9t+3)-1];
scv_552=X[-c7t+c8][-c7t+c9t-1];
scv_553=B[-(c7t+1)+c8][-(c7t+1)+(c9t+3)-1];
scv_554=B[-(c7t+1)+c8-1][-(c7t+1)+(c9t+3)-1];
scv_555=B[-(c7t+2)+c8][-(c7t+2)+(c9t+1)];
scv_556=X[-c7t+c8][-c7t+(c9t+1)];
scv_557=X[-(c7t+3)+c8][-(c7t+3)+(c9t+2)-1];
scv_558=A[-(c7t+1)+c8][-(c7t+1)+(c9t+2)-1];
scv_559=X[-(c7t+2)+c8][-(c7t+2)+(c9t+3)];
scv_560=X[-(c7t+3)+c8][-(c7t+3)+(c9t+1)];
scv_561=X[-(c7t+2)+c8][-(c7t+2)+(c9t+2)-1];
scv_562=X[-c7t+c8][-c7t+(c9t+2)-1];
scv_563=A[-(c7t+1)+c8][-(c7t+1)+c9t-1];
scv_564=X[-(c7t+3)+c8][-(c7t+3)+(c9t+3)];
scv_565=A[-(c7t+2)+c8][-(c7t+2)+(c9t+2)-1];
scv_566=B[-(c7t+2)+c8-1][-(c7t+2)+c9t-1];
scv_567=A[-(c7t+2)+c8][-(c7t+2)+(c9t+3)-1];
scv_568=A[-(c7t+3)+c8][-(c7t+3)+c9t];
scv_569=B[-(c7t+2)+c8-1][-(c7t+2)+(c9t+2)-1];
scv_570=B[-(c7t+3)+c8-1][-(c7t+3)+c9t-1];
scv_571=A[-(c7t+1)+c8][-(c7t+1)+(c9t+1)];
scv_572=B[-(c7t+3)+c8][-(c7t+3)+c9t];
scv_573=B[-(c7t+2)+c8-1][-(c7t+2)+(c9t+3)-1];
scv_574=B[-c7t+c8][-c7t+c9t];
scv_575=X[-(c7t+2)+c8][-(c7t+2)+(c9t+1)];
scv_576=A[-(c7t+1)+c8][-(c7t+1)+(c9t+2)];
scv_577=B[-(c7t+1)+c8][-(c7t+1)+c9t];
scv_578=B[-(c7t+1)+c8-1][-(c7t+1)+(c9t+1)-1];
scv_579=A[-(c7t+2)+c8][-(c7t+2)+c9t-1];
scv_580=B[-(c7t+2)+c8][-(c7t+2)+c9t-1];
scv_581=B[-(c7t+3)+c8-1][-(c7t+3)+(c9t+2)-1];
scv_582=B[-(c7t+1)+c8][-(c7t+1)+(c9t+3)];
scv_583=B[-c7t+c8-1][-c7t+c9t-1];
scv_584=A[-(c7t+3)+c8][-(c7t+3)+(c9t+2)];
scv_585=X[-(c7t+1)+c8][-(c7t+1)+(c9t+3)];
scv_586=A[-(c7t+1)+c8][-(c7t+1)+c9t];
scv_587=A[-(c7t+2)+c8][-(c7t+2)+(c9t+3)];
scv_588=X[-(c7t+1)+c8][-(c7t+1)+c9t];
scv_589=A[-c7t+c8][-c7t+(c9t+3)];
scv_590=X[-c7t+c8][-c7t+(c9t+3)-1];
scv_591=A[-c7t+c8][-c7t+(c9t+2)-1];
scv_592=A[-c7t+c8][-c7t+(c9t+1)-1];
scv_593=B[-c7t+c8][-c7t+(c9t+2)];
scv_594=B[-c7t+c8-1][-c7t+(c9t+1)-1];
scv_595=B[-(c7t+3)+c8][-(c7t+3)+(c9t+2)-1];
scv_596=B[-c7t+c8][-c7t+(c9t+1)-1];
scv_597=B[-c7t+c8][-c7t+(c9t+1)];
scv_598=B[-(c7t+1)+c8-1][-(c7t+1)+(c9t+2)-1];
scv_599=A[-(c7t+1)+c8][-(c7t+1)+(c9t+1)-1];
scv_600=X[-(c7t+2)+c8][-(c7t+2)+c9t];
scv_601=B[-(c7t+3)+c8][-(c7t+3)+(c9t+1)];
scv_602=A[-(c7t+2)+c8][-(c7t+2)+(c9t+1)-1];
scv_603=B[-(c7t+1)+c8-1][-(c7t+1)+c9t-1];
scv_604=X[-(c7t+3)+c8][-(c7t+3)+(c9t+3)-1];
scv_605=B[-(c7t+3)+c8][-(c7t+3)+(c9t+2)];
scv_606=A[-(c7t+3)+c8][-(c7t+3)+c9t-1];
scv_607=B[-(c7t+2)+c8][-(c7t+2)+(c9t+3)];
scv_608=X[-(c7t+3)+c8][-(c7t+3)+(c9t+1)-1];
scv_609=X[-(c7t+1)+c8][-(c7t+1)+(c9t+1)-1];
scv_610=A[-c7t+c8][-c7t+(c9t+3)-1];
scv_611=X[-(c7t+1)+c8][-(c7t+1)+c9t-1];
scv_612=B[-(c7t+2)+c8][-(c7t+2)+c9t];
scv_613=A[-c7t+c8][-c7t+(c9t+1)];
scv_614=B[-c7t+c8][-c7t+c9t-1];
scv_615=X[-(c7t+2)+c8][-(c7t+2)+(c9t+2)];
scv_616=A[-(c7t+3)+c8][-(c7t+3)+(c9t+3)-1];
scv_617=B[-(c7t+1)+c8][-(c7t+1)+(c9t+1)-1];
scv_618=X[-(c7t+3)+c8][-(c7t+3)+c9t];
scv_619=X[-c7t+c8][-c7t+(c9t+3)];
scv_620=B[-(c7t+3)+c8][-(c7t+3)+(c9t+1)-1];
scv_621=B[-(c7t+2)+c8-1][-(c7t+2)+(c9t+1)-1];
scv_622=X[-(c7t+2)+c8][-(c7t+2)+(c9t+1)-1];
scv_623=X[-(c7t+1)+c8][-(c7t+1)+(c9t+2)-1];
// First-direction elimination on X: x = x - x_prev_col * a / b_prev_col.
scv_512=scv_512-scv_552*scv_516/scv_614;
scv_556=scv_556-scv_531*scv_613/scv_596;
scv_546=scv_546-scv_562*scv_534/scv_532;
scv_619=scv_619-scv_590*scv_589/scv_538;
scv_588=scv_588-scv_611*scv_586/scv_533;
scv_515=scv_515-scv_609*scv_571/scv_617;
scv_530=scv_530-scv_623*scv_576/scv_528;
scv_585=scv_585-scv_544*scv_523/scv_553;
scv_600=scv_600-scv_542*scv_541/scv_580;
scv_575=scv_575-scv_622*scv_539/scv_548;
scv_615=scv_615-scv_561*scv_524/scv_536;
scv_559=scv_559-scv_522*scv_587/scv_527;
scv_618=scv_618-scv_521*scv_568/scv_519;
scv_560=scv_560-scv_608*scv_543/scv_620;
scv_513=scv_513-scv_557*scv_584/scv_595;
scv_564=scv_564-scv_604*scv_529/scv_537;
// Matching pivot updates on B: b = b - a*a / b_prev_col.
scv_574=scv_574-scv_516*scv_516/scv_614;
scv_597=scv_597-scv_613*scv_613/scv_596;
scv_593=scv_593-scv_534*scv_534/scv_532;
scv_540=scv_540-scv_589*scv_589/scv_538;
scv_577=scv_577-scv_586*scv_586/scv_533;
scv_535=scv_535-scv_571*scv_571/scv_617;
scv_518=scv_518-scv_576*scv_576/scv_528;
scv_582=scv_582-scv_523*scv_523/scv_553;
scv_612=scv_612-scv_541*scv_541/scv_580;
scv_555=scv_555-scv_539*scv_539/scv_548;
scv_549=scv_549-scv_524*scv_524/scv_536;
scv_607=scv_607-scv_587*scv_587/scv_527;
scv_572=scv_572-scv_568*scv_568/scv_519;
scv_601=scv_601-scv_543*scv_543/scv_620;
scv_605=scv_605-scv_584*scv_584/scv_595;
scv_547=scv_547-scv_529*scv_529/scv_537;
// Second-direction elimination: uses X[.. c8-1 ..] (previous c8 row)
// directly, since those elements were not scalar-replaced.
scv_552=scv_552-X[-c7t+c8-1][-c7t+c9t-1]*scv_520/scv_583;
scv_531=scv_531-X[-c7t+c8-1][-c7t+(c9t+1)-1]*scv_592/scv_594;
scv_562=scv_562-X[-c7t+c8-1][-c7t+(c9t+2)-1]*scv_591/scv_526;
scv_590=scv_590-X[-c7t+c8-1][-c7t+(c9t+3)-1]*scv_610/scv_551;
scv_611=scv_611-X[-(c7t+1)+c8-1][-(c7t+1)+c9t-1]*scv_563/scv_603;
scv_609=scv_609-X[-(c7t+1)+c8-1][-(c7t+1)+(c9t+1)-1]*scv_599/scv_578;
scv_623=scv_623-X[-(c7t+1)+c8-1][-(c7t+1)+(c9t+2)-1]*scv_558/scv_598;
scv_544=scv_544-X[-(c7t+1)+c8-1][-(c7t+1)+(c9t+3)-1]*scv_514/scv_554;
scv_542=scv_542-X[-(c7t+2)+c8-1][-(c7t+2)+c9t-1]*scv_579/scv_566;
scv_622=scv_622-X[-(c7t+2)+c8-1][-(c7t+2)+(c9t+1)-1]*scv_602/scv_621;
scv_561=scv_561-X[-(c7t+2)+c8-1][-(c7t+2)+(c9t+2)-1]*scv_565/scv_569;
scv_522=scv_522-X[-(c7t+2)+c8-1][-(c7t+2)+(c9t+3)-1]*scv_567/scv_573;
scv_521=scv_521-X[-(c7t+3)+c8-1][-(c7t+3)+c9t-1]*scv_606/scv_570;
scv_608=scv_608-X[-(c7t+3)+c8-1][-(c7t+3)+(c9t+1)-1]*scv_525/scv_550;
scv_557=scv_557-X[-(c7t+3)+c8-1][-(c7t+3)+(c9t+2)-1]*scv_517/scv_581;
scv_604=scv_604-X[-(c7t+3)+c8-1][-(c7t+3)+(c9t+3)-1]*scv_616/scv_545;
// Pivot updates on B for the second direction.
scv_614=scv_614-scv_520*scv_520/scv_583;
scv_596=scv_596-scv_592*scv_592/scv_594;
scv_532=scv_532-scv_591*scv_591/scv_526;
scv_538=scv_538-scv_610*scv_610/scv_551;
scv_533=scv_533-scv_563*scv_563/scv_603;
scv_617=scv_617-scv_599*scv_599/scv_578;
scv_528=scv_528-scv_558*scv_558/scv_598;
scv_553=scv_553-scv_514*scv_514/scv_554;
scv_580=scv_580-scv_579*scv_579/scv_566;
scv_548=scv_548-scv_602*scv_602/scv_621;
scv_536=scv_536-scv_565*scv_565/scv_569;
scv_527=scv_527-scv_567*scv_567/scv_573;
scv_519=scv_519-scv_606*scv_606/scv_570;
scv_620=scv_620-scv_525*scv_525/scv_550;
scv_595=scv_595-scv_517*scv_517/scv_581;
scv_537=scv_537-scv_616*scv_616/scv_545;
// Store phase: write the modified scalars back to X and B.
X[-c7t+c8][-c7t+c9t]=scv_512;
X[-(c7t+3)+c8][-(c7t+3)+(c9t+2)]=scv_513;
X[-(c7t+1)+c8][-(c7t+1)+(c9t+1)]=scv_515;
B[-(c7t+1)+c8][-(c7t+1)+(c9t+2)]=scv_518;
B[-(c7t+3)+c8][-(c7t+3)+c9t-1]=scv_519;
X[-(c7t+3)+c8][-(c7t+3)+c9t-1]=scv_521;
X[-(c7t+2)+c8][-(c7t+2)+(c9t+3)-1]=scv_522;
B[-(c7t+2)+c8][-(c7t+2)+(c9t+3)-1]=scv_527;
B[-(c7t+1)+c8][-(c7t+1)+(c9t+2)-1]=scv_528;
X[-(c7t+1)+c8][-(c7t+1)+(c9t+2)]=scv_530;
X[-c7t+c8][-c7t+(c9t+1)-1]=scv_531;
B[-c7t+c8][-c7t+(c9t+2)-1]=scv_532;
B[-(c7t+1)+c8][-(c7t+1)+c9t-1]=scv_533;
B[-(c7t+1)+c8][-(c7t+1)+(c9t+1)]=scv_535;
B[-(c7t+2)+c8][-(c7t+2)+(c9t+2)-1]=scv_536;
B[-(c7t+3)+c8][-(c7t+3)+(c9t+3)-1]=scv_537;
B[-c7t+c8][-c7t+(c9t+3)-1]=scv_538;
B[-c7t+c8][-c7t+(c9t+3)]=scv_540;
X[-(c7t+2)+c8][-(c7t+2)+c9t-1]=scv_542;
X[-(c7t+1)+c8][-(c7t+1)+(c9t+3)-1]=scv_544;
X[-c7t+c8][-c7t+(c9t+2)]=scv_546;
B[-(c7t+3)+c8][-(c7t+3)+(c9t+3)]=scv_547;
B[-(c7t+2)+c8][-(c7t+2)+(c9t+1)-1]=scv_548;
B[-(c7t+2)+c8][-(c7t+2)+(c9t+2)]=scv_549;
X[-c7t+c8][-c7t+c9t-1]=scv_552;
B[-(c7t+1)+c8][-(c7t+1)+(c9t+3)-1]=scv_553;
B[-(c7t+2)+c8][-(c7t+2)+(c9t+1)]=scv_555;
X[-c7t+c8][-c7t+(c9t+1)]=scv_556;
X[-(c7t+3)+c8][-(c7t+3)+(c9t+2)-1]=scv_557;
X[-(c7t+2)+c8][-(c7t+2)+(c9t+3)]=scv_559;
X[-(c7t+3)+c8][-(c7t+3)+(c9t+1)]=scv_560;
X[-(c7t+2)+c8][-(c7t+2)+(c9t+2)-1]=scv_561;
X[-c7t+c8][-c7t+(c9t+2)-1]=scv_562;
X[-(c7t+3)+c8][-(c7t+3)+(c9t+3)]=scv_564;
B[-(c7t+3)+c8][-(c7t+3)+c9t]=scv_572;
B[-c7t+c8][-c7t+c9t]=scv_574;
X[-(c7t+2)+c8][-(c7t+2)+(c9t+1)]=scv_575;
B[-(c7t+1)+c8][-(c7t+1)+c9t]=scv_577;
B[-(c7t+2)+c8][-(c7t+2)+c9t-1]=scv_580;
B[-(c7t+1)+c8][-(c7t+1)+(c9t+3)]=scv_582;
X[-(c7t+1)+c8][-(c7t+1)+(c9t+3)]=scv_585;
X[-(c7t+1)+c8][-(c7t+1)+c9t]=scv_588;
X[-c7t+c8][-c7t+(c9t+3)-1]=scv_590;
B[-c7t+c8][-c7t+(c9t+2)]=scv_593;
B[-(c7t+3)+c8][-(c7t+3)+(c9t+2)-1]=scv_595;
B[-c7t+c8][-c7t+(c9t+1)-1]=scv_596;
B[-c7t+c8][-c7t+(c9t+1)]=scv_597;
X[-(c7t+2)+c8][-(c7t+2)+c9t]=scv_600;
B[-(c7t+3)+c8][-(c7t+3)+(c9t+1)]=scv_601;
X[-(c7t+3)+c8][-(c7t+3)+(c9t+3)-1]=scv_604;
B[-(c7t+3)+c8][-(c7t+3)+(c9t+2)]=scv_605;
B[-(c7t+2)+c8][-(c7t+2)+(c9t+3)]=scv_607;
X[-(c7t+3)+c8][-(c7t+3)+(c9t+1)-1]=scv_608;
X[-(c7t+1)+c8][-(c7t+1)+(c9t+1)-1]=scv_609;
X[-(c7t+1)+c8][-(c7t+1)+c9t-1]=scv_611;
B[-(c7t+2)+c8][-(c7t+2)+c9t]=scv_612;
B[-c7t+c8][-c7t+c9t-1]=scv_614;
X[-(c7t+2)+c8][-(c7t+2)+(c9t+2)]=scv_615;
B[-(c7t+1)+c8][-(c7t+1)+(c9t+1)-1]=scv_617;
X[-(c7t+3)+c8][-(c7t+3)+c9t]=scv_618;
X[-c7t+c8][-c7t+(c9t+3)]=scv_619;
B[-(c7t+3)+c8][-(c7t+3)+(c9t+1)-1]=scv_620;
X[-(c7t+2)+c8][-(c7t+2)+(c9t+1)-1]=scv_622;
X[-(c7t+1)+c8][-(c7t+1)+(c9t+2)-1]=scv_623;
}
// Epilogue: for each of the 4 unrolled c7 values, process the remaining c8
// iterations (newub_c8+1 .. min(c7+N-1, 16*c2+15)) that the unrolled tile
// body above could not cover. Same load / eliminate / store structure as the
// cleanup loop, unrolled 4x over c9t only.
for (c7=c7t; c7<=c7t+3; c7=c7+1) {
register int cbv_9, cbv_10;
cbv_9=newub_c8+1;
cbv_10=min(c7+N-1,16*c2+15);
#pragma ivdep
#pragma vector always
for (c8=cbv_9; c8<=cbv_10; c8=c8+1) {
double scv_624, scv_625, scv_626, scv_627, scv_628, scv_629, scv_630, scv_631;
double scv_632, scv_633, scv_634, scv_635, scv_636, scv_637, scv_638, scv_639;
double scv_640, scv_641, scv_642, scv_643, scv_644, scv_645, scv_646, scv_647;
double scv_648, scv_649, scv_650, scv_651;
// Load phase.
scv_624=B[-c7+c8-1][-c7+(c9t+2)-1];
scv_625=B[-c7+c8][-c7+(c9t+1)-1];
scv_626=X[-c7+c8][-c7+(c9t+2)];
scv_627=A[-c7+c8][-c7+(c9t+3)-1];
scv_628=X[-c7+c8][-c7+(c9t+3)-1];
scv_629=X[-c7+c8][-c7+(c9t+3)];
scv_630=B[-c7+c8-1][-c7+c9t-1];
scv_631=X[-c7+c8][-c7+(c9t+1)-1];
scv_632=B[-c7+c8][-c7+c9t-1];
scv_633=B[-c7+c8-1][-c7+(c9t+1)-1];
scv_634=X[-c7+c8][-c7+(c9t+1)];
scv_635=B[-c7+c8][-c7+(c9t+2)];
scv_636=B[-c7+c8][-c7+(c9t+1)];
scv_637=A[-c7+c8][-c7+c9t-1];
scv_638=A[-c7+c8][-c7+(c9t+3)];
scv_639=A[-c7+c8][-c7+(c9t+2)-1];
scv_640=B[-c7+c8][-c7+(c9t+3)];
scv_641=A[-c7+c8][-c7+(c9t+1)-1];
scv_642=B[-c7+c8][-c7+(c9t+3)-1];
scv_643=B[-c7+c8-1][-c7+(c9t+3)-1];
scv_644=A[-c7+c8][-c7+c9t];
scv_645=A[-c7+c8][-c7+(c9t+1)];
scv_646=B[-c7+c8][-c7+(c9t+2)-1];
scv_647=A[-c7+c8][-c7+(c9t+2)];
scv_648=X[-c7+c8][-c7+c9t];
scv_649=B[-c7+c8][-c7+c9t];
scv_650=X[-c7+c8][-c7+(c9t+2)-1];
scv_651=X[-c7+c8][-c7+c9t-1];
// First-direction elimination (x -= x_prev*a/b) and B pivot updates.
scv_648=scv_648-scv_651*scv_644/scv_632;
scv_634=scv_634-scv_631*scv_645/scv_625;
scv_626=scv_626-scv_650*scv_647/scv_646;
scv_629=scv_629-scv_628*scv_638/scv_642;
scv_649=scv_649-scv_644*scv_644/scv_632;
scv_636=scv_636-scv_645*scv_645/scv_625;
scv_635=scv_635-scv_647*scv_647/scv_646;
scv_640=scv_640-scv_638*scv_638/scv_642;
// Second-direction elimination using the previous c8 row, then B pivots.
scv_651=scv_651-X[-c7+c8-1][-c7+c9t-1]*scv_637/scv_630;
scv_631=scv_631-X[-c7+c8-1][-c7+(c9t+1)-1]*scv_641/scv_633;
scv_650=scv_650-X[-c7+c8-1][-c7+(c9t+2)-1]*scv_639/scv_624;
scv_628=scv_628-X[-c7+c8-1][-c7+(c9t+3)-1]*scv_627/scv_643;
scv_632=scv_632-scv_637*scv_637/scv_630;
scv_625=scv_625-scv_641*scv_641/scv_633;
scv_646=scv_646-scv_639*scv_639/scv_624;
scv_642=scv_642-scv_627*scv_627/scv_643;
// Store phase.
B[-c7+c8][-c7+(c9t+1)-1]=scv_625;
X[-c7+c8][-c7+(c9t+2)]=scv_626;
X[-c7+c8][-c7+(c9t+3)-1]=scv_628;
X[-c7+c8][-c7+(c9t+3)]=scv_629;
X[-c7+c8][-c7+(c9t+1)-1]=scv_631;
B[-c7+c8][-c7+c9t-1]=scv_632;
X[-c7+c8][-c7+(c9t+1)]=scv_634;
B[-c7+c8][-c7+(c9t+2)]=scv_635;
B[-c7+c8][-c7+(c9t+1)]=scv_636;
B[-c7+c8][-c7+(c9t+3)]=scv_640;
B[-c7+c8][-c7+(c9t+3)-1]=scv_642;
B[-c7+c8][-c7+(c9t+2)-1]=scv_646;
X[-c7+c8][-c7+c9t]=scv_648;
B[-c7+c8][-c7+c9t]=scv_649;
X[-c7+c8][-c7+(c9t+2)-1]=scv_650;
X[-c7+c8][-c7+c9t-1]=scv_651;
}
}
}
// Remainder loop over c9 (iterations of the c9t tile not covered by the 4x
// unrolled body above). Per c9: compute the common c8 upper bound, run a
// small non-unrolled cleanup loop for c8 below the tile start, then begin a
// 4x-unrolled c8t loop.
// NOTE(review): the unrolled c8t loop body starting below is truncated at the
// end of this chunk (load phase only is visible); its updates/stores continue
// past the visible region.
for (c9=c9t; c9<=newub_c9; c9=c9+1) {
newlb_c8=16*c2;
newub_c8=2147483647;
register int cbv_11;
cbv_11=c7t+3;
// Reduce newub_c8 to min over the four c7 values (INT_MAX sentinel above).
#pragma ivdep
#pragma vector always
for (c7=c7t; c7<=cbv_11; c7=c7+1) {
newub_c8=min(newub_c8,min(c7+N-1,16*c2+15));
}
// Cleanup for c8 in [16*c2, newlb_c8-1]: same eliminate pattern, one (c7,c9)
// point at a time. (With newlb_c8 set to 16*c2 above, this range is empty
// here — presumably kept by the generator for structural uniformity;
// TODO confirm against the generator's output for other tile shapes.)
for (c7=c7t; c7<=c7t+3; c7=c7+1) {
register int cbv_12, cbv_13;
cbv_12=16*c2;
cbv_13=newlb_c8-1;
#pragma ivdep
#pragma vector always
for (c8=cbv_12; c8<=cbv_13; c8=c8+1) {
double scv_652, scv_653, scv_654, scv_655, scv_656, scv_657, scv_658;
scv_652=X[-c7+c8][-c7+c9-1];
scv_653=X[-c7+c8][-c7+c9];
scv_654=B[-c7+c8][-c7+c9-1];
scv_655=B[-c7+c8][-c7+c9];
scv_656=A[-c7+c8][-c7+c9-1];
scv_657=A[-c7+c8][-c7+c9];
scv_658=B[-c7+c8-1][-c7+c9-1];
scv_653=scv_653-scv_652*scv_657/scv_654;
scv_655=scv_655-scv_657*scv_657/scv_654;
scv_652=scv_652-X[-c7+c8-1][-c7+c9-1]*scv_656/scv_658;
scv_654=scv_654-scv_656*scv_656/scv_658;
X[-c7+c8][-c7+c9-1]=scv_652;
X[-c7+c8][-c7+c9]=scv_653;
B[-c7+c8][-c7+c9-1]=scv_654;
B[-c7+c8][-c7+c9]=scv_655;
}
}
// Main unrolled-by-4 c8t loop for this c9. Body below is the load phase of
// the scalar replacement; the chunk is cut off mid-body.
register int cbv_14;
cbv_14=newub_c8-3;
#pragma ivdep
#pragma vector always
for (c8t=newlb_c8; c8t<=cbv_14; c8t=c8t+4) {
double scv_659, scv_660, scv_661, scv_662, scv_663, scv_664, scv_665, scv_666;
double scv_667, scv_668, scv_669, scv_670, scv_671, scv_672, scv_673, scv_674;
double scv_675, scv_676, scv_677, scv_678, scv_679, scv_680, scv_681, scv_682;
double scv_683, scv_684, scv_685, scv_686, scv_687, scv_688, scv_689, scv_690;
double scv_691, scv_692, scv_693, scv_694, scv_695, scv_696, scv_697, scv_698;
double scv_699, scv_700, scv_701, scv_702, scv_703, scv_704, scv_705, scv_706;
double scv_707, scv_708, scv_709, scv_710, scv_711, scv_712, scv_713, scv_714;
double scv_715, scv_716, scv_717, scv_718, scv_719, scv_720, scv_721, scv_722;
double scv_723, scv_724, scv_725, scv_726, scv_727, scv_728, scv_729, scv_730;
double scv_731, scv_732, scv_733, scv_734, scv_735, scv_736, scv_737, scv_738;
double scv_739, scv_740, scv_741, scv_742, scv_743, scv_744, scv_745, scv_746;
double scv_747, scv_748, scv_749, scv_750, scv_751, scv_752, scv_753, scv_754;
double scv_755, scv_756, scv_757, scv_758, scv_759, scv_760, scv_761, scv_762;
double scv_763, scv_764, scv_765, scv_766, scv_767, scv_768, scv_769, scv_770;
scv_659=B[-(c7t+2)+(c8t+3)][-(c7t+2)+c9-1];
scv_660=B[-(c7t+2)+(c8t+2)][-(c7t+2)+c9];
scv_661=X[-(c7t+1)+(c8t+2)][-(c7t+1)+c9-1];
scv_662=A[-(c7t+3)+(c8t+2)][-(c7t+3)+c9-1];
scv_663=B[-c7t+c8t-1][-c7t+c9-1];
scv_664=A[-(c7t+3)+(c8t+2)][-(c7t+3)+c9];
scv_665=A[-(c7t+3)+(c8t+3)][-(c7t+3)+c9-1];
scv_666=X[-(c7t+1)+(c8t+3)][-(c7t+1)+c9];
scv_667=A[-(c7t+1)+c8t][-(c7t+1)+c9];
scv_668=B[-(c7t+3)+(c8t+3)][-(c7t+3)+c9-1];
scv_669=B[-c7t+c8t][-c7t+c9-1];
scv_670=A[-(c7t+2)+(c8t+3)][-(c7t+2)+c9-1];
scv_671=X[-(c7t+1)+c8t][-(c7t+1)+c9];
scv_672=A[-(c7t+2)+(c8t+2)][-(c7t+2)+c9];
scv_673=B[-(c7t+1)+c8t][-(c7t+1)+c9];
scv_674=B[-c7t+(c8t+2)][-c7t+c9];
scv_675=B[-(c7t+3)+(c8t+1)-1][-(c7t+3)+c9-1];
scv_676=X[-(c7t+3)+c8t][-(c7t+3)+c9-1];
scv_677=A[-(c7t+1)+c8t][-(c7t+1)+c9-1];
scv_678=X[-(c7t+3)+(c8t+2)][-(c7t+3)+c9-1];
scv_679=A[-(c7t+3)+c8t][-(c7t+3)+c9];
scv_680=X[-(c7t+1)+(c8t+2)][-(c7t+1)+c9];
scv_681=B[-(c7t+3)+(c8t+2)][-(c7t+3)+c9-1];
scv_682=B[-(c7t+2)+(c8t+1)][-(c7t+2)+c9-1];
scv_683=A[-c7t+(c8t+2)][-c7t+c9-1];
scv_684=B[-(c7t+3)+(c8t+1)][-(c7t+3)+c9-1];
scv_685=B[-(c7t+2)+(c8t+3)-1][-(c7t+2)+c9-1];
scv_686=X[-(c7t+1)+(c8t+1)][-(c7t+1)+c9-1];
scv_687=X[-(c7t+3)+c8t][-(c7t+3)+c9];
scv_688=A[-(c7t+3)+(c8t+3)][-(c7t+3)+c9];
scv_689=B[-(c7t+3)+c8t][-(c7t+3)+c9-1];
scv_690=X[-(c7t+3)+(c8t+1)][-(c7t+3)+c9];
scv_691=B[-(c7t+2)+(c8t+2)][-(c7t+2)+c9-1];
scv_692=B[-c7t+(c8t+1)-1][-c7t+c9-1];
scv_693=A[-c7t+(c8t+1)][-c7t+c9];
scv_694=B[-(c7t+3)+c8t-1][-(c7t+3)+c9-1];
scv_695=A[-(c7t+3)+c8t][-(c7t+3)+c9-1];
scv_696=X[-(c7t+3)+(c8t+3)][-(c7t+3)+c9];
scv_697=B[-(c7t+1)+(c8t+2)-1][-(c7t+1)+c9-1];
scv_698=X[-(c7t+1)+c8t][-(c7t+1)+c9-1];
scv_699=A[-(c7t+2)+(c8t+3)][-(c7t+2)+c9];
scv_700=X[-c7t+c8t][-c7t+c9-1];
scv_701=X[-(c7t+2)+c8t][-(c7t+2)+c9-1];
scv_702=B[-(c7t+3)+(c8t+2)][-(c7t+3)+c9];
scv_703=A[-(c7t+2)+(c8t+2)][-(c7t+2)+c9-1];
scv_704=B[-c7t+(c8t+3)][-c7t+c9-1];
scv_705=B[-(c7t+2)+c8t-1][-(c7t+2)+c9-1];
scv_706=A[-(c7t+1)+(c8t+2)][-(c7t+1)+c9-1];
scv_687=X[-(c7t+3)+c8t][-(c7t+3)+c9];
scv_688=A[-(c7t+3)+(c8t+3)][-(c7t+3)+c9];
scv_689=B[-(c7t+3)+c8t][-(c7t+3)+c9-1];
scv_690=X[-(c7t+3)+(c8t+1)][-(c7t+3)+c9];
scv_691=B[-(c7t+2)+(c8t+2)][-(c7t+2)+c9-1];
scv_692=B[-c7t+(c8t+1)-1][-c7t+c9-1];
scv_693=A[-c7t+(c8t+1)][-c7t+c9];
scv_694=B[-(c7t+3)+c8t-1][-(c7t+3)+c9-1];
scv_695=A[-(c7t+3)+c8t][-(c7t+3)+c9-1];
scv_696=X[-(c7t+3)+(c8t+3)][-(c7t+3)+c9];
scv_697=B[-(c7t+1)+(c8t+2)-1][-(c7t+1)+c9-1];
scv_698=X[-(c7t+1)+c8t][-(c7t+1)+c9-1];
scv_699=A[-(c7t+2)+(c8t+3)][-(c7t+2)+c9];
scv_700=X[-c7t+c8t][-c7t+c9-1];
scv_701=X[-(c7t+2)+c8t][-(c7t+2)+c9-1];
scv_702=B[-(c7t+3)+(c8t+2)][-(c7t+3)+c9];
scv_703=A[-(c7t+2)+(c8t+2)][-(c7t+2)+c9-1];
scv_704=B[-c7t+(c8t+3)][-c7t+c9-1];
scv_705=B[-(c7t+2)+c8t-1][-(c7t+2)+c9-1];
scv_706=A[-(c7t+1)+(c8t+2)][-(c7t+1)+c9-1];
scv_707=B[-c7t+(c8t+1)][-c7t+c9];
scv_708=X[-(c7t+2)+(c8t+1)][-(c7t+2)+c9];
scv_709=X[-(c7t+1)+(c8t+3)][-(c7t+1)+c9-1];
scv_710=X[-c7t+c8t][-c7t+c9];
scv_711=B[-c7t+(c8t+2)][-c7t+c9-1];
scv_712=B[-(c7t+3)+(c8t+2)-1][-(c7t+3)+c9-1];
scv_713=X[-c7t+(c8t+3)][-c7t+c9-1];
scv_714=X[-(c7t+2)+c8t][-(c7t+2)+c9];
scv_715=A[-c7t+(c8t+3)][-c7t+c9];
scv_716=B[-(c7t+2)+(c8t+1)][-(c7t+2)+c9];
scv_717=B[-c7t+(c8t+1)][-c7t+c9-1];
scv_718=A[-c7t+c8t][-c7t+c9-1];
scv_719=X[-(c7t+2)+(c8t+1)][-(c7t+2)+c9-1];
scv_720=X[-c7t+(c8t+2)][-c7t+c9];
scv_721=B[-(c7t+1)+(c8t+1)-1][-(c7t+1)+c9-1];
scv_722=X[-c7t+(c8t+1)][-c7t+c9];
scv_723=B[-(c7t+1)+(c8t+3)][-(c7t+1)+c9-1];
scv_724=X[-(c7t+3)+(c8t+1)][-(c7t+3)+c9-1];
scv_725=B[-(c7t+3)+(c8t+3)][-(c7t+3)+c9];
scv_726=B[-(c7t+2)+c8t][-(c7t+2)+c9];
scv_727=A[-(c7t+2)+c8t][-(c7t+2)+c9];
scv_728=B[-c7t+(c8t+3)-1][-c7t+c9-1];
scv_729=X[-c7t+(c8t+3)][-c7t+c9];
scv_730=X[-(c7t+2)+(c8t+3)][-(c7t+2)+c9-1];
scv_731=A[-(c7t+2)+c8t][-(c7t+2)+c9-1];
scv_732=B[-(c7t+1)+(c8t+3)][-(c7t+1)+c9];
scv_733=X[-(c7t+3)+(c8t+3)][-(c7t+3)+c9-1];
scv_734=X[-c7t+(c8t+2)][-c7t+c9-1];
scv_735=B[-c7t+c8t][-c7t+c9];
scv_736=X[-c7t+(c8t+1)][-c7t+c9-1];
scv_737=B[-(c7t+3)+(c8t+1)][-(c7t+3)+c9];
scv_738=A[-(c7t+2)+(c8t+1)][-(c7t+2)+c9-1];
scv_739=B[-(c7t+2)+(c8t+3)][-(c7t+2)+c9];
scv_740=B[-c7t+(c8t+2)-1][-c7t+c9-1];
scv_741=B[-(c7t+2)+c8t][-(c7t+2)+c9-1];
scv_742=A[-(c7t+3)+(c8t+1)][-(c7t+3)+c9];
scv_743=B[-(c7t+2)+(c8t+2)-1][-(c7t+2)+c9-1];
scv_744=X[-(c7t+3)+(c8t+2)][-(c7t+3)+c9];
scv_745=B[-(c7t+1)+c8t][-(c7t+1)+c9-1];
scv_746=B[-(c7t+1)+(c8t+2)][-(c7t+1)+c9-1];
scv_747=A[-c7t+(c8t+2)][-c7t+c9];
scv_748=X[-(c7t+2)+(c8t+2)][-(c7t+2)+c9-1];
scv_749=A[-c7t+c8t][-c7t+c9];
scv_750=A[-(c7t+2)+(c8t+1)][-(c7t+2)+c9];
scv_751=B[-(c7t+1)+c8t-1][-(c7t+1)+c9-1];
scv_752=X[-(c7t+2)+(c8t+3)][-(c7t+2)+c9];
scv_753=B[-(c7t+1)+(c8t+1)][-(c7t+1)+c9];
scv_754=B[-(c7t+1)+(c8t+2)][-(c7t+1)+c9];
scv_755=X[-(c7t+2)+(c8t+2)][-(c7t+2)+c9];
scv_756=A[-c7t+(c8t+1)][-c7t+c9-1];
scv_757=B[-(c7t+2)+(c8t+1)-1][-(c7t+2)+c9-1];
scv_758=X[-(c7t+1)+(c8t+1)][-(c7t+1)+c9];
scv_759=B[-(c7t+3)+c8t][-(c7t+3)+c9];
scv_760=B[-c7t+(c8t+3)][-c7t+c9];
scv_761=B[-(c7t+1)+(c8t+1)][-(c7t+1)+c9-1];
scv_762=B[-(c7t+1)+(c8t+3)-1][-(c7t+1)+c9-1];
scv_763=A[-(c7t+1)+(c8t+3)][-(c7t+1)+c9];
scv_764=A[-(c7t+1)+(c8t+1)][-(c7t+1)+c9-1];
scv_765=B[-(c7t+3)+(c8t+3)-1][-(c7t+3)+c9-1];
scv_766=A[-(c7t+3)+(c8t+1)][-(c7t+3)+c9-1];
scv_767=A[-c7t+(c8t+3)][-c7t+c9-1];
scv_768=A[-(c7t+1)+(c8t+2)][-(c7t+1)+c9];
scv_769=A[-(c7t+1)+(c8t+1)][-(c7t+1)+c9];
scv_770=A[-(c7t+1)+(c8t+3)][-(c7t+1)+c9-1];
scv_710=scv_710-scv_700*scv_749/scv_669;
scv_722=scv_722-scv_736*scv_693/scv_717;
scv_720=scv_720-scv_734*scv_747/scv_711;
scv_729=scv_729-scv_713*scv_715/scv_704;
scv_671=scv_671-scv_698*scv_667/scv_745;
scv_758=scv_758-scv_686*scv_769/scv_761;
scv_680=scv_680-scv_661*scv_768/scv_746;
scv_666=scv_666-scv_709*scv_763/scv_723;
scv_714=scv_714-scv_701*scv_727/scv_741;
scv_708=scv_708-scv_719*scv_750/scv_682;
scv_755=scv_755-scv_748*scv_672/scv_691;
scv_752=scv_752-scv_730*scv_699/scv_659;
scv_687=scv_687-scv_676*scv_679/scv_689;
scv_690=scv_690-scv_724*scv_742/scv_684;
scv_744=scv_744-scv_678*scv_664/scv_681;
scv_696=scv_696-scv_733*scv_688/scv_668;
scv_735=scv_735-scv_749*scv_749/scv_669;
scv_707=scv_707-scv_693*scv_693/scv_717;
scv_674=scv_674-scv_747*scv_747/scv_711;
scv_760=scv_760-scv_715*scv_715/scv_704;
scv_673=scv_673-scv_667*scv_667/scv_745;
scv_753=scv_753-scv_769*scv_769/scv_761;
scv_754=scv_754-scv_768*scv_768/scv_746;
scv_732=scv_732-scv_763*scv_763/scv_723;
scv_726=scv_726-scv_727*scv_727/scv_741;
scv_716=scv_716-scv_750*scv_750/scv_682;
scv_660=scv_660-scv_672*scv_672/scv_691;
scv_739=scv_739-scv_699*scv_699/scv_659;
scv_759=scv_759-scv_679*scv_679/scv_689;
scv_737=scv_737-scv_742*scv_742/scv_684;
scv_702=scv_702-scv_664*scv_664/scv_681;
scv_725=scv_725-scv_688*scv_688/scv_668;
scv_700=scv_700-X[-c7t+c8t-1][-c7t+c9-1]*scv_718/scv_663;
scv_736=scv_736-X[-c7t+(c8t+1)-1][-c7t+c9-1]*scv_756/scv_692;
scv_734=scv_734-X[-c7t+(c8t+2)-1][-c7t+c9-1]*scv_683/scv_740;
scv_713=scv_713-X[-c7t+(c8t+3)-1][-c7t+c9-1]*scv_767/scv_728;
scv_698=scv_698-X[-(c7t+1)+c8t-1][-(c7t+1)+c9-1]*scv_677/scv_751;
scv_686=scv_686-X[-(c7t+1)+(c8t+1)-1][-(c7t+1)+c9-1]*scv_764/scv_721;
scv_661=scv_661-X[-(c7t+1)+(c8t+2)-1][-(c7t+1)+c9-1]*scv_706/scv_697;
scv_709=scv_709-X[-(c7t+1)+(c8t+3)-1][-(c7t+1)+c9-1]*scv_770/scv_762;
scv_701=scv_701-X[-(c7t+2)+c8t-1][-(c7t+2)+c9-1]*scv_731/scv_705;
scv_719=scv_719-X[-(c7t+2)+(c8t+1)-1][-(c7t+2)+c9-1]*scv_738/scv_757;
scv_748=scv_748-X[-(c7t+2)+(c8t+2)-1][-(c7t+2)+c9-1]*scv_703/scv_743;
scv_730=scv_730-X[-(c7t+2)+(c8t+3)-1][-(c7t+2)+c9-1]*scv_670/scv_685;
scv_676=scv_676-X[-(c7t+3)+c8t-1][-(c7t+3)+c9-1]*scv_695/scv_694;
scv_724=scv_724-X[-(c7t+3)+(c8t+1)-1][-(c7t+3)+c9-1]*scv_766/scv_675;
scv_678=scv_678-X[-(c7t+3)+(c8t+2)-1][-(c7t+3)+c9-1]*scv_662/scv_712;
scv_733=scv_733-X[-(c7t+3)+(c8t+3)-1][-(c7t+3)+c9-1]*scv_665/scv_765;
scv_669=scv_669-scv_718*scv_718/scv_663;
scv_717=scv_717-scv_756*scv_756/scv_692;
scv_711=scv_711-scv_683*scv_683/scv_740;
scv_704=scv_704-scv_767*scv_767/scv_728;
scv_745=scv_745-scv_677*scv_677/scv_751;
scv_761=scv_761-scv_764*scv_764/scv_721;
scv_746=scv_746-scv_706*scv_706/scv_697;
scv_723=scv_723-scv_770*scv_770/scv_762;
scv_741=scv_741-scv_731*scv_731/scv_705;
scv_682=scv_682-scv_738*scv_738/scv_757;
scv_691=scv_691-scv_703*scv_703/scv_743;
scv_659=scv_659-scv_670*scv_670/scv_685;
scv_689=scv_689-scv_695*scv_695/scv_694;
scv_684=scv_684-scv_766*scv_766/scv_675;
scv_681=scv_681-scv_662*scv_662/scv_712;
scv_668=scv_668-scv_665*scv_665/scv_765;
B[-(c7t+2)+(c8t+3)][-(c7t+2)+c9-1]=scv_659;
B[-(c7t+2)+(c8t+2)][-(c7t+2)+c9]=scv_660;
X[-(c7t+1)+(c8t+2)][-(c7t+1)+c9-1]=scv_661;
X[-(c7t+1)+(c8t+3)][-(c7t+1)+c9]=scv_666;
B[-(c7t+3)+(c8t+3)][-(c7t+3)+c9-1]=scv_668;
B[-c7t+c8t][-c7t+c9-1]=scv_669;
X[-(c7t+1)+c8t][-(c7t+1)+c9]=scv_671;
B[-(c7t+1)+c8t][-(c7t+1)+c9]=scv_673;
B[-c7t+(c8t+2)][-c7t+c9]=scv_674;
X[-(c7t+3)+c8t][-(c7t+3)+c9-1]=scv_676;
X[-(c7t+3)+(c8t+2)][-(c7t+3)+c9-1]=scv_678;
X[-(c7t+1)+(c8t+2)][-(c7t+1)+c9]=scv_680;
B[-(c7t+3)+(c8t+2)][-(c7t+3)+c9-1]=scv_681;
B[-(c7t+2)+(c8t+1)][-(c7t+2)+c9-1]=scv_682;
B[-(c7t+3)+(c8t+1)][-(c7t+3)+c9-1]=scv_684;
X[-(c7t+1)+(c8t+1)][-(c7t+1)+c9-1]=scv_686;
X[-(c7t+3)+c8t][-(c7t+3)+c9]=scv_687;
B[-(c7t+3)+c8t][-(c7t+3)+c9-1]=scv_689;
X[-(c7t+3)+(c8t+1)][-(c7t+3)+c9]=scv_690;
B[-(c7t+2)+(c8t+2)][-(c7t+2)+c9-1]=scv_691;
X[-(c7t+3)+(c8t+3)][-(c7t+3)+c9]=scv_696;
X[-(c7t+1)+c8t][-(c7t+1)+c9-1]=scv_698;
X[-c7t+c8t][-c7t+c9-1]=scv_700;
X[-(c7t+2)+c8t][-(c7t+2)+c9-1]=scv_701;
B[-(c7t+3)+(c8t+2)][-(c7t+3)+c9]=scv_702;
B[-c7t+(c8t+3)][-c7t+c9-1]=scv_704;
B[-c7t+(c8t+1)][-c7t+c9]=scv_707;
X[-(c7t+2)+(c8t+1)][-(c7t+2)+c9]=scv_708;
X[-(c7t+1)+(c8t+3)][-(c7t+1)+c9-1]=scv_709;
X[-c7t+c8t][-c7t+c9]=scv_710;
B[-c7t+(c8t+2)][-c7t+c9-1]=scv_711;
X[-c7t+(c8t+3)][-c7t+c9-1]=scv_713;
X[-(c7t+2)+c8t][-(c7t+2)+c9]=scv_714;
B[-(c7t+2)+(c8t+1)][-(c7t+2)+c9]=scv_716;
B[-c7t+(c8t+1)][-c7t+c9-1]=scv_717;
X[-(c7t+2)+(c8t+1)][-(c7t+2)+c9-1]=scv_719;
X[-c7t+(c8t+2)][-c7t+c9]=scv_720;
X[-c7t+(c8t+1)][-c7t+c9]=scv_722;
B[-(c7t+1)+(c8t+3)][-(c7t+1)+c9-1]=scv_723;
X[-(c7t+3)+(c8t+1)][-(c7t+3)+c9-1]=scv_724;
B[-(c7t+3)+(c8t+3)][-(c7t+3)+c9]=scv_725;
B[-(c7t+2)+c8t][-(c7t+2)+c9]=scv_726;
X[-c7t+(c8t+3)][-c7t+c9]=scv_729;
X[-(c7t+2)+(c8t+3)][-(c7t+2)+c9-1]=scv_730;
B[-(c7t+1)+(c8t+3)][-(c7t+1)+c9]=scv_732;
X[-(c7t+3)+(c8t+3)][-(c7t+3)+c9-1]=scv_733;
X[-c7t+(c8t+2)][-c7t+c9-1]=scv_734;
B[-c7t+c8t][-c7t+c9]=scv_735;
X[-c7t+(c8t+1)][-c7t+c9-1]=scv_736;
B[-(c7t+3)+(c8t+1)][-(c7t+3)+c9]=scv_737;
B[-(c7t+2)+(c8t+3)][-(c7t+2)+c9]=scv_739;
B[-(c7t+2)+c8t][-(c7t+2)+c9-1]=scv_741;
X[-(c7t+3)+(c8t+2)][-(c7t+3)+c9]=scv_744;
B[-(c7t+1)+c8t][-(c7t+1)+c9-1]=scv_745;
B[-(c7t+1)+(c8t+2)][-(c7t+1)+c9-1]=scv_746;
X[-(c7t+2)+(c8t+2)][-(c7t+2)+c9-1]=scv_748;
X[-(c7t+2)+(c8t+3)][-(c7t+2)+c9]=scv_752;
B[-(c7t+1)+(c8t+1)][-(c7t+1)+c9]=scv_753;
B[-(c7t+1)+(c8t+2)][-(c7t+1)+c9]=scv_754;
X[-(c7t+2)+(c8t+2)][-(c7t+2)+c9]=scv_755;
X[-(c7t+1)+(c8t+1)][-(c7t+1)+c9]=scv_758;
B[-(c7t+3)+c8t][-(c7t+3)+c9]=scv_759;
B[-c7t+(c8t+3)][-c7t+c9]=scv_760;
B[-(c7t+1)+(c8t+1)][-(c7t+1)+c9-1]=scv_761;
}
#pragma ivdep
#pragma vector always
for (c8=c8t; c8<=newub_c8; c8=c8+1) {
double scv_771, scv_772, scv_773, scv_774, scv_775, scv_776, scv_777, scv_778;
double scv_779, scv_780, scv_781, scv_782, scv_783, scv_784, scv_785, scv_786;
double scv_787, scv_788, scv_789, scv_790, scv_791, scv_792, scv_793, scv_794;
double scv_795, scv_796, scv_797, scv_798;
scv_771=X[-(c7t+1)+c8][-(c7t+1)+c9];
scv_772=B[-(c7t+1)+c8-1][-(c7t+1)+c9-1];
scv_773=X[-(c7t+3)+c8][-(c7t+3)+c9];
scv_774=B[-(c7t+2)+c8][-(c7t+2)+c9];
scv_775=A[-(c7t+2)+c8][-(c7t+2)+c9-1];
scv_776=A[-c7t+c8][-c7t+c9];
scv_777=B[-c7t+c8][-c7t+c9];
scv_778=X[-c7t+c8][-c7t+c9];
scv_779=B[-(c7t+2)+c8-1][-(c7t+2)+c9-1];
scv_780=A[-(c7t+3)+c8][-(c7t+3)+c9-1];
scv_781=B[-(c7t+1)+c8][-(c7t+1)+c9];
scv_782=X[-(c7t+2)+c8][-(c7t+2)+c9];
scv_783=B[-c7t+c8-1][-c7t+c9-1];
scv_784=B[-c7t+c8][-c7t+c9-1];
scv_785=A[-(c7t+1)+c8][-(c7t+1)+c9];
scv_786=B[-(c7t+3)+c8][-(c7t+3)+c9];
scv_787=X[-(c7t+3)+c8][-(c7t+3)+c9-1];
scv_788=B[-(c7t+3)+c8-1][-(c7t+3)+c9-1];
scv_789=B[-(c7t+3)+c8][-(c7t+3)+c9-1];
scv_790=A[-c7t+c8][-c7t+c9-1];
scv_791=B[-(c7t+1)+c8][-(c7t+1)+c9-1];
scv_792=X[-c7t+c8][-c7t+c9-1];
scv_793=A[-(c7t+3)+c8][-(c7t+3)+c9];
scv_794=A[-(c7t+2)+c8][-(c7t+2)+c9];
scv_795=B[-(c7t+2)+c8][-(c7t+2)+c9-1];
scv_796=A[-(c7t+1)+c8][-(c7t+1)+c9-1];
scv_797=X[-(c7t+1)+c8][-(c7t+1)+c9-1];
scv_798=X[-(c7t+2)+c8][-(c7t+2)+c9-1];
scv_778=scv_778-scv_792*scv_776/scv_784;
scv_771=scv_771-scv_797*scv_785/scv_791;
scv_782=scv_782-scv_798*scv_794/scv_795;
scv_773=scv_773-scv_787*scv_793/scv_789;
scv_777=scv_777-scv_776*scv_776/scv_784;
scv_781=scv_781-scv_785*scv_785/scv_791;
scv_774=scv_774-scv_794*scv_794/scv_795;
scv_786=scv_786-scv_793*scv_793/scv_789;
scv_792=scv_792-X[-c7t+c8-1][-c7t+c9-1]*scv_790/scv_783;
scv_797=scv_797-X[-(c7t+1)+c8-1][-(c7t+1)+c9-1]*scv_796/scv_772;
scv_798=scv_798-X[-(c7t+2)+c8-1][-(c7t+2)+c9-1]*scv_775/scv_779;
scv_787=scv_787-X[-(c7t+3)+c8-1][-(c7t+3)+c9-1]*scv_780/scv_788;
scv_784=scv_784-scv_790*scv_790/scv_783;
scv_791=scv_791-scv_796*scv_796/scv_772;
scv_795=scv_795-scv_775*scv_775/scv_779;
scv_789=scv_789-scv_780*scv_780/scv_788;
X[-(c7t+1)+c8][-(c7t+1)+c9]=scv_771;
X[-(c7t+3)+c8][-(c7t+3)+c9]=scv_773;
B[-(c7t+2)+c8][-(c7t+2)+c9]=scv_774;
B[-c7t+c8][-c7t+c9]=scv_777;
X[-c7t+c8][-c7t+c9]=scv_778;
B[-(c7t+1)+c8][-(c7t+1)+c9]=scv_781;
X[-(c7t+2)+c8][-(c7t+2)+c9]=scv_782;
B[-c7t+c8][-c7t+c9-1]=scv_784;
B[-(c7t+3)+c8][-(c7t+3)+c9]=scv_786;
X[-(c7t+3)+c8][-(c7t+3)+c9-1]=scv_787;
B[-(c7t+3)+c8][-(c7t+3)+c9-1]=scv_789;
B[-(c7t+1)+c8][-(c7t+1)+c9-1]=scv_791;
X[-c7t+c8][-c7t+c9-1]=scv_792;
B[-(c7t+2)+c8][-(c7t+2)+c9-1]=scv_795;
X[-(c7t+1)+c8][-(c7t+1)+c9-1]=scv_797;
X[-(c7t+2)+c8][-(c7t+2)+c9-1]=scv_798;
}
for (c7=c7t; c7<=c7t+3; c7=c7+1) {
register int cbv_15, cbv_16;
cbv_15=newub_c8+1;
cbv_16=min(c7+N-1,16*c2+15);
#pragma ivdep
#pragma vector always
for (c8=cbv_15; c8<=cbv_16; c8=c8+1) {
double scv_799, scv_800, scv_801, scv_802, scv_803, scv_804, scv_805;
scv_799=X[-c7+c8][-c7+c9-1];
scv_800=X[-c7+c8][-c7+c9];
scv_801=B[-c7+c8][-c7+c9-1];
scv_802=B[-c7+c8][-c7+c9];
scv_803=A[-c7+c8][-c7+c9-1];
scv_804=A[-c7+c8][-c7+c9];
scv_805=B[-c7+c8-1][-c7+c9-1];
scv_800=scv_800-scv_799*scv_804/scv_801;
scv_802=scv_802-scv_804*scv_804/scv_801;
scv_799=scv_799-X[-c7+c8-1][-c7+c9-1]*scv_803/scv_805;
scv_801=scv_801-scv_803*scv_803/scv_805;
X[-c7+c8][-c7+c9-1]=scv_799;
X[-c7+c8][-c7+c9]=scv_800;
B[-c7+c8][-c7+c9-1]=scv_801;
B[-c7+c8][-c7+c9]=scv_802;
}
}
}
for (c7=c7t; c7<=c7t+3; c7=c7+1) {
for (c9=newub_c9+1; c9<=16*c6+15; c9=c9+1) {
register int cbv_17, cbv_18;
cbv_17=16*c2;
cbv_18=min(c7+N-1,16*c2+15)-3;
#pragma ivdep
#pragma vector always
for (c8t=cbv_17; c8t<=cbv_18; c8t=c8t+4) {
double scv_806, scv_807, scv_808, scv_809, scv_810, scv_811, scv_812, scv_813;
double scv_814, scv_815, scv_816, scv_817, scv_818, scv_819, scv_820, scv_821;
double scv_822, scv_823, scv_824, scv_825, scv_826, scv_827, scv_828, scv_829;
double scv_830, scv_831, scv_832, scv_833;
scv_806=X[-c7+(c8t+2)][-c7+c9-1];
scv_807=X[-c7+(c8t+1)][-c7+c9-1];
scv_808=B[-c7+(c8t+1)][-c7+c9];
scv_809=B[-c7+c8t][-c7+c9];
scv_810=B[-c7+(c8t+3)-1][-c7+c9-1];
scv_811=A[-c7+c8t][-c7+c9];
scv_812=B[-c7+c8t-1][-c7+c9-1];
scv_813=B[-c7+(c8t+2)][-c7+c9];
scv_814=B[-c7+(c8t+2)][-c7+c9-1];
scv_815=X[-c7+(c8t+3)][-c7+c9];
scv_816=A[-c7+(c8t+1)][-c7+c9];
scv_817=X[-c7+c8t][-c7+c9];
scv_818=B[-c7+(c8t+1)][-c7+c9-1];
scv_819=X[-c7+(c8t+1)][-c7+c9];
scv_820=B[-c7+(c8t+1)-1][-c7+c9-1];
scv_821=A[-c7+(c8t+1)][-c7+c9-1];
scv_822=B[-c7+(c8t+3)][-c7+c9-1];
scv_823=X[-c7+(c8t+3)][-c7+c9-1];
scv_824=B[-c7+c8t][-c7+c9-1];
scv_825=A[-c7+(c8t+3)][-c7+c9];
scv_826=A[-c7+(c8t+3)][-c7+c9-1];
scv_827=A[-c7+c8t][-c7+c9-1];
scv_828=B[-c7+(c8t+3)][-c7+c9];
scv_829=X[-c7+c8t][-c7+c9-1];
scv_830=A[-c7+(c8t+2)][-c7+c9-1];
scv_831=B[-c7+(c8t+2)-1][-c7+c9-1];
scv_832=A[-c7+(c8t+2)][-c7+c9];
scv_833=X[-c7+(c8t+2)][-c7+c9];
scv_817=scv_817-scv_829*scv_811/scv_824;
scv_819=scv_819-scv_807*scv_816/scv_818;
scv_833=scv_833-scv_806*scv_832/scv_814;
scv_815=scv_815-scv_823*scv_825/scv_822;
scv_809=scv_809-scv_811*scv_811/scv_824;
scv_808=scv_808-scv_816*scv_816/scv_818;
scv_813=scv_813-scv_832*scv_832/scv_814;
scv_828=scv_828-scv_825*scv_825/scv_822;
scv_829=scv_829-X[-c7+c8t-1][-c7+c9-1]*scv_827/scv_812;
scv_807=scv_807-X[-c7+(c8t+1)-1][-c7+c9-1]*scv_821/scv_820;
scv_806=scv_806-X[-c7+(c8t+2)-1][-c7+c9-1]*scv_830/scv_831;
scv_823=scv_823-X[-c7+(c8t+3)-1][-c7+c9-1]*scv_826/scv_810;
scv_824=scv_824-scv_827*scv_827/scv_812;
scv_818=scv_818-scv_821*scv_821/scv_820;
scv_814=scv_814-scv_830*scv_830/scv_831;
scv_822=scv_822-scv_826*scv_826/scv_810;
X[-c7+(c8t+2)][-c7+c9-1]=scv_806;
X[-c7+(c8t+1)][-c7+c9-1]=scv_807;
B[-c7+(c8t+1)][-c7+c9]=scv_808;
B[-c7+c8t][-c7+c9]=scv_809;
B[-c7+(c8t+2)][-c7+c9]=scv_813;
B[-c7+(c8t+2)][-c7+c9-1]=scv_814;
X[-c7+(c8t+3)][-c7+c9]=scv_815;
X[-c7+c8t][-c7+c9]=scv_817;
B[-c7+(c8t+1)][-c7+c9-1]=scv_818;
X[-c7+(c8t+1)][-c7+c9]=scv_819;
B[-c7+(c8t+3)][-c7+c9-1]=scv_822;
X[-c7+(c8t+3)][-c7+c9-1]=scv_823;
B[-c7+c8t][-c7+c9-1]=scv_824;
B[-c7+(c8t+3)][-c7+c9]=scv_828;
X[-c7+c8t][-c7+c9-1]=scv_829;
X[-c7+(c8t+2)][-c7+c9]=scv_833;
}
register int cbv_19;
cbv_19=min(c7+N-1,16*c2+15);
#pragma ivdep
#pragma vector always
for (c8=c8t; c8<=cbv_19; c8=c8+1) {
double scv_834, scv_835, scv_836, scv_837, scv_838, scv_839, scv_840;
scv_834=X[-c7+c8][-c7+c9-1];
scv_835=X[-c7+c8][-c7+c9];
scv_836=B[-c7+c8][-c7+c9-1];
scv_837=B[-c7+c8][-c7+c9];
scv_838=A[-c7+c8][-c7+c9-1];
scv_839=A[-c7+c8][-c7+c9];
scv_840=B[-c7+c8-1][-c7+c9-1];
scv_835=scv_835-scv_834*scv_839/scv_836;
scv_837=scv_837-scv_839*scv_839/scv_836;
scv_834=scv_834-X[-c7+c8-1][-c7+c9-1]*scv_838/scv_840;
scv_836=scv_836-scv_838*scv_838/scv_840;
X[-c7+c8][-c7+c9-1]=scv_834;
X[-c7+c8][-c7+c9]=scv_835;
B[-c7+c8][-c7+c9-1]=scv_836;
B[-c7+c8][-c7+c9]=scv_837;
}
}
}
}
for (c7=c7t; c7<=min(min(min(T-1,32*c1-32*c2+31),16*c6+14),16*c2-1); c7=c7+1) {
for (c9t=max(16*c6,c7+1); c9t<=16*c6+12; c9t=c9t+4) {
register int cbv_20, cbv_21;
cbv_20=16*c2;
cbv_21=min(c7+N-1,16*c2+15)-3;
#pragma ivdep
#pragma vector always
for (c8t=cbv_20; c8t<=cbv_21; c8t=c8t+4) {
double scv_841, scv_842, scv_843, scv_844, scv_845, scv_846, scv_847, scv_848;
double scv_849, scv_850, scv_851, scv_852, scv_853, scv_854, scv_855, scv_856;
double scv_857, scv_858, scv_859, scv_860, scv_861, scv_862, scv_863, scv_864;
double scv_865, scv_866, scv_867, scv_868, scv_869, scv_870, scv_871, scv_872;
double scv_873, scv_874, scv_875, scv_876, scv_877, scv_878, scv_879, scv_880;
double scv_881, scv_882, scv_883, scv_884, scv_885, scv_886, scv_887, scv_888;
double scv_889, scv_890, scv_891, scv_892, scv_893, scv_894, scv_895, scv_896;
double scv_897, scv_898, scv_899, scv_900, scv_901, scv_902, scv_903, scv_904;
double scv_905, scv_906, scv_907, scv_908, scv_909, scv_910, scv_911, scv_912;
double scv_913, scv_914, scv_915, scv_916, scv_917, scv_918, scv_919, scv_920;
double scv_921, scv_922, scv_923, scv_924, scv_925, scv_926, scv_927, scv_928;
double scv_929, scv_930, scv_931, scv_932, scv_933, scv_934, scv_935, scv_936;
double scv_937, scv_938, scv_939, scv_940, scv_941, scv_942, scv_943, scv_944;
double scv_945, scv_946, scv_947, scv_948, scv_949, scv_950, scv_951, scv_952;
scv_841=A[-c7+c8t][-c7+c9t];
scv_842=X[-c7+(c8t+2)][-c7+(c9t+2)-1];
scv_843=A[-c7+(c8t+1)][-c7+(c9t+3)-1];
scv_844=B[-c7+(c8t+3)][-c7+(c9t+3)];
scv_845=B[-c7+(c8t+1)][-c7+(c9t+1)-1];
scv_846=B[-c7+(c8t+2)][-c7+(c9t+2)-1];
scv_847=B[-c7+(c8t+1)-1][-c7+(c9t+1)-1];
scv_848=X[-c7+(c8t+2)][-c7+(c9t+1)-1];
scv_849=A[-c7+c8t][-c7+c9t-1];
scv_850=B[-c7+(c8t+3)-1][-c7+(c9t+3)-1];
scv_851=A[-c7+(c8t+2)][-c7+(c9t+3)];
scv_852=B[-c7+c8t-1][-c7+(c9t+1)-1];
scv_853=B[-c7+(c8t+2)][-c7+(c9t+3)-1];
scv_854=X[-c7+(c8t+3)][-c7+(c9t+1)-1];
scv_855=A[-c7+c8t][-c7+(c9t+1)-1];
scv_856=B[-c7+(c8t+1)][-c7+(c9t+1)];
scv_857=B[-c7+(c8t+1)-1][-c7+c9t-1];
scv_858=A[-c7+c8t][-c7+(c9t+2)];
scv_859=A[-c7+(c8t+3)][-c7+(c9t+2)];
scv_860=A[-c7+(c8t+3)][-c7+(c9t+3)-1];
scv_861=B[-c7+c8t][-c7+(c9t+2)-1];
scv_862=A[-c7+(c8t+3)][-c7+(c9t+1)-1];
scv_863=B[-c7+(c8t+3)][-c7+(c9t+1)-1];
scv_864=X[-c7+(c8t+1)][-c7+(c9t+2)-1];
scv_865=B[-c7+(c8t+2)-1][-c7+c9t-1];
scv_866=A[-c7+(c8t+1)][-c7+(c9t+1)-1];
scv_867=B[-c7+(c8t+1)][-c7+(c9t+3)];
scv_868=A[-c7+(c8t+2)][-c7+(c9t+1)-1];
scv_869=X[-c7+(c8t+1)][-c7+c9t-1];
scv_870=X[-c7+(c8t+1)][-c7+c9t];
scv_871=B[-c7+(c8t+1)-1][-c7+(c9t+3)-1];
scv_872=A[-c7+c8t][-c7+(c9t+2)-1];
scv_873=X[-c7+(c8t+1)][-c7+(c9t+1)-1];
scv_874=B[-c7+c8t][-c7+c9t-1];
scv_875=B[-c7+c8t][-c7+(c9t+1)-1];
scv_876=B[-c7+(c8t+2)][-c7+(c9t+2)];
scv_877=X[-c7+c8t][-c7+(c9t+2)];
scv_878=A[-c7+(c8t+1)][-c7+(c9t+2)];
scv_879=B[-c7+c8t-1][-c7+c9t-1];
scv_880=B[-c7+c8t][-c7+(c9t+3)-1];
scv_881=B[-c7+(c8t+3)-1][-c7+(c9t+2)-1];
scv_882=X[-c7+c8t][-c7+(c9t+1)];
scv_883=A[-c7+(c8t+1)][-c7+(c9t+1)];
scv_884=B[-c7+c8t][-c7+(c9t+3)];
scv_885=X[-c7+(c8t+1)][-c7+(c9t+1)];
scv_886=A[-c7+c8t][-c7+(c9t+3)-1];
scv_887=X[-c7+(c8t+2)][-c7+(c9t+3)];
scv_888=X[-c7+(c8t+3)][-c7+(c9t+1)];
scv_889=X[-c7+c8t][-c7+(c9t+1)-1];
scv_890=A[-c7+(c8t+2)][-c7+(c9t+3)-1];
scv_891=X[-c7+(c8t+1)][-c7+(c9t+2)];
scv_892=B[-c7+(c8t+3)][-c7+c9t-1];
scv_893=X[-c7+(c8t+3)][-c7+(c9t+2)];
scv_894=B[-c7+(c8t+2)-1][-c7+(c9t+1)-1];
scv_895=A[-c7+(c8t+3)][-c7+c9t-1];
scv_896=B[-c7+(c8t+2)-1][-c7+(c9t+2)-1];
scv_897=B[-c7+(c8t+1)][-c7+(c9t+2)];
scv_898=B[-c7+(c8t+3)][-c7+(c9t+1)];
scv_899=B[-c7+(c8t+1)][-c7+c9t-1];
scv_900=B[-c7+(c8t+2)-1][-c7+(c9t+3)-1];
scv_901=A[-c7+(c8t+1)][-c7+c9t-1];
scv_902=X[-c7+(c8t+3)][-c7+c9t];
scv_903=B[-c7+(c8t+3)][-c7+(c9t+2)];
scv_904=B[-c7+(c8t+1)-1][-c7+(c9t+2)-1];
scv_905=B[-c7+(c8t+2)][-c7+(c9t+1)-1];
scv_906=X[-c7+(c8t+2)][-c7+c9t-1];
scv_907=B[-c7+(c8t+1)][-c7+(c9t+3)-1];
scv_908=A[-c7+(c8t+3)][-c7+(c9t+3)];
scv_909=B[-c7+(c8t+3)][-c7+(c9t+3)-1];
scv_910=X[-c7+(c8t+3)][-c7+(c9t+2)-1];
scv_911=X[-c7+(c8t+2)][-c7+(c9t+2)];
scv_912=B[-c7+c8t][-c7+c9t];
scv_913=A[-c7+(c8t+2)][-c7+c9t];
scv_914=A[-c7+c8t][-c7+(c9t+3)];
scv_915=B[-c7+(c8t+1)][-c7+(c9t+2)-1];
scv_916=B[-c7+(c8t+3)-1][-c7+(c9t+1)-1];
scv_917=A[-c7+(c8t+1)][-c7+c9t];
scv_918=X[-c7+(c8t+2)][-c7+(c9t+3)-1];
scv_919=B[-c7+c8t][-c7+(c9t+2)];
scv_920=X[-c7+(c8t+3)][-c7+c9t-1];
scv_921=A[-c7+(c8t+2)][-c7+c9t-1];
scv_922=X[-c7+c8t][-c7+(c9t+2)-1];
scv_923=B[-c7+(c8t+3)-1][-c7+c9t-1];
scv_924=B[-c7+(c8t+2)][-c7+c9t];
scv_925=X[-c7+(c8t+2)][-c7+c9t];
scv_926=X[-c7+(c8t+3)][-c7+(c9t+3)-1];
scv_927=B[-c7+(c8t+1)][-c7+c9t];
scv_928=A[-c7+(c8t+1)][-c7+(c9t+2)-1];
scv_929=X[-c7+(c8t+1)][-c7+(c9t+3)];
scv_930=A[-c7+(c8t+3)][-c7+c9t];
scv_931=B[-c7+(c8t+2)][-c7+(c9t+3)];
scv_932=B[-c7+c8t-1][-c7+(c9t+3)-1];
scv_933=B[-c7+(c8t+2)][-c7+(c9t+1)];
scv_934=A[-c7+(c8t+3)][-c7+(c9t+2)-1];
scv_935=A[-c7+(c8t+3)][-c7+(c9t+1)];
scv_936=X[-c7+c8t][-c7+(c9t+3)];
scv_937=B[-c7+(c8t+3)][-c7+(c9t+2)-1];
scv_938=B[-c7+(c8t+3)][-c7+c9t];
scv_939=A[-c7+(c8t+2)][-c7+(c9t+1)];
scv_940=B[-c7+c8t][-c7+(c9t+1)];
scv_941=X[-c7+c8t][-c7+(c9t+3)-1];
scv_942=X[-c7+c8t][-c7+c9t];
scv_943=B[-c7+(c8t+2)][-c7+c9t-1];
scv_944=X[-c7+(c8t+2)][-c7+(c9t+1)];
scv_945=A[-c7+(c8t+2)][-c7+(c9t+2)];
scv_946=A[-c7+(c8t+2)][-c7+(c9t+2)-1];
scv_947=A[-c7+(c8t+1)][-c7+(c9t+3)];
scv_948=B[-c7+c8t-1][-c7+(c9t+2)-1];
scv_949=A[-c7+c8t][-c7+(c9t+1)];
scv_950=X[-c7+c8t][-c7+c9t-1];
scv_951=X[-c7+(c8t+3)][-c7+(c9t+3)];
scv_952=X[-c7+(c8t+1)][-c7+(c9t+3)-1];
scv_942=scv_942-scv_950*scv_841/scv_874;
scv_870=scv_870-scv_869*scv_917/scv_899;
scv_925=scv_925-scv_906*scv_913/scv_943;
scv_902=scv_902-scv_920*scv_930/scv_892;
scv_882=scv_882-scv_889*scv_949/scv_875;
scv_885=scv_885-scv_873*scv_883/scv_845;
scv_944=scv_944-scv_848*scv_939/scv_905;
scv_888=scv_888-scv_854*scv_935/scv_863;
scv_877=scv_877-scv_922*scv_858/scv_861;
scv_891=scv_891-scv_864*scv_878/scv_915;
scv_911=scv_911-scv_842*scv_945/scv_846;
scv_893=scv_893-scv_910*scv_859/scv_937;
scv_936=scv_936-scv_941*scv_914/scv_880;
scv_929=scv_929-scv_952*scv_947/scv_907;
scv_887=scv_887-scv_918*scv_851/scv_853;
scv_951=scv_951-scv_926*scv_908/scv_909;
scv_912=scv_912-scv_841*scv_841/scv_874;
scv_927=scv_927-scv_917*scv_917/scv_899;
scv_924=scv_924-scv_913*scv_913/scv_943;
scv_938=scv_938-scv_930*scv_930/scv_892;
scv_940=scv_940-scv_949*scv_949/scv_875;
scv_856=scv_856-scv_883*scv_883/scv_845;
scv_933=scv_933-scv_939*scv_939/scv_905;
scv_898=scv_898-scv_935*scv_935/scv_863;
scv_919=scv_919-scv_858*scv_858/scv_861;
scv_897=scv_897-scv_878*scv_878/scv_915;
scv_876=scv_876-scv_945*scv_945/scv_846;
scv_903=scv_903-scv_859*scv_859/scv_937;
scv_884=scv_884-scv_914*scv_914/scv_880;
scv_867=scv_867-scv_947*scv_947/scv_907;
scv_931=scv_931-scv_851*scv_851/scv_853;
scv_844=scv_844-scv_908*scv_908/scv_909;
scv_950=scv_950-X[-c7+c8t-1][-c7+c9t-1]*scv_849/scv_879;
scv_869=scv_869-X[-c7+(c8t+1)-1][-c7+c9t-1]*scv_901/scv_857;
scv_906=scv_906-X[-c7+(c8t+2)-1][-c7+c9t-1]*scv_921/scv_865;
scv_920=scv_920-X[-c7+(c8t+3)-1][-c7+c9t-1]*scv_895/scv_923;
scv_889=scv_889-X[-c7+c8t-1][-c7+(c9t+1)-1]*scv_855/scv_852;
scv_873=scv_873-X[-c7+(c8t+1)-1][-c7+(c9t+1)-1]*scv_866/scv_847;
scv_848=scv_848-X[-c7+(c8t+2)-1][-c7+(c9t+1)-1]*scv_868/scv_894;
scv_854=scv_854-X[-c7+(c8t+3)-1][-c7+(c9t+1)-1]*scv_862/scv_916;
scv_922=scv_922-X[-c7+c8t-1][-c7+(c9t+2)-1]*scv_872/scv_948;
scv_864=scv_864-X[-c7+(c8t+1)-1][-c7+(c9t+2)-1]*scv_928/scv_904;
scv_842=scv_842-X[-c7+(c8t+2)-1][-c7+(c9t+2)-1]*scv_946/scv_896;
scv_910=scv_910-X[-c7+(c8t+3)-1][-c7+(c9t+2)-1]*scv_934/scv_881;
scv_941=scv_941-X[-c7+c8t-1][-c7+(c9t+3)-1]*scv_886/scv_932;
scv_952=scv_952-X[-c7+(c8t+1)-1][-c7+(c9t+3)-1]*scv_843/scv_871;
scv_918=scv_918-X[-c7+(c8t+2)-1][-c7+(c9t+3)-1]*scv_890/scv_900;
scv_926=scv_926-X[-c7+(c8t+3)-1][-c7+(c9t+3)-1]*scv_860/scv_850;
scv_874=scv_874-scv_849*scv_849/scv_879;
scv_899=scv_899-scv_901*scv_901/scv_857;
scv_943=scv_943-scv_921*scv_921/scv_865;
scv_892=scv_892-scv_895*scv_895/scv_923;
scv_875=scv_875-scv_855*scv_855/scv_852;
scv_845=scv_845-scv_866*scv_866/scv_847;
scv_905=scv_905-scv_868*scv_868/scv_894;
scv_863=scv_863-scv_862*scv_862/scv_916;
scv_861=scv_861-scv_872*scv_872/scv_948;
scv_915=scv_915-scv_928*scv_928/scv_904;
scv_846=scv_846-scv_946*scv_946/scv_896;
scv_937=scv_937-scv_934*scv_934/scv_881;
scv_880=scv_880-scv_886*scv_886/scv_932;
scv_907=scv_907-scv_843*scv_843/scv_871;
scv_853=scv_853-scv_890*scv_890/scv_900;
scv_909=scv_909-scv_860*scv_860/scv_850;
X[-c7+(c8t+2)][-c7+(c9t+2)-1]=scv_842;
B[-c7+(c8t+3)][-c7+(c9t+3)]=scv_844;
B[-c7+(c8t+1)][-c7+(c9t+1)-1]=scv_845;
B[-c7+(c8t+2)][-c7+(c9t+2)-1]=scv_846;
X[-c7+(c8t+2)][-c7+(c9t+1)-1]=scv_848;
B[-c7+(c8t+2)][-c7+(c9t+3)-1]=scv_853;
X[-c7+(c8t+3)][-c7+(c9t+1)-1]=scv_854;
B[-c7+(c8t+1)][-c7+(c9t+1)]=scv_856;
B[-c7+c8t][-c7+(c9t+2)-1]=scv_861;
B[-c7+(c8t+3)][-c7+(c9t+1)-1]=scv_863;
X[-c7+(c8t+1)][-c7+(c9t+2)-1]=scv_864;
B[-c7+(c8t+1)][-c7+(c9t+3)]=scv_867;
X[-c7+(c8t+1)][-c7+c9t-1]=scv_869;
X[-c7+(c8t+1)][-c7+c9t]=scv_870;
X[-c7+(c8t+1)][-c7+(c9t+1)-1]=scv_873;
B[-c7+c8t][-c7+c9t-1]=scv_874;
B[-c7+c8t][-c7+(c9t+1)-1]=scv_875;
B[-c7+(c8t+2)][-c7+(c9t+2)]=scv_876;
X[-c7+c8t][-c7+(c9t+2)]=scv_877;
B[-c7+c8t][-c7+(c9t+3)-1]=scv_880;
X[-c7+c8t][-c7+(c9t+1)]=scv_882;
B[-c7+c8t][-c7+(c9t+3)]=scv_884;
X[-c7+(c8t+1)][-c7+(c9t+1)]=scv_885;
X[-c7+(c8t+2)][-c7+(c9t+3)]=scv_887;
X[-c7+(c8t+3)][-c7+(c9t+1)]=scv_888;
X[-c7+c8t][-c7+(c9t+1)-1]=scv_889;
X[-c7+(c8t+1)][-c7+(c9t+2)]=scv_891;
B[-c7+(c8t+3)][-c7+c9t-1]=scv_892;
X[-c7+(c8t+3)][-c7+(c9t+2)]=scv_893;
B[-c7+(c8t+1)][-c7+(c9t+2)]=scv_897;
B[-c7+(c8t+3)][-c7+(c9t+1)]=scv_898;
B[-c7+(c8t+1)][-c7+c9t-1]=scv_899;
X[-c7+(c8t+3)][-c7+c9t]=scv_902;
B[-c7+(c8t+3)][-c7+(c9t+2)]=scv_903;
B[-c7+(c8t+2)][-c7+(c9t+1)-1]=scv_905;
X[-c7+(c8t+2)][-c7+c9t-1]=scv_906;
B[-c7+(c8t+1)][-c7+(c9t+3)-1]=scv_907;
B[-c7+(c8t+3)][-c7+(c9t+3)-1]=scv_909;
X[-c7+(c8t+3)][-c7+(c9t+2)-1]=scv_910;
X[-c7+(c8t+2)][-c7+(c9t+2)]=scv_911;
B[-c7+c8t][-c7+c9t]=scv_912;
B[-c7+(c8t+1)][-c7+(c9t+2)-1]=scv_915;
X[-c7+(c8t+2)][-c7+(c9t+3)-1]=scv_918;
B[-c7+c8t][-c7+(c9t+2)]=scv_919;
X[-c7+(c8t+3)][-c7+c9t-1]=scv_920;
X[-c7+c8t][-c7+(c9t+2)-1]=scv_922;
B[-c7+(c8t+2)][-c7+c9t]=scv_924;
X[-c7+(c8t+2)][-c7+c9t]=scv_925;
X[-c7+(c8t+3)][-c7+(c9t+3)-1]=scv_926;
B[-c7+(c8t+1)][-c7+c9t]=scv_927;
X[-c7+(c8t+1)][-c7+(c9t+3)]=scv_929;
B[-c7+(c8t+2)][-c7+(c9t+3)]=scv_931;
B[-c7+(c8t+2)][-c7+(c9t+1)]=scv_933;
X[-c7+c8t][-c7+(c9t+3)]=scv_936;
B[-c7+(c8t+3)][-c7+(c9t+2)-1]=scv_937;
B[-c7+(c8t+3)][-c7+c9t]=scv_938;
B[-c7+c8t][-c7+(c9t+1)]=scv_940;
X[-c7+c8t][-c7+(c9t+3)-1]=scv_941;
X[-c7+c8t][-c7+c9t]=scv_942;
B[-c7+(c8t+2)][-c7+c9t-1]=scv_943;
X[-c7+(c8t+2)][-c7+(c9t+1)]=scv_944;
X[-c7+c8t][-c7+c9t-1]=scv_950;
X[-c7+(c8t+3)][-c7+(c9t+3)]=scv_951;
X[-c7+(c8t+1)][-c7+(c9t+3)-1]=scv_952;
}
register int cbv_22;
cbv_22=min(c7+N-1,16*c2+15);
#pragma ivdep
#pragma vector always
for (c8=c8t; c8<=cbv_22; c8=c8+1) {
double scv_953, scv_954, scv_955, scv_956, scv_957, scv_958, scv_959, scv_960;
double scv_961, scv_962, scv_963, scv_964, scv_965, scv_966, scv_967, scv_968;
double scv_969, scv_970, scv_971, scv_972, scv_973, scv_974, scv_975, scv_976;
double scv_977, scv_978, scv_979, scv_980;
scv_953=B[-c7+c8-1][-c7+(c9t+2)-1];
scv_954=B[-c7+c8][-c7+(c9t+1)-1];
scv_955=X[-c7+c8][-c7+(c9t+2)];
scv_956=A[-c7+c8][-c7+(c9t+3)-1];
scv_957=X[-c7+c8][-c7+(c9t+3)-1];
scv_958=X[-c7+c8][-c7+(c9t+3)];
scv_959=B[-c7+c8-1][-c7+c9t-1];
scv_960=X[-c7+c8][-c7+(c9t+1)-1];
scv_961=B[-c7+c8][-c7+c9t-1];
scv_962=B[-c7+c8-1][-c7+(c9t+1)-1];
scv_963=X[-c7+c8][-c7+(c9t+1)];
scv_964=B[-c7+c8][-c7+(c9t+2)];
scv_965=B[-c7+c8][-c7+(c9t+1)];
scv_966=A[-c7+c8][-c7+c9t-1];
scv_967=A[-c7+c8][-c7+(c9t+3)];
scv_968=A[-c7+c8][-c7+(c9t+2)-1];
scv_969=B[-c7+c8][-c7+(c9t+3)];
scv_970=A[-c7+c8][-c7+(c9t+1)-1];
scv_971=B[-c7+c8][-c7+(c9t+3)-1];
scv_972=B[-c7+c8-1][-c7+(c9t+3)-1];
scv_973=A[-c7+c8][-c7+c9t];
scv_974=A[-c7+c8][-c7+(c9t+1)];
scv_975=B[-c7+c8][-c7+(c9t+2)-1];
scv_976=A[-c7+c8][-c7+(c9t+2)];
scv_977=X[-c7+c8][-c7+c9t];
scv_978=B[-c7+c8][-c7+c9t];
scv_979=X[-c7+c8][-c7+(c9t+2)-1];
scv_980=X[-c7+c8][-c7+c9t-1];
scv_977=scv_977-scv_980*scv_973/scv_961;
scv_963=scv_963-scv_960*scv_974/scv_954;
scv_955=scv_955-scv_979*scv_976/scv_975;
scv_958=scv_958-scv_957*scv_967/scv_971;
scv_978=scv_978-scv_973*scv_973/scv_961;
scv_965=scv_965-scv_974*scv_974/scv_954;
scv_964=scv_964-scv_976*scv_976/scv_975;
scv_969=scv_969-scv_967*scv_967/scv_971;
scv_980=scv_980-X[-c7+c8-1][-c7+c9t-1]*scv_966/scv_959;
scv_960=scv_960-X[-c7+c8-1][-c7+(c9t+1)-1]*scv_970/scv_962;
scv_979=scv_979-X[-c7+c8-1][-c7+(c9t+2)-1]*scv_968/scv_953;
scv_957=scv_957-X[-c7+c8-1][-c7+(c9t+3)-1]*scv_956/scv_972;
scv_961=scv_961-scv_966*scv_966/scv_959;
scv_954=scv_954-scv_970*scv_970/scv_962;
scv_975=scv_975-scv_968*scv_968/scv_953;
scv_971=scv_971-scv_956*scv_956/scv_972;
B[-c7+c8][-c7+(c9t+1)-1]=scv_954;
X[-c7+c8][-c7+(c9t+2)]=scv_955;
X[-c7+c8][-c7+(c9t+3)-1]=scv_957;
X[-c7+c8][-c7+(c9t+3)]=scv_958;
X[-c7+c8][-c7+(c9t+1)-1]=scv_960;
B[-c7+c8][-c7+c9t-1]=scv_961;
X[-c7+c8][-c7+(c9t+1)]=scv_963;
B[-c7+c8][-c7+(c9t+2)]=scv_964;
B[-c7+c8][-c7+(c9t+1)]=scv_965;
B[-c7+c8][-c7+(c9t+3)]=scv_969;
B[-c7+c8][-c7+(c9t+3)-1]=scv_971;
B[-c7+c8][-c7+(c9t+2)-1]=scv_975;
X[-c7+c8][-c7+c9t]=scv_977;
B[-c7+c8][-c7+c9t]=scv_978;
X[-c7+c8][-c7+(c9t+2)-1]=scv_979;
X[-c7+c8][-c7+c9t-1]=scv_980;
}
}
for (c9=c9t; c9<=16*c6+15; c9=c9+1) {
register int cbv_23, cbv_24;
cbv_23=16*c2;
cbv_24=min(c7+N-1,16*c2+15)-3;
#pragma ivdep
#pragma vector always
for (c8t=cbv_23; c8t<=cbv_24; c8t=c8t+4) {
double scv_981, scv_982, scv_983, scv_984, scv_985, scv_986, scv_987, scv_988;
double scv_989, scv_990, scv_991, scv_992, scv_993, scv_994, scv_995, scv_996;
double scv_997, scv_998, scv_999, scv_1000, scv_1001, scv_1002, scv_1003, scv_1004;
double scv_1005, scv_1006, scv_1007, scv_1008;
scv_981=X[-c7+(c8t+2)][-c7+c9-1];
scv_982=X[-c7+(c8t+1)][-c7+c9-1];
scv_983=B[-c7+(c8t+1)][-c7+c9];
scv_984=B[-c7+c8t][-c7+c9];
scv_985=B[-c7+(c8t+3)-1][-c7+c9-1];
scv_986=A[-c7+c8t][-c7+c9];
scv_987=B[-c7+c8t-1][-c7+c9-1];
scv_988=B[-c7+(c8t+2)][-c7+c9];
scv_989=B[-c7+(c8t+2)][-c7+c9-1];
scv_990=X[-c7+(c8t+3)][-c7+c9];
scv_991=A[-c7+(c8t+1)][-c7+c9];
scv_992=X[-c7+c8t][-c7+c9];
scv_993=B[-c7+(c8t+1)][-c7+c9-1];
scv_994=X[-c7+(c8t+1)][-c7+c9];
scv_995=B[-c7+(c8t+1)-1][-c7+c9-1];
scv_996=A[-c7+(c8t+1)][-c7+c9-1];
scv_997=B[-c7+(c8t+3)][-c7+c9-1];
scv_998=X[-c7+(c8t+3)][-c7+c9-1];
scv_999=B[-c7+c8t][-c7+c9-1];
scv_1000=A[-c7+(c8t+3)][-c7+c9];
scv_1001=A[-c7+(c8t+3)][-c7+c9-1];
scv_1002=A[-c7+c8t][-c7+c9-1];
scv_1003=B[-c7+(c8t+3)][-c7+c9];
scv_1004=X[-c7+c8t][-c7+c9-1];
scv_1005=A[-c7+(c8t+2)][-c7+c9-1];
scv_1006=B[-c7+(c8t+2)-1][-c7+c9-1];
scv_1007=A[-c7+(c8t+2)][-c7+c9];
scv_1008=X[-c7+(c8t+2)][-c7+c9];
scv_992=scv_992-scv_1004*scv_986/scv_999;
scv_994=scv_994-scv_982*scv_991/scv_993;
scv_1008=scv_1008-scv_981*scv_1007/scv_989;
scv_990=scv_990-scv_998*scv_1000/scv_997;
scv_984=scv_984-scv_986*scv_986/scv_999;
scv_983=scv_983-scv_991*scv_991/scv_993;
scv_988=scv_988-scv_1007*scv_1007/scv_989;
scv_1003=scv_1003-scv_1000*scv_1000/scv_997;
scv_1004=scv_1004-X[-c7+c8t-1][-c7+c9-1]*scv_1002/scv_987;
scv_982=scv_982-X[-c7+(c8t+1)-1][-c7+c9-1]*scv_996/scv_995;
scv_981=scv_981-X[-c7+(c8t+2)-1][-c7+c9-1]*scv_1005/scv_1006;
scv_998=scv_998-X[-c7+(c8t+3)-1][-c7+c9-1]*scv_1001/scv_985;
scv_999=scv_999-scv_1002*scv_1002/scv_987;
scv_993=scv_993-scv_996*scv_996/scv_995;
scv_989=scv_989-scv_1005*scv_1005/scv_1006;
scv_997=scv_997-scv_1001*scv_1001/scv_985;
X[-c7+(c8t+2)][-c7+c9-1]=scv_981;
X[-c7+(c8t+1)][-c7+c9-1]=scv_982;
B[-c7+(c8t+1)][-c7+c9]=scv_983;
B[-c7+c8t][-c7+c9]=scv_984;
B[-c7+(c8t+2)][-c7+c9]=scv_988;
B[-c7+(c8t+2)][-c7+c9-1]=scv_989;
X[-c7+(c8t+3)][-c7+c9]=scv_990;
X[-c7+c8t][-c7+c9]=scv_992;
B[-c7+(c8t+1)][-c7+c9-1]=scv_993;
X[-c7+(c8t+1)][-c7+c9]=scv_994;
B[-c7+(c8t+3)][-c7+c9-1]=scv_997;
X[-c7+(c8t+3)][-c7+c9-1]=scv_998;
B[-c7+c8t][-c7+c9-1]=scv_999;
B[-c7+(c8t+3)][-c7+c9]=scv_1003;
X[-c7+c8t][-c7+c9-1]=scv_1004;
X[-c7+(c8t+2)][-c7+c9]=scv_1008;
}
register int cbv_25;
cbv_25=min(c7+N-1,16*c2+15);
#pragma ivdep
#pragma vector always
for (c8=c8t; c8<=cbv_25; c8=c8+1) {
double scv_1009, scv_1010, scv_1011, scv_1012, scv_1013, scv_1014, scv_1015;
scv_1009=X[-c7+c8][-c7+c9-1];
scv_1010=X[-c7+c8][-c7+c9];
scv_1011=B[-c7+c8][-c7+c9-1];
scv_1012=B[-c7+c8][-c7+c9];
scv_1013=A[-c7+c8][-c7+c9-1];
scv_1014=A[-c7+c8][-c7+c9];
scv_1015=B[-c7+c8-1][-c7+c9-1];
scv_1010=scv_1010-scv_1009*scv_1014/scv_1011;
scv_1012=scv_1012-scv_1014*scv_1014/scv_1011;
scv_1009=scv_1009-X[-c7+c8-1][-c7+c9-1]*scv_1013/scv_1015;
scv_1011=scv_1011-scv_1013*scv_1013/scv_1015;
X[-c7+c8][-c7+c9-1]=scv_1009;
X[-c7+c8][-c7+c9]=scv_1010;
B[-c7+c8][-c7+c9-1]=scv_1011;
B[-c7+c8][-c7+c9]=scv_1012;
}
}
}
}
for (c7=max(max(max(16*c2,16*c6-N+16),32*c1-32*c2),0);c7<=min(min(min(T-1,32*c1-32*c2+31),16*c2+14),16*c6+14);c7++) {
for (c9=max(c7+1,16*c6);c9<=16*c6+15;c9++) {
X[0][-c7+c9]=X[0][-c7+c9]-X[0][-c7+c9-1]*A[0][-c7+c9]/B[0][-c7+c9-1] ;
B[0][-c7+c9]=B[0][-c7+c9]-A[0][-c7+c9]*A[0][-c7+c9]/B[0][-c7+c9-1] ;
}
for (c8=c7+1;c8<=16*c2+15;c8++) {
for (c9=max(16*c6,c7+1);c9<=16*c6+15;c9++) {
X[-c7+c8][-c7+c9]=X[-c7+c8][-c7+c9]-X[-c7+c8][-c7+c9-1]*A[-c7+c8][-c7+c9]/B[-c7+c8][-c7+c9-1] ;
B[-c7+c8][-c7+c9]=B[-c7+c8][-c7+c9]-A[-c7+c8][-c7+c9]*A[-c7+c8][-c7+c9]/B[-c7+c8][-c7+c9-1] ;
X[-c7+c8][-c7+c9-1]=X[-c7+c8][-c7+c9-1]-X[-c7+c8-1][-c7+c9-1]*A[-c7+c8][-c7+c9-1]/B[-c7+c8-1][-c7+c9-1] ;
B[-c7+c8][-c7+c9-1]=B[-c7+c8][-c7+c9-1]-A[-c7+c8][-c7+c9-1]*A[-c7+c8][-c7+c9-1]/B[-c7+c8-1][-c7+c9-1] ;
}
}
}
if ((c1 >= ceild(3*c2-1,2)) && (c2 <= min(floord(T-16,16),floord(16*c6-1,16)))) {
for (c9=max(16*c2+16,16*c6);c9<=min(16*c2+N+14,16*c6+15);c9++) {
X[0][-16*c2+c9-15]=X[0][-16*c2+c9-15]-X[0][-16*c2+c9-15 -1]*A[0][-16*c2+c9-15]/B[0][-16*c2+c9-15 -1] ;
B[0][-16*c2+c9-15]=B[0][-16*c2+c9-15]-A[0][-16*c2+c9-15]*A[0][-16*c2+c9-15]/B[0][-16*c2+c9-15 -1] ;
}
}
}
}
}
}
/* End of CLooG code */
annot_t_end = rtclock();
annot_t_total += annot_t_end - annot_t_start;
}
annot_t_total = annot_t_total / REPS;
printf("%f\n", annot_t_total);
return ((int) B[0][0]);
}
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) Blender Foundation
* All rights reserved.
*/
/** \file
* \ingroup bph
*/
#include "implicit.h"
#ifdef IMPLICIT_SOLVER_BLENDER
# include "MEM_guardedalloc.h"
# include "DNA_meshdata_types.h"
# include "DNA_object_force_types.h"
# include "DNA_object_types.h"
# include "DNA_scene_types.h"
# include "DNA_texture_types.h"
# include "BLI_math.h"
# include "BLI_utildefines.h"
# include "BKE_cloth.h"
# include "BKE_collision.h"
# include "BKE_effect.h"
# include "SIM_mass_spring.h"
# ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wtype-limits"
# endif
# ifdef _OPENMP
# define CLOTH_OPENMP_LIMIT 512
# endif
//#define DEBUG_TIME
# ifdef DEBUG_TIME
# include "PIL_time.h"
# endif
/* Constant 3x3 identity matrix. */
static float I[3][3] = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};
/* Constant 3x3 zero matrix, used to clear off-diagonal blocks. */
static float ZERO[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}};
/* Dead (#if 0) fallback definition of DO_INLINE; the live definition comes
 * from implicit.h. Fixed: the original was missing the space between
 * `define` and `DO_INLINE`, which would not compile if ever enabled. */
# if 0
#   define C99
#   ifdef C99
#     define DO_INLINE inline
#   else
#     define DO_INLINE static
#   endif
# endif /* if 0 */
struct Cloth;
//////////////////////////////////////////
/* fast vector / matrix library, enhancements are welcome :) -dg */
/////////////////////////////////////////
/* DEFINITIONS */
typedef float lfVector[3];
typedef struct fmatrix3x3 {
float m[3][3]; /* 3x3 matrix */
unsigned int c, r; /* column and row number */
// int pinned; /* is this vertex allowed to move? */
float n1, n2, n3; /* three normal vectors for collision constrains */
unsigned int vcount; /* vertex count */
unsigned int scount; /* spring count */
} fmatrix3x3;
///////////////////////////
/* float[3] vector */
///////////////////////////
/* simple vector code */
/* STATUS: verified */
/* Scale a float[3]: to = from * scalar. */
DO_INLINE void mul_fvector_S(float to[3], const float from[3], float scalar)
{
  for (int axis = 0; axis < 3; axis++) {
    to[axis] = from[axis] * scalar;
  }
}
/* simple v^T * v product ("outer product") */
/* STATUS: HAS TO BE verified (*should* work) */
/* Outer product: row i of `to` is vectorB scaled by vectorA[i]. */
DO_INLINE void mul_fvectorT_fvector(float to[3][3], const float vectorA[3], const float vectorB[3])
{
  for (int row = 0; row < 3; row++) {
    mul_fvector_S(to[row], vectorB, vectorA[row]);
  }
}
/* simple v^T * v product with scalar ("outer product") */
/* STATUS: HAS TO BE verified (*should* work) */
/* Scaled outer product: to = (vectorA^T * vectorB) * aS.
 * Fixed: input vectors are now const-qualified, consistent with the
 * sibling mul_fvectorT_fvector (they are only read here). */
DO_INLINE void mul_fvectorT_fvectorS(float to[3][3],
                                     const float vectorA[3],
                                     const float vectorB[3],
                                     float aS)
{
  mul_fvectorT_fvector(to, vectorA, vectorB);
  mul_fvector_S(to[0], to[0], aS);
  mul_fvector_S(to[1], to[1], aS);
  mul_fvector_S(to[2], to[2], aS);
}
# if 0
/* Debug-only printing helpers; compiled out. */
/* printf vector[3] on console: for debug output */
static void print_fvector(float m3[3])
{
  printf("%f\n%f\n%f\n\n", m3[0], m3[1], m3[2]);
}
///////////////////////////
/* long float vector float (*)[3] */
///////////////////////////
/* print long vector on console: for debug output */
DO_INLINE void print_lfvector(float (*fLongVector)[3], unsigned int verts)
{
  unsigned int i = 0;
  for (i = 0; i < verts; i++) {
    print_fvector(fLongVector[i]);
  }
}
# endif
/* create long vector */
/* Allocate a long vector of `verts` float[3] entries (zeroed by the
 * guarded allocator, judging by the callocN name — TODO confirm). */
DO_INLINE lfVector *create_lfvector(unsigned int verts)
{
  /* TODO: check if memory allocation was successful */
  return (lfVector *)MEM_callocN(sizeof(lfVector) * verts, "cloth_implicit_alloc_vector");
}
/* delete long vector */
/* Free a long vector; NULL is accepted and ignored. */
DO_INLINE void del_lfvector(float (*fLongVector)[3])
{
  if (fLongVector == NULL) {
    return;
  }
  MEM_freeN(fLongVector);
}
/* copy long vector */
/* Copy a long vector of `verts` entries (memcpy: source and destination
 * must not overlap). */
DO_INLINE void cp_lfvector(float (*to)[3], float (*from)[3], unsigned int verts)
{
  memcpy(to, from, sizeof(lfVector) * verts);
}
/* init long vector with float[3] */
/* Fill every entry of a long vector with the same float[3] value. */
DO_INLINE void init_lfvector(float (*fLongVector)[3], const float vector[3], unsigned int verts)
{
  for (unsigned int idx = 0; idx < verts; idx++) {
    copy_v3_v3(fLongVector[idx], vector);
  }
}
/* zero long vector with float[3] */
/* Zero all entries of a long vector.
 * Fixed: memset takes an `int` fill byte; the original passed the float
 * literal 0.0f, which only worked via implicit conversion to 0. */
DO_INLINE void zero_lfvector(float (*to)[3], unsigned int verts)
{
  memset(to, 0, verts * sizeof(lfVector));
}
/* multiply long vector with scalar*/
/* Scale a long vector: to[i] = fLongVector[i] * scalar. */
DO_INLINE void mul_lfvectorS(float (*to)[3],
                             float (*fLongVector)[3],
                             float scalar,
                             unsigned int verts)
{
  for (unsigned int idx = 0; idx < verts; idx++) {
    mul_fvector_S(to[idx], fLongVector[idx], scalar);
  }
}
/* multiply long vector with scalar*/
/* A -= B * float */
/* to[i] -= fLongVector[i] * scalar, over the whole long vector. */
DO_INLINE void submul_lfvectorS(float (*to)[3],
                                float (*fLongVector)[3],
                                float scalar,
                                unsigned int verts)
{
  for (unsigned int idx = 0; idx < verts; idx++) {
    VECSUBMUL(to[idx], fLongVector[idx], scalar);
  }
}
/* dot product for big vector */
/* Dot product of two long vectors.
 * Deliberately serial: an OpenMP reduction would re-order the float
 * additions and make the simulation non-deterministic between runs
 * (see the old disabled `#pragma omp parallel for reduction`). */
DO_INLINE float dot_lfvector(float (*fLongVectorA)[3],
                             float (*fLongVectorB)[3],
                             unsigned int verts)
{
  float accum = 0.0f;
  for (long idx = 0; idx < (long)verts; idx++) {
    accum += dot_v3v3(fLongVectorA[idx], fLongVectorB[idx]);
  }
  return accum;
}
/* A = B + C --> for big vector */
/* Element-wise sum of two long vectors: to = A + B. */
DO_INLINE void add_lfvector_lfvector(float (*to)[3],
                                     float (*fLongVectorA)[3],
                                     float (*fLongVectorB)[3],
                                     unsigned int verts)
{
  for (unsigned int idx = 0; idx < verts; idx++) {
    add_v3_v3v3(to[idx], fLongVectorA[idx], fLongVectorB[idx]);
  }
}
/* A = B + C * float --> for big vector */
/* to = A + B * bS, over the whole long vector. */
DO_INLINE void add_lfvector_lfvectorS(float (*to)[3],
                                      float (*fLongVectorA)[3],
                                      float (*fLongVectorB)[3],
                                      float bS,
                                      unsigned int verts)
{
  for (unsigned int idx = 0; idx < verts; idx++) {
    VECADDS(to[idx], fLongVectorA[idx], fLongVectorB[idx], bS);
  }
}
/* A = B * float + C * float --> for big vector */
/* to = A * aS + B * bS, over the whole long vector. */
DO_INLINE void add_lfvectorS_lfvectorS(float (*to)[3],
                                       float (*fLongVectorA)[3],
                                       float aS,
                                       float (*fLongVectorB)[3],
                                       float bS,
                                       unsigned int verts)
{
  for (unsigned int idx = 0; idx < verts; idx++) {
    VECADDSS(to[idx], fLongVectorA[idx], aS, fLongVectorB[idx], bS);
  }
}
/* A = B - C * float --> for big vector */
/* to = A - B * bS, over the whole long vector. */
DO_INLINE void sub_lfvector_lfvectorS(float (*to)[3],
                                      float (*fLongVectorA)[3],
                                      float (*fLongVectorB)[3],
                                      float bS,
                                      unsigned int verts)
{
  for (unsigned int idx = 0; idx < verts; idx++) {
    VECSUBS(to[idx], fLongVectorA[idx], fLongVectorB[idx], bS);
  }
}
/* A = B - C --> for big vector */
/* Element-wise difference of two long vectors: to = A - B. */
DO_INLINE void sub_lfvector_lfvector(float (*to)[3],
                                     float (*fLongVectorA)[3],
                                     float (*fLongVectorB)[3],
                                     unsigned int verts)
{
  for (unsigned int idx = 0; idx < verts; idx++) {
    sub_v3_v3v3(to[idx], fLongVectorA[idx], fLongVectorB[idx]);
  }
}
///////////////////////////
// 3x3 matrix
///////////////////////////
# if 0
/* Debug-only printing helpers; compiled out. */
/* printf 3x3 matrix on console: for debug output */
static void print_fmatrix(float m3[3][3])
{
  printf("%f\t%f\t%f\n", m3[0][0], m3[0][1], m3[0][2]);
  printf("%f\t%f\t%f\n", m3[1][0], m3[1][1], m3[1][2]);
  printf("%f\t%f\t%f\n\n", m3[2][0], m3[2][1], m3[2][2]);
}
/* Print every block (diagonal and off-diagonal) of a big matrix. */
static void print_sparse_matrix(fmatrix3x3 *m)
{
  if (m) {
    unsigned int i;
    for (i = 0; i < m[0].vcount + m[0].scount; i++) {
      printf("%d:\n", i);
      print_fmatrix(m[i].m);
    }
  }
}
# endif
# if 0
/* Debug-only: print a long vector, one component per line. */
static void print_lvector(lfVector *v, int numverts)
{
  int i;
  for (i = 0; i < numverts; i++) {
    if (i > 0) {
      printf("\n");
    }
    printf("%f,\n", v[i][0]);
    printf("%f,\n", v[i][1]);
    printf("%f,\n", v[i][2]);
  }
}
# endif
# if 0
/* Debug-only: densify the sparse big matrix into a size*size float array and
 * print it, mirroring off-diagonal blocks to show the full symmetric matrix.
 * Compiled out. */
static void print_bfmatrix(fmatrix3x3 *m)
{
  int tot = m[0].vcount + m[0].scount;
  int size = m[0].vcount * 3;
  float *t = MEM_callocN(sizeof(float) * size * size, "bfmatrix");
  int q, i, j;
  for (q = 0; q < tot; q++) {
    int k = 3 * m[q].r;
    int l = 3 * m[q].c;
    for (j = 0; j < 3; j++) {
      for (i = 0; i < 3; i++) {
#  if 0
        if (t[k + i + (l + j) * size] != 0.0f) {
          printf("warning: overwriting value at %d, %d\n", m[q].r, m[q].c);
        }
#  endif
        if (k == l) {
          t[k + i + (k + j) * size] += m[q].m[i][j];
        }
        else {
          /* Off-diagonal block: write both the block and its transpose. */
          t[k + i + (l + j) * size] += m[q].m[i][j];
          t[l + j + (k + i) * size] += m[q].m[j][i];
        }
      }
    }
  }
  for (j = 0; j < size; j++) {
    if (j > 0 && j % 3 == 0) {
      printf("\n");
    }
    for (i = 0; i < size; i++) {
      if (i > 0 && i % 3 == 0) {
        printf(" ");
      }
      implicit_print_matrix_elem(t[i + j * size]);
    }
    printf("\n");
  }
  MEM_freeN(t);
}
# endif
/* copy 3x3 matrix */
/* Copy a 3x3 matrix row by row. */
DO_INLINE void cp_fmatrix(float to[3][3], const float from[3][3])
{
  for (int row = 0; row < 3; row++) {
    copy_v3_v3(to[row], from[row]);
  }
}
/* Initialize a 3x3 matrix to aS * identity (the old "copy 3x3 matrix"
 * comment here was wrong). */
DO_INLINE void initdiag_fmatrixS(float to[3][3], float aS)
{
  cp_fmatrix(to, ZERO);
  for (int d = 0; d < 3; d++) {
    to[d][d] = aS;
  }
}
# if 0
/* Dead code: 3x3 determinant/inverse, only needed by the disabled
 * preconditioned CG solver below. Compiled out. */
/* calculate determinant of 3x3 matrix */
DO_INLINE float det_fmatrix(float m[3][3])
{
  return m[0][0] * m[1][1] * m[2][2] + m[1][0] * m[2][1] * m[0][2] + m[0][1] * m[1][2] * m[2][0] -
         m[0][0] * m[1][2] * m[2][1] - m[0][1] * m[1][0] * m[2][2] - m[2][0] * m[1][1] * m[0][2];
}
DO_INLINE void inverse_fmatrix(float to[3][3], float from[3][3])
{
  unsigned int i, j;
  float d;
  /* NOTE: singular matrix aborts the process — debug-quality error handling. */
  if ((d = det_fmatrix(from)) == 0) {
    printf("can't build inverse");
    exit(0);
  }
  for (i = 0; i < 3; i++) {
    for (j = 0; j < 3; j++) {
      int i1 = (i + 1) % 3;
      int i2 = (i + 2) % 3;
      int j1 = (j + 1) % 3;
      int j2 = (j + 2) % 3;
      /** Reverse indexes i&j to take transpose. */
      to[j][i] = (from[i1][j1] * from[i2][j2] - from[i1][j2] * from[i2][j1]) / d;
      /**
       * <pre>
       * if (i == j) {
       *   to[i][j] = 1.0f / from[i][j];
       * }
       * else {
       *   to[i][j] = 0;
       * }
       * </pre>
       */
    }
  }
}
# endif
/* 3x3 matrix multiplied by a scalar */
/* STATUS: verified */
/* Scale a 3x3 matrix in place by `scalar`. */
DO_INLINE void mul_fmatrix_S(float matrix[3][3], float scalar)
{
  for (int row = 0; row < 3; row++) {
    mul_fvector_S(matrix[row], matrix[row], scalar);
  }
}
/* a vector multiplied by a 3x3 matrix */
/* STATUS: verified */
/* Row-vector times matrix: to[j] = sum_i from[i] * matrix[i][j]. */
DO_INLINE void mul_fvector_fmatrix(float *to, const float *from, const float matrix[3][3])
{
  for (int col = 0; col < 3; col++) {
    to[col] = matrix[0][col] * from[0] + matrix[1][col] * from[1] + matrix[2][col] * from[2];
  }
}
/* 3x3 matrix multiplied by a vector */
/* STATUS: verified */
/* Matrix times column vector: to[i] = matrix[i] . from. */
DO_INLINE void mul_fmatrix_fvector(float *to, const float matrix[3][3], const float from[3])
{
  for (int row = 0; row < 3; row++) {
    to[row] = dot_v3v3(matrix[row], from);
  }
}
/* 3x3 matrix addition with 3x3 matrix */
/* 3x3 matrix sum: to = A + B. */
DO_INLINE void add_fmatrix_fmatrix(float to[3][3],
                                   const float matrixA[3][3],
                                   const float matrixB[3][3])
{
  for (int row = 0; row < 3; row++) {
    add_v3_v3v3(to[row], matrixA[row], matrixB[row]);
  }
}
/* A -= B*x + C*y (3x3 matrix sub-addition with 3x3 matrix) */
/* to -= A * aS + B * bS (per-row via the VECSUBADDSS macro). */
DO_INLINE void subadd_fmatrixS_fmatrixS(
    float to[3][3], const float matrixA[3][3], float aS, const float matrixB[3][3], float bS)
{
  for (int row = 0; row < 3; row++) {
    VECSUBADDSS(to[row], matrixA[row], aS, matrixB[row], bS);
  }
}
/* A = B - C (3x3 matrix subtraction with 3x3 matrix) */
/* 3x3 matrix difference: to = A - B. */
DO_INLINE void sub_fmatrix_fmatrix(float to[3][3],
                                   const float matrixA[3][3],
                                   const float matrixB[3][3])
{
  for (int row = 0; row < 3; row++) {
    sub_v3_v3v3(to[row], matrixA[row], matrixB[row]);
  }
}
/////////////////////////////////////////////////////////////////
/* special functions */
/////////////////////////////////////////////////////////////////
/* 3x3 matrix multiplied+added by a vector */
/* STATUS: verified */
/* to += matrix * from (accumulating matrix-vector product). */
DO_INLINE void muladd_fmatrix_fvector(float to[3], const float matrix[3][3], const float from[3])
{
  for (int row = 0; row < 3; row++) {
    to[row] += dot_v3v3(matrix[row], from);
  }
}
/* to += matrix^T * from: accumulate using matrix columns instead of rows. */
DO_INLINE void muladd_fmatrixT_fvector(float to[3], const float matrix[3][3], const float from[3])
{
  for (int col = 0; col < 3; col++) {
    to[col] += matrix[0][col] * from[0] + matrix[1][col] * from[1] + matrix[2][col] * from[2];
  }
}
/* Outer product of two float[3]: r[i] = a * b[i]. */
BLI_INLINE void outerproduct(float r[3][3], const float a[3], const float b[3])
{
  for (int i = 0; i < 3; i++) {
    mul_v3_v3fl(r[i], a, b[i]);
  }
}
/* Cross each row of m with v: r[i] = v x m[i]. */
BLI_INLINE void cross_m3_v3m3(float r[3][3], const float v[3], const float m[3][3])
{
  for (int i = 0; i < 3; i++) {
    cross_v3_v3v3(r[i], v, m[i]);
  }
}
/* Build the skew-symmetric matrix form of the cross product with v
 * (zero diagonal, +/-v components mirrored across it). */
BLI_INLINE void cross_v3_identity(float r[3][3], const float v[3])
{
  r[0][0] = 0.0f;
  r[0][1] = -v[2];
  r[0][2] = v[1];
  r[1][0] = v[2];
  r[1][1] = 0.0f;
  r[1][2] = -v[0];
  r[2][0] = -v[1];
  r[2][1] = v[0];
  r[2][2] = 0.0f;
}
/* r += m * f, element-wise over the 3x3 matrix. */
BLI_INLINE void madd_m3_m3fl(float r[3][3], const float m[3][3], float f)
{
  for (int row = 0; row < 3; row++) {
    for (int col = 0; col < 3; col++) {
      r[row][col] += m[row][col] * f;
    }
  }
}
/////////////////////////////////////////////////////////////////
///////////////////////////
/* SPARSE SYMMETRIC big matrix with 3x3 matrix entries */
///////////////////////////
/* printf a big matrix on console: for debug output */
# if 0
/* Debug-only: dump every block of a big matrix; compiled out. */
static void print_bfmatrix(fmatrix3x3 *m3)
{
  unsigned int i = 0;
  for (i = 0; i < m3[0].vcount + m3[0].scount; i++) {
    print_fmatrix(m3[i].m);
  }
}
# endif
/* Set the block-row/column position of one sparse-matrix element. */
BLI_INLINE void init_fmatrix(fmatrix3x3 *matrix, int r, int c)
{
  matrix->c = c;
  matrix->r = r;
}
/* create big matrix */
/* Allocate a sparse big matrix: `verts` diagonal blocks followed by
 * `springs` off-diagonal blocks. Element [0] carries the counts. */
DO_INLINE fmatrix3x3 *create_bfmatrix(unsigned int verts, unsigned int springs)
{
  /* TODO: check if memory allocation was successful */
  fmatrix3x3 *temp = (fmatrix3x3 *)MEM_callocN(sizeof(fmatrix3x3) * (verts + springs),
                                               "cloth_implicit_alloc_matrix");
  temp[0].vcount = verts;
  temp[0].scount = springs;
  /* The vertex part of the matrix is diagonal blocks at (i, i). */
  for (unsigned int i = 0; i < verts; i++) {
    init_fmatrix(&temp[i], i, i);
  }
  return temp;
}
/* delete big matrix */
/* Free a big matrix; NULL is accepted and ignored. */
DO_INLINE void del_bfmatrix(fmatrix3x3 *matrix)
{
  if (matrix == NULL) {
    return;
  }
  MEM_freeN(matrix);
}
/* copy big matrix */
/* Copy a whole big matrix; sizes are taken from `from[0]`. */
DO_INLINE void cp_bfmatrix(fmatrix3x3 *to, fmatrix3x3 *from)
{
  /* TODO bounds checking */
  const unsigned int total = from[0].vcount + from[0].scount;
  memcpy(to, from, sizeof(fmatrix3x3) * total);
}
/* init big matrix */
/* slow in parallel */
/* Set every block (vertex and spring) of a big matrix to m3. */
DO_INLINE void init_bfmatrix(fmatrix3x3 *matrix, float m3[3][3])
{
  const unsigned int total = matrix[0].vcount + matrix[0].scount;
  for (unsigned int idx = 0; idx < total; idx++) {
    cp_fmatrix(matrix[idx].m, m3);
  }
}
/* init the diagonal of big matrix */
/* slow in parallel */
/* Set diagonal (vertex) blocks to m3 and off-diagonal (spring) blocks to zero. */
DO_INLINE void initdiag_bfmatrix(fmatrix3x3 *matrix, float m3[3][3])
{
  const unsigned int vcount = matrix[0].vcount;
  const unsigned int total = vcount + matrix[0].scount;
  float zero33[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}};
  unsigned int idx;
  for (idx = 0; idx < vcount; idx++) {
    cp_fmatrix(matrix[idx].m, m3);
  }
  for (idx = vcount; idx < total; idx++) {
    cp_fmatrix(matrix[idx].m, zero33);
  }
}
/* SPARSE SYMMETRIC multiply big matrix with long vector*/
/* STATUS: verified */
DO_INLINE void mul_bfmatrix_lfvector(float (*to)[3], fmatrix3x3 *from, lfVector *fLongVector)
{
  /* to = from * fLongVector, where only the lower triangle of the symmetric
   * matrix is stored: the transposed (upper-triangle) contributions are
   * accumulated into `to`, the stored blocks into `temp`, and both halves
   * are summed at the end. The two loops write to disjoint arrays, which is
   * what makes the OpenMP sections safe. */
  unsigned int vcount = from[0].vcount;
  lfVector *temp = create_lfvector(vcount);
  zero_lfvector(to, vcount);
#  pragma omp parallel sections if (vcount > CLOTH_OPENMP_LIMIT)
  {
#  pragma omp section
    {
      for (unsigned int i = from[0].vcount; i < from[0].vcount + from[0].scount; i++) {
        /* This is the lower triangle of the sparse matrix,
         * therefore multiplication occurs with transposed submatrices. */
        muladd_fmatrixT_fvector(to[from[i].c], from[i].m, fLongVector[from[i].r]);
      }
    }
#  pragma omp section
    {
      /* Diagonal blocks plus stored off-diagonal blocks. */
      for (unsigned int i = 0; i < from[0].vcount + from[0].scount; i++) {
        muladd_fmatrix_fvector(temp[from[i].r], from[i].m, fLongVector[from[i].c]);
      }
    }
  }
  add_lfvector_lfvector(to, to, temp, from[0].vcount);
  del_lfvector(temp);
}
/* SPARSE SYMMETRIC sub big matrix with big matrix*/
/* A -= B * float + C * float --> for big matrix */
/* VERIFIED */
/* Big-matrix version of subadd: to -= from * aS + matrix * bS, block-wise. */
DO_INLINE void subadd_bfmatrixS_bfmatrixS(
    fmatrix3x3 *to, fmatrix3x3 *from, float aS, fmatrix3x3 *matrix, float bS)
{
  const unsigned int total = matrix[0].vcount + matrix[0].scount;
  for (unsigned int idx = 0; idx < total; idx++) {
    subadd_fmatrixS_fmatrixS(to[idx].m, from[idx].m, aS, matrix[idx].m, bS);
  }
}
///////////////////////////////////////////////////////////////////
/* simulator start */
///////////////////////////////////////////////////////////////////
/* All state of the implicit (backward Euler) cloth solver, in the per-vertex
 * root reference frame (see tfm). Vectors have one float[3] per vertex;
 * matrices are sparse big matrices (fmatrix3x3 arrays). */
typedef struct Implicit_Data {
  /* inputs */
  fmatrix3x3 *bigI;        /* identity (constant) */
  fmatrix3x3 *tfm;         /* local coordinate transform */
  fmatrix3x3 *M;           /* masses */
  lfVector *F;             /* forces */
  fmatrix3x3 *dFdV, *dFdX; /* force jacobians */
  int num_blocks;          /* number of off-diagonal blocks (springs) */
  /* motion state data */
  lfVector *X, *Xnew; /* positions */
  lfVector *V, *Vnew; /* velocities */
  /* internal solver data */
  lfVector *B;          /* B for A*dV = B */
  fmatrix3x3 *A;        /* A for A*dV = B */
  lfVector *dV;         /* velocity change (solution of A*dV = B) */
  lfVector *z;          /* target velocity in constrained directions */
  fmatrix3x3 *S;        /* filtering matrix for constraints */
  fmatrix3x3 *P, *Pinv; /* pre-conditioning matrix */
} Implicit_Data;
/* Allocate a solver for `numverts` vertices and `numsprings` springs.
 * Returns a fully initialized Implicit_Data; free with
 * SIM_mass_spring_solver_free(). */
Implicit_Data *SIM_mass_spring_solver_create(int numverts, int numsprings)
{
  Implicit_Data *id = (Implicit_Data *)MEM_callocN(sizeof(Implicit_Data), "implicit vecmat");
  /* process diagonal elements */
  id->tfm = create_bfmatrix(numverts, 0);
  id->A = create_bfmatrix(numverts, numsprings);
  id->dFdV = create_bfmatrix(numverts, numsprings);
  id->dFdX = create_bfmatrix(numverts, numsprings);
  id->S = create_bfmatrix(numverts, 0);
  id->Pinv = create_bfmatrix(numverts, numsprings);
  id->P = create_bfmatrix(numverts, numsprings);
  id->bigI = create_bfmatrix(numverts, numsprings); /* TODO 0 springs */
  id->M = create_bfmatrix(numverts, numsprings);
  id->X = create_lfvector(numverts);
  id->Xnew = create_lfvector(numverts);
  id->V = create_lfvector(numverts);
  id->Vnew = create_lfvector(numverts);
  id->F = create_lfvector(numverts);
  id->B = create_lfvector(numverts);
  id->dV = create_lfvector(numverts);
  id->z = create_lfvector(numverts);
  /* Only bigI needs non-zero initial content; everything else is calloc'd. */
  initdiag_bfmatrix(id->bigI, I);
  return id;
}
/* Release every matrix and vector owned by the solver, then the solver itself. */
void SIM_mass_spring_solver_free(Implicit_Data *id)
{
  /* Big matrices. */
  del_bfmatrix(id->tfm);
  del_bfmatrix(id->A);
  del_bfmatrix(id->dFdV);
  del_bfmatrix(id->dFdX);
  del_bfmatrix(id->S);
  del_bfmatrix(id->P);
  del_bfmatrix(id->Pinv);
  del_bfmatrix(id->bigI);
  del_bfmatrix(id->M);
  /* Long vectors. */
  del_lfvector(id->X);
  del_lfvector(id->Xnew);
  del_lfvector(id->V);
  del_lfvector(id->Vnew);
  del_lfvector(id->F);
  del_lfvector(id->B);
  del_lfvector(id->dV);
  del_lfvector(id->z);
  /* The container itself goes last. */
  MEM_freeN(id);
}
/* ==== Transformation from/to root reference frames ==== */
/* Transform a world-space vector into vertex `index`'s root frame: r = tfm^T * v. */
BLI_INLINE void world_to_root_v3(Implicit_Data *data, int index, float r[3], const float v[3])
{
  copy_v3_v3(r, v);
  mul_transposed_m3_v3(data->tfm[index].m, r);
}
/* Transform a root-frame vector of vertex `index` back to world space: r = tfm * v. */
BLI_INLINE void root_to_world_v3(Implicit_Data *data, int index, float r[3], const float v[3])
{
  mul_v3_m3v3(r, data->tfm[index].m, v);
}
/* Transform a world-space matrix into vertex `index`'s root frame: r = tfm^T * m. */
BLI_INLINE void world_to_root_m3(Implicit_Data *data,
                                 int index,
                                 float r[3][3],
                                 const float m[3][3])
{
  float tfm_transposed[3][3];
  copy_m3_m3(tfm_transposed, data->tfm[index].m);
  transpose_m3(tfm_transposed);
  mul_m3_m3m3(r, tfm_transposed, m);
}
/* Transform a root-frame matrix of vertex `index` back to world space: r = tfm * m. */
BLI_INLINE void root_to_world_m3(Implicit_Data *data,
                                 int index,
                                 float r[3][3],
                                 const float m[3][3])
{
  mul_m3_m3m3(r, data->tfm[index].m, m);
}
/* ================================ */
/* Apply the per-vertex constraint filter matrices in place:
 * V[S[i].r] = S[i].m * V[S[i].r]. */
DO_INLINE void filter(lfVector *V, fmatrix3x3 *S)
{
  for (unsigned int i = 0; i < S[0].vcount; i++) {
    mul_m3_v3(S[i].m, V[S[i].r]);
  }
}
/* this version of the CG algorithm does not work very well with partial constraints
* (where S has non-zero elements). */
# if 0
/* Older filtered conjugate-gradient variant; compiled out in favor of the
 * version below. */
static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S)
{
  /* Solves for unknown X in equation AX=B */
  unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100;
  float conjgrad_epsilon = 0.0001f /* , conjgrad_lasterror=0 */ /* UNUSED */;
  lfVector *q, *d, *tmp, *r;
  float s, starget, a, s_prev;
  unsigned int numverts = lA[0].vcount;
  q = create_lfvector(numverts);
  d = create_lfvector(numverts);
  tmp = create_lfvector(numverts);
  r = create_lfvector(numverts);
  // zero_lfvector(ldV, CLOTHPARTICLES);
  filter(ldV, S);
  add_lfvector_lfvector(ldV, ldV, z, numverts);
  // r = B - Mul(tmp, A, X); /* just use B if X known to be zero. */
  cp_lfvector(r, lB, numverts);
  mul_bfmatrix_lfvector(tmp, lA, ldV);
  sub_lfvector_lfvector(r, r, tmp, numverts);
  filter(r, S);
  cp_lfvector(d, r, numverts);
  s = dot_lfvector(r, r, numverts);
  starget = s * sqrtf(conjgrad_epsilon);
  while (s > starget && conjgrad_loopcount < conjgrad_looplimit) {
    // Mul(q, A, d); /* q = A*d; */
    mul_bfmatrix_lfvector(q, lA, d);
    filter(q, S);
    a = s / dot_lfvector(d, q, numverts);
    /* X = X + d*a; */
    add_lfvector_lfvectorS(ldV, ldV, d, a, numverts);
    /* r = r - q*a; */
    sub_lfvector_lfvectorS(r, r, q, a, numverts);
    s_prev = s;
    s = dot_lfvector(r, r, numverts);
    /* d = r+d*(s/s_prev); */
    add_lfvector_lfvectorS(d, r, d, (s / s_prev), numverts);
    filter(d, S);
    conjgrad_loopcount++;
  }
  /* conjgrad_lasterror = s; */ /* UNUSED */
  del_lfvector(q);
  del_lfvector(d);
  del_lfvector(tmp);
  del_lfvector(r);
  // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount);
  return conjgrad_loopcount <
         conjgrad_looplimit; /* true means we reached desired accuracy in given time - ie stable */
}
# endif
/* Filtered conjugate gradient: solves lA * ldV = lB for ldV, with `z` as the
 * initial guess / constrained-direction target and `S` the constraint filter
 * applied to every residual and search direction. Fills `result` with status,
 * iteration count and relative error; returns non-zero on convergence. */
static int cg_filtered(lfVector *ldV,
                       fmatrix3x3 *lA,
                       lfVector *lB,
                       lfVector *z,
                       fmatrix3x3 *S,
                       ImplicitSolverResult *result)
{
  /* Solves for unknown X in equation AX=B */
  unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100;
  float conjgrad_epsilon = 0.01f;
  unsigned int numverts = lA[0].vcount;
  lfVector *fB = create_lfvector(numverts);
  lfVector *AdV = create_lfvector(numverts);
  lfVector *r = create_lfvector(numverts);
  lfVector *c = create_lfvector(numverts);
  lfVector *q = create_lfvector(numverts);
  lfVector *s = create_lfvector(numverts);
  float bnorm2, delta_new, delta_old, delta_target, alpha;
  cp_lfvector(ldV, z, numverts);
  /* d0 = filter(B)^T * P * filter(B) */
  cp_lfvector(fB, lB, numverts);
  filter(fB, S);
  bnorm2 = dot_lfvector(fB, fB, numverts);
  /* Converge when |r|^2 drops below eps^2 * |filter(B)|^2 (relative tolerance). */
  delta_target = conjgrad_epsilon * conjgrad_epsilon * bnorm2;
  /* r = filter(B - A * dV) */
  mul_bfmatrix_lfvector(AdV, lA, ldV);
  sub_lfvector_lfvector(r, lB, AdV, numverts);
  filter(r, S);
  /* c = filter(P^-1 * r) -- NOTE: the preconditioner is identity here,
   * so this is just a filtered copy of r. */
  cp_lfvector(c, r, numverts);
  filter(c, S);
  /* delta = r^T * c */
  delta_new = dot_lfvector(r, c, numverts);
#  ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT
  printf("==== A ====\n");
  print_bfmatrix(lA);
  printf("==== z ====\n");
  print_lvector(z, numverts);
  printf("==== B ====\n");
  print_lvector(lB, numverts);
  printf("==== S ====\n");
  print_bfmatrix(S);
#  endif
  while (delta_new > delta_target && conjgrad_loopcount < conjgrad_looplimit) {
    mul_bfmatrix_lfvector(q, lA, c);
    filter(q, S);
    /* Step length along the current search direction c. */
    alpha = delta_new / dot_lfvector(c, q, numverts);
    add_lfvector_lfvectorS(ldV, ldV, c, alpha, numverts);
    add_lfvector_lfvectorS(r, r, q, -alpha, numverts);
    /* s = P^-1 * r */
    cp_lfvector(s, r, numverts);
    delta_old = delta_new;
    delta_new = dot_lfvector(r, s, numverts);
    /* New direction: s plus the previous direction scaled by delta ratio. */
    add_lfvector_lfvectorS(c, s, c, delta_new / delta_old, numverts);
    filter(c, S);
    conjgrad_loopcount++;
  }
#  ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT
  printf("==== dV ====\n");
  print_lvector(ldV, numverts);
  printf("========\n");
#  endif
  del_lfvector(fB);
  del_lfvector(AdV);
  del_lfvector(r);
  del_lfvector(c);
  del_lfvector(q);
  del_lfvector(s);
  // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount);
  result->status = conjgrad_loopcount < conjgrad_looplimit ? SIM_SOLVER_SUCCESS :
                                                             SIM_SOLVER_NO_CONVERGENCE;
  result->iterations = conjgrad_loopcount;
  result->error = bnorm2 > 0.0f ? sqrtf(delta_new / bnorm2) : 0.0f;
  return conjgrad_loopcount <
         conjgrad_looplimit; /* true means we reached desired accuracy in given time - ie stable */
}
# if 0
/* Disabled preconditioned CG solvers (block-Jacobi preconditioner built by
 * BuildPPinv). Kept for reference; the active solver is cg_filtered above. */
/* block diagonalizer */
DO_INLINE void BuildPPinv(fmatrix3x3 *lA, fmatrix3x3 *P, fmatrix3x3 *Pinv)
{
  unsigned int i = 0;
  /* Take only the diagonal blocks of A */
  // #pragma omp parallel for private(i) if (lA[0].vcount > CLOTH_OPENMP_LIMIT)
  for (i = 0; i < lA[0].vcount; i++) {
    /* block diagonalizer */
    cp_fmatrix(P[i].m, lA[i].m);
    inverse_fmatrix(Pinv[i].m, P[i].m);
  }
}
#  if 0
/* version 1.3 */
static int cg_filtered_pre(lfVector *dv,
                           fmatrix3x3 *lA,
                           lfVector *lB,
                           lfVector *z,
                           fmatrix3x3 *S,
                           fmatrix3x3 *P,
                           fmatrix3x3 *Pinv)
{
  unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100;
  float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0;
  float conjgrad_epsilon = 0.0001; /* 0.2 is dt for steps=5 */
  lfVector *r = create_lfvector(numverts);
  lfVector *p = create_lfvector(numverts);
  lfVector *s = create_lfvector(numverts);
  lfVector *h = create_lfvector(numverts);
  BuildPPinv(lA, P, Pinv);
  filter(dv, S);
  add_lfvector_lfvector(dv, dv, z, numverts);
  mul_bfmatrix_lfvector(r, lA, dv);
  sub_lfvector_lfvector(r, lB, r, numverts);
  filter(r, S);
  mul_prevfmatrix_lfvector(p, Pinv, r);
  filter(p, S);
  deltaNew = dot_lfvector(r, p, numverts);
  delta0 = deltaNew * sqrt(conjgrad_epsilon);
#   ifdef DEBUG_TIME
  double start = PIL_check_seconds_timer();
#   endif
  while ((deltaNew > delta0) && (iterations < conjgrad_looplimit)) {
    iterations++;
    mul_bfmatrix_lfvector(s, lA, p);
    filter(s, S);
    alpha = deltaNew / dot_lfvector(p, s, numverts);
    add_lfvector_lfvectorS(dv, dv, p, alpha, numverts);
    add_lfvector_lfvectorS(r, r, s, -alpha, numverts);
    mul_prevfmatrix_lfvector(h, Pinv, r);
    filter(h, S);
    deltaOld = deltaNew;
    deltaNew = dot_lfvector(r, h, numverts);
    add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts);
    filter(p, S);
  }
#   ifdef DEBUG_TIME
  double end = PIL_check_seconds_timer();
  printf("cg_filtered_pre time: %f\n", (float)(end - start));
#   endif
  del_lfvector(h);
  del_lfvector(s);
  del_lfvector(p);
  del_lfvector(r);
  printf("iterations: %d\n", iterations);
  return iterations < conjgrad_looplimit;
}
#  endif
/* version 1.4 */
static int cg_filtered_pre(lfVector *dv,
                           fmatrix3x3 *lA,
                           lfVector *lB,
                           lfVector *z,
                           fmatrix3x3 *S,
                           fmatrix3x3 *P,
                           fmatrix3x3 *Pinv,
                           fmatrix3x3 *bigI)
{
  unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100;
  float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0, tol = 0;
  lfVector *r = create_lfvector(numverts);
  lfVector *p = create_lfvector(numverts);
  lfVector *s = create_lfvector(numverts);
  lfVector *h = create_lfvector(numverts);
  lfVector *bhat = create_lfvector(numverts);
  lfVector *btemp = create_lfvector(numverts);
  BuildPPinv(lA, P, Pinv);
  initdiag_bfmatrix(bigI, I);
  sub_bfmatrix_Smatrix(bigI, bigI, S);
  /* x = Sx_0+(I-S)z */
  filter(dv, S);
  add_lfvector_lfvector(dv, dv, z, numverts);
  /* b_hat = S(b-A(I-S)z) */
  mul_bfmatrix_lfvector(r, lA, z);
  mul_bfmatrix_lfvector(bhat, bigI, r);
  sub_lfvector_lfvector(bhat, lB, bhat, numverts);
  /* r = S(b-Ax) */
  mul_bfmatrix_lfvector(r, lA, dv);
  sub_lfvector_lfvector(r, lB, r, numverts);
  filter(r, S);
  /* p = SP^-1r */
  mul_prevfmatrix_lfvector(p, Pinv, r);
  filter(p, S);
  /* delta0 = bhat^TP^-1bhat */
  mul_prevfmatrix_lfvector(btemp, Pinv, bhat);
  delta0 = dot_lfvector(bhat, btemp, numverts);
  /* deltaNew = r^TP */
  deltaNew = dot_lfvector(r, p, numverts);
#  if 0
  filter(dv, S);
  add_lfvector_lfvector(dv, dv, z, numverts);
  mul_bfmatrix_lfvector(r, lA, dv);
  sub_lfvector_lfvector(r, lB, r, numverts);
  filter(r, S);
  mul_prevfmatrix_lfvector(p, Pinv, r);
  filter(p, S);
  deltaNew = dot_lfvector(r, p, numverts);
  delta0 = deltaNew * sqrt(conjgrad_epsilon);
#  endif
#  ifdef DEBUG_TIME
  double start = PIL_check_seconds_timer();
#  endif
  tol = (0.01 * 0.2);
  while ((deltaNew > delta0 * tol * tol) && (iterations < conjgrad_looplimit)) {
    iterations++;
    mul_bfmatrix_lfvector(s, lA, p);
    filter(s, S);
    alpha = deltaNew / dot_lfvector(p, s, numverts);
    add_lfvector_lfvectorS(dv, dv, p, alpha, numverts);
    add_lfvector_lfvectorS(r, r, s, -alpha, numverts);
    mul_prevfmatrix_lfvector(h, Pinv, r);
    filter(h, S);
    deltaOld = deltaNew;
    deltaNew = dot_lfvector(r, h, numverts);
    add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts);
    filter(p, S);
  }
#  ifdef DEBUG_TIME
  double end = PIL_check_seconds_timer();
  printf("cg_filtered_pre time: %f\n", (float)(end - start));
#  endif
  del_lfvector(btemp);
  del_lfvector(bhat);
  del_lfvector(h);
  del_lfvector(s);
  del_lfvector(p);
  del_lfvector(r);
  // printf("iterations: %d\n", iterations);
  return iterations < conjgrad_looplimit;
}
# endif
/* One implicit (backward Euler) velocity step:
 * assembles A = M - dFdV*dt - dFdX*dt^2 and B = F*dt + (dFdX*V)*dt^2,
 * solves A*dV = B with filtered CG, then sets Vnew = V + dV.
 * Returns true when the CG solve converged. */
bool SIM_mass_spring_solve_velocities(Implicit_Data *data, float dt, ImplicitSolverResult *result)
{
  unsigned int numverts = data->dFdV[0].vcount;
  lfVector *dFdXmV = create_lfvector(numverts);
  zero_lfvector(data->dV, numverts);
  /* A starts as the mass matrix; the Jacobian terms are subtracted in. */
  cp_bfmatrix(data->A, data->M);
  subadd_bfmatrixS_bfmatrixS(data->A, data->dFdV, dt, data->dFdX, (dt * dt));
  mul_bfmatrix_lfvector(dFdXmV, data->dFdX, data->V);
  add_lfvectorS_lfvectorS(data->B, data->F, dt, dFdXmV, (dt * dt), numverts);
#  ifdef DEBUG_TIME
  double start = PIL_check_seconds_timer();
#  endif
  /* Conjugate gradient algorithm to solve Ax=b. */
  cg_filtered(data->dV, data->A, data->B, data->z, data->S, result);
  // cg_filtered_pre(id->dV, id->A, id->B, id->z, id->S, id->P, id->Pinv, id->bigI);
#  ifdef DEBUG_TIME
  double end = PIL_check_seconds_timer();
  printf("cg_filtered calc time: %f\n", (float)(end - start));
#  endif
  /* advance velocities */
  add_lfvector_lfvector(data->Vnew, data->V, data->dV, numverts);
  del_lfvector(dFdXmV);
  return result->status == SIM_SOLVER_SUCCESS;
}
/* Position step: Xnew = X + Vnew * dt. Always succeeds. */
bool SIM_mass_spring_solve_positions(Implicit_Data *data, float dt)
{
  const int numverts = data->M[0].vcount;
  add_lfvector_lfvectorS(data->Xnew, data->X, data->Vnew, dt, numverts);
  return true;
}
/* Commit the step: copy Xnew/Vnew into the current state X/V. */
void SIM_mass_spring_apply_result(Implicit_Data *data)
{
  const int numverts = data->M[0].vcount;
  cp_lfvector(data->X, data->Xnew, numverts);
  cp_lfvector(data->V, data->Vnew, numverts);
}
/* Set the 3x3 diagonal mass-matrix block of one vertex to mass * I. */
void SIM_mass_spring_set_vertex_mass(Implicit_Data *data, int index, float mass)
{
  unit_m3(data->M[index].m);
  mul_m3_fl(data->M[index].m, mass);
}
/* Set the per-vertex root-frame transform. Without CLOTH_ROOT_FRAME the
 * solver works in world space, so the transform is forced to identity. */
void SIM_mass_spring_set_rest_transform(Implicit_Data *data, int index, float tfm[3][3])
{
# ifdef CLOTH_ROOT_FRAME
  copy_m3_m3(data->tfm[index].m, tfm);
# else
  unit_m3(data->tfm[index].m);
  (void)tfm;
# endif
}
/* Set position and velocity of a vertex, converting from world to root space. */
void SIM_mass_spring_set_motion_state(Implicit_Data *data,
                                      int index,
                                      const float x[3],
                                      const float v[3])
{
  world_to_root_v3(data, index, data->X[index], x);
  world_to_root_v3(data, index, data->V[index], v);
}
/* Set a vertex position (world-space input, stored in root space). */
void SIM_mass_spring_set_position(Implicit_Data *data, int index, const float x[3])
{
  world_to_root_v3(data, index, data->X[index], x);
}
/* Set a vertex velocity (world-space input, stored in root space). */
void SIM_mass_spring_set_velocity(Implicit_Data *data, int index, const float v[3])
{
  world_to_root_v3(data, index, data->V[index], v);
}
/* Read position and/or velocity of a vertex in world space.
 * Either output pointer may be NULL to skip that component. */
void SIM_mass_spring_get_motion_state(struct Implicit_Data *data,
                                      int index,
                                      float x[3],
                                      float v[3])
{
  if (x) {
    root_to_world_v3(data, index, x, data->X[index]);
  }
  if (v) {
    root_to_world_v3(data, index, v, data->V[index]);
  }
}
/* Read the current vertex position in world space. */
void SIM_mass_spring_get_position(struct Implicit_Data *data, int index, float x[3])
{
  root_to_world_v3(data, index, x, data->X[index]);
}
/* Read the current vertex velocity in world space. */
void SIM_mass_spring_get_velocity(struct Implicit_Data *data, int index, float v[3])
{
  root_to_world_v3(data, index, v, data->V[index]);
}
/* Read the not-yet-applied solver position (Xnew) in world space. */
void SIM_mass_spring_get_new_position(struct Implicit_Data *data, int index, float x[3])
{
  root_to_world_v3(data, index, x, data->Xnew[index]);
}
/* Overwrite the pending solver position (Xnew) from a world-space value. */
void SIM_mass_spring_set_new_position(struct Implicit_Data *data, int index, const float x[3])
{
  world_to_root_v3(data, index, data->Xnew[index], x);
}
/* Read the not-yet-applied solver velocity (Vnew) in world space. */
void SIM_mass_spring_get_new_velocity(struct Implicit_Data *data, int index, float v[3])
{
  root_to_world_v3(data, index, v, data->Vnew[index]);
}
/* Overwrite the pending solver velocity (Vnew) from a world-space value. */
void SIM_mass_spring_set_new_velocity(struct Implicit_Data *data, int index, const float v[3])
{
  world_to_root_v3(data, index, data->Vnew[index], v);
}
/* -------------------------------- */
/* Reserve the next off-diagonal (spring) block for the vertex pair (v1, v2)
 * in all big matrices that carry spring entries, and return its array index.
 * Diagonal blocks occupy indices [0, vcount); spring blocks follow them. */
static int SIM_mass_spring_add_block(Implicit_Data *data, int v1, int v2)
{
  int s = data->M[0].vcount + data->num_blocks; /* index from array start */
  BLI_assert(s < data->M[0].vcount + data->M[0].scount);
  ++data->num_blocks;
  /* tfm and S don't have spring entries (diagonal blocks only) */
  init_fmatrix(data->bigI + s, v1, v2);
  init_fmatrix(data->M + s, v1, v2);
  init_fmatrix(data->dFdX + s, v1, v2);
  init_fmatrix(data->dFdV + s, v1, v2);
  init_fmatrix(data->A + s, v1, v2);
  init_fmatrix(data->P + s, v1, v2);
  init_fmatrix(data->Pinv + s, v1, v2);
  return s;
}
void SIM_mass_spring_clear_constraints(Implicit_Data *data)
{
  /* Reset every vertex constraint: filter matrix S back to identity
   * (all degrees of freedom free) and target velocity delta z to zero. */
  const int numverts = data->S[0].vcount;
  for (int i = 0; i < numverts; i++) {
    unit_m3(data->S[i].m);
    zero_v3(data->z[i]);
  }
}
/* Fully constrain a vertex (zero degrees of freedom): S becomes the zero
 * matrix and the prescribed velocity change dV is stored in z. */
void SIM_mass_spring_add_constraint_ndof0(Implicit_Data *data, int index, const float dV[3])
{
  zero_m3(data->S[index].m);
  world_to_root_v3(data, index, data->z[index], dV);
}
/* Constrain a vertex to one degree of freedom: remove motion along the two
 * (world-space) constraint directions c1 and c2 by building the filter
 * S = I - c1*c1^T - c2*c2^T, and accumulate the prescribed change dV into z. */
void SIM_mass_spring_add_constraint_ndof1(
    Implicit_Data *data, int index, const float c1[3], const float c2[3], const float dV[3])
{
  float m[3][3], p[3], q[3], u[3], cmat[3][3];
  /* Project out the first constrained direction. */
  world_to_root_v3(data, index, p, c1);
  mul_fvectorT_fvector(cmat, p, p);
  sub_m3_m3m3(m, I, cmat);
  /* Project out the second constrained direction. */
  world_to_root_v3(data, index, q, c2);
  mul_fvectorT_fvector(cmat, q, q);
  sub_m3_m3m3(m, m, cmat);
  /* XXX not sure but multiplication should work here */
  copy_m3_m3(data->S[index].m, m);
  // mul_m3_m3m3(data->S[index].m, data->S[index].m, m);
  world_to_root_v3(data, index, u, dV);
  add_v3_v3(data->z[index], u);
}
/* Constrain a vertex to two degrees of freedom: remove motion along the single
 * (world-space) constraint direction c1 with S = I - c1*c1^T, and accumulate
 * the prescribed velocity change dV into z. */
void SIM_mass_spring_add_constraint_ndof2(Implicit_Data *data,
                                          int index,
                                          const float c1[3],
                                          const float dV[3])
{
  float m[3][3], p[3], u[3], cmat[3][3];
  world_to_root_v3(data, index, p, c1);
  mul_fvectorT_fvector(cmat, p, p);
  sub_m3_m3m3(m, I, cmat);
  copy_m3_m3(data->S[index].m, m);
  // mul_m3_m3m3(data->S[index].m, data->S[index].m, m);
  world_to_root_v3(data, index, u, dV);
  add_v3_v3(data->z[index], u);
}
/* Reset accumulated forces and force jacobians before a new force pass,
 * and release all spring blocks (num_blocks restarts from zero). */
void SIM_mass_spring_clear_forces(Implicit_Data *data)
{
  int numverts = data->M[0].vcount;
  zero_lfvector(data->F, numverts);
  init_bfmatrix(data->dFdX, ZERO);
  init_bfmatrix(data->dFdV, ZERO);
  data->num_blocks = 0;
}
/* Add fictitious forces arising from a non-inertial (rotating/accelerating)
 * root frame: Euler, Coriolis and centrifugal terms, plus their analytic
 * jacobians. Compiled out (no-op) unless CLOTH_ROOT_FRAME is defined. */
void SIM_mass_spring_force_reference_frame(Implicit_Data *data,
                                           int index,
                                           const float acceleration[3],
                                           const float omega[3],
                                           const float domega_dt[3],
                                           float mass)
{
# ifdef CLOTH_ROOT_FRAME
  float acc[3], w[3], dwdt[3];
  float f[3], dfdx[3][3], dfdv[3][3];
  float euler[3], coriolis[3], centrifugal[3], rotvel[3];
  float deuler[3][3], dcoriolis[3][3], dcentrifugal[3][3], drotvel[3][3];
  /* Transform frame motion into root space. */
  world_to_root_v3(data, index, acc, acceleration);
  world_to_root_v3(data, index, w, omega);
  world_to_root_v3(data, index, dwdt, domega_dt);
  /* euler = dw/dt x X, coriolis = 2 w x V, centrifugal = w x (w x X). */
  cross_v3_v3v3(euler, dwdt, data->X[index]);
  cross_v3_v3v3(coriolis, w, data->V[index]);
  mul_v3_fl(coriolis, 2.0f);
  cross_v3_v3v3(rotvel, w, data->X[index]);
  cross_v3_v3v3(centrifugal, w, rotvel);
  /* Net acceleration, scaled by mass into a force. */
  sub_v3_v3v3(f, acc, euler);
  sub_v3_v3(f, coriolis);
  sub_v3_v3(f, centrifugal);
  mul_v3_fl(f, mass); /* F = m * a */
  /* Jacobians of the terms above w.r.t. position and velocity. */
  cross_v3_identity(deuler, dwdt);
  cross_v3_identity(dcoriolis, w);
  mul_m3_fl(dcoriolis, 2.0f);
  cross_v3_identity(drotvel, w);
  cross_m3_v3m3(dcentrifugal, w, drotvel);
  add_m3_m3m3(dfdx, deuler, dcentrifugal);
  negate_m3(dfdx);
  mul_m3_fl(dfdx, mass);
  copy_m3_m3(dfdv, dcoriolis);
  negate_m3(dfdv);
  mul_m3_fl(dfdv, mass);
  /* Accumulate into the solver state. */
  add_v3_v3(data->F[index], f);
  add_m3_m3m3(data->dFdX[index].m, data->dFdX[index].m, dfdx);
  add_m3_m3m3(data->dFdV[index].m, data->dFdV[index].m, dfdv);
# else
  (void)data;
  (void)index;
  (void)acceleration;
  (void)omega;
  (void)domega_dt;
# endif
}
/* Accumulate the gravity force m * g (transformed into root space) on one vertex. */
void SIM_mass_spring_force_gravity(Implicit_Data *data, int index, float mass, const float g[3])
{
  /* force = mass * acceleration (in this case: gravity) */
  float f[3];
  world_to_root_v3(data, index, f, g);
  mul_v3_fl(f, mass);
  add_v3_v3(data->F[index], f);
}
void SIM_mass_spring_force_drag(Implicit_Data *data, float drag)
{
  /* Global viscous drag: f = -drag * v applied to every vertex.
   * NB: uses root space velocity, no need to transform. */
  const int numverts = data->M[0].vcount;
  for (int i = 0; i < numverts; i++) {
    float ddrag[3][3];
    madd_v3_v3fl(data->F[i], data->V[i], -drag);
    /* Velocity jacobian is constant: df/dv = -drag * I. */
    copy_m3_m3(ddrag, I);
    mul_m3_fl(ddrag, -drag);
    add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, ddrag);
  }
}
/* Accumulate an externally computed force and its jacobians on one vertex,
 * transforming all of them from world into root space first. */
void SIM_mass_spring_force_extern(
    struct Implicit_Data *data, int i, const float f[3], float dfdx[3][3], float dfdv[3][3])
{
  float tf[3], tdfdx[3][3], tdfdv[3][3];
  world_to_root_v3(data, i, tf, f);
  world_to_root_m3(data, i, tdfdx, dfdx);
  world_to_root_m3(data, i, tdfdv, dfdv);
  add_v3_v3(data->F[i], tf);
  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, tdfdx);
  add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tdfdv);
}
/* Compute the unit normal of triangle (v1, v2, v3) into `nor` and return the
 * triangle area (half the cross-product magnitude of the two edge vectors). */
static float calc_nor_area_tri(float nor[3],
                               const float v1[3],
                               const float v2[3],
                               const float v3[3])
{
  float n1[3], n2[3];
  sub_v3_v3v3(n1, v1, v2);
  sub_v3_v3v3(n2, v2, v3);
  cross_v3_v3v3(nor, n1, n2);
  /* normalize_v3 returns the pre-normalization length = 2 * area. */
  return normalize_v3(nor) / 2.0f;
}
/* XXX does not support force jacobians yet, since the effector system does not provide them either
 */
/* Wind force on a triangle face: project the wind field onto the face normal
 * at each corner and distribute the resulting pressure onto the three vertices. */
void SIM_mass_spring_force_face_wind(
    Implicit_Data *data, int v1, int v2, int v3, const float (*winvec)[3])
{
  const float effector_scale = 0.02f;
  const int vs[3] = {v1, v2, v3};
  float win[3], nor[3], area;
  float factor, base_force;
  float force[3];
  /* calculate face normal and area */
  area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]);
  /* The force is calculated and split up evenly for each of the three face verts */
  factor = effector_scale * area / 3.0f;
  /* Calculate wind pressure at each vertex by projecting the wind field on the normal. */
  for (int i = 0; i < 3; i++) {
    world_to_root_v3(data, vs[i], win, winvec[vs[i]]);
    force[i] = dot_v3v3(win, nor);
  }
  /* Compute per-vertex force values from local pressures.
   * From integrating the pressure over the triangle and deriving
   * equivalent vertex forces, it follows that:
   *
   * force[idx] = (sum(pressure) + pressure[idx]) * area / 12
   *
   * Effectively, 1/4 of the pressure acts just on its vertex,
   * while 3/4 is split evenly over all three.
   */
  mul_v3_fl(force, factor / 4.0f);
  base_force = force[0] + force[1] + force[2];
  /* add pressure to each of the face verts */
  madd_v3_v3fl(data->F[v1], nor, base_force + force[0]);
  madd_v3_v3fl(data->F[v2], nor, base_force + force[1]);
  madd_v3_v3fl(data->F[v3], nor, base_force + force[2]);
}
/* Apply an external per-vertex force field over a triangle face, distributing
 * it with the same 1/4 local + 3/4 shared weighting as the face wind force. */
void SIM_mass_spring_force_face_extern(
    Implicit_Data *data, int v1, int v2, int v3, const float (*forcevec)[3])
{
  const float effector_scale = 0.02f;
  const int vs[3] = {v1, v2, v3};
  float nor[3], area;
  float factor, base_force[3];
  float force[3][3];
  /* calculate face normal and area */
  area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]);
  /* The force is calculated and split up evenly for each of the three face verts */
  factor = effector_scale * area / 3.0f;
  /* Compute common and per-vertex force vectors from the original inputs. */
  zero_v3(base_force);
  for (int i = 0; i < 3; i++) {
    world_to_root_v3(data, vs[i], force[i], forcevec[vs[i]]);
    mul_v3_fl(force[i], factor / 4.0f);
    add_v3_v3(base_force, force[i]);
  }
  /* Apply the common and vertex components to all vertices. */
  for (int i = 0; i < 3; i++) {
    add_v3_v3(force[i], base_force);
    add_v3_v3(data->F[vs[i]], force[i]);
  }
}
/* Signed volume (times 6) of the tetrahedron formed by the triangle and the origin. */
float SIM_tri_tetra_volume_signed_6x(Implicit_Data *data, int v1, int v2, int v3)
{
  /* The result will be 6x the volume */
  return volume_tri_tetrahedron_signed_v3_6x(data->X[v1], data->X[v2], data->X[v3]);
}
/* Area of the triangle spanned by three solver vertices (normal is discarded). */
float SIM_tri_area(struct Implicit_Data *data, int v1, int v2, int v3)
{
  float nor[3];
  return calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]);
}
/* Apply a pressure force along the face normal of triangle (v1, v2, v3),
 * combining a uniform `common_pressure` with optional per-vertex pressures,
 * and scaling each vertex contribution by the given weights. */
void SIM_mass_spring_force_pressure(Implicit_Data *data,
                                    int v1,
                                    int v2,
                                    int v3,
                                    float common_pressure,
                                    const float *vertex_pressure,
                                    const float weights[3])
{
  float nor[3], area;
  float factor, base_force;
  float force[3];
  /* calculate face normal and area */
  area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]);
  /* The force is calculated and split up evenly for each of the three face verts */
  factor = area / 3.0f;
  base_force = common_pressure * factor;
  /* Compute per-vertex force values from local pressures.
   * From integrating the pressure over the triangle and deriving
   * equivalent vertex forces, it follows that:
   *
   * force[idx] = (sum(pressure) + pressure[idx]) * area / 12
   *
   * Effectively, 1/4 of the pressure acts just on its vertex,
   * while 3/4 is split evenly over all three.
   */
  if (vertex_pressure) {
    copy_v3_fl3(force, vertex_pressure[v1], vertex_pressure[v2], vertex_pressure[v3]);
    mul_v3_fl(force, factor / 4.0f);
    base_force += force[0] + force[1] + force[2];
  }
  else {
    /* No per-vertex component; only the common pressure acts. */
    zero_v3(force);
  }
  /* add pressure to each of the face verts */
  madd_v3_v3fl(data->F[v1], nor, (base_force + force[0]) * weights[0]);
  madd_v3_v3fl(data->F[v2], nor, (base_force + force[1]) * weights[1]);
  madd_v3_v3fl(data->F[v3], nor, (base_force + force[2]) * weights[2]);
}
/* Wind force on one endpoint of an edge (hair segment), modeling the segment
 * as a cylinder: the cross-section seen by the wind depends on the angle
 * between the wind direction and the edge. Jacobians are not computed. */
static void edge_wind_vertex(const float dir[3],
                             float length,
                             float radius,
                             const float wind[3],
                             float f[3],
                             float UNUSED(dfdx[3][3]),
                             float UNUSED(dfdv[3][3]))
{
  const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */
  float cos_alpha, sin_alpha, cross_section;
  float windlen = len_v3(wind);
  /* No wind, no force (also avoids division by zero below). */
  if (windlen == 0.0f) {
    zero_v3(f);
    return;
  }
  /* angle of wind direction to edge */
  cos_alpha = dot_v3v3(wind, dir) / windlen;
  sin_alpha = sqrtf(1.0f - cos_alpha * cos_alpha);
  /* Projected area: side of the cylinder plus its cap. */
  cross_section = radius * ((float)M_PI * radius * sin_alpha + length * cos_alpha);
  mul_v3_v3fl(f, wind, density * cross_section);
}
/* Apply wind to both endpoints of an edge, using each endpoint's own radius
 * and the local wind vector transformed into root space. */
void SIM_mass_spring_force_edge_wind(
    Implicit_Data *data, int v1, int v2, float radius1, float radius2, const float (*winvec)[3])
{
  float win[3], dir[3], length;
  float f[3], dfdx[3][3], dfdv[3][3];
  /* Unit edge direction and length. */
  sub_v3_v3v3(dir, data->X[v1], data->X[v2]);
  length = normalize_v3(dir);
  world_to_root_v3(data, v1, win, winvec[v1]);
  edge_wind_vertex(dir, length, radius1, win, f, dfdx, dfdv);
  add_v3_v3(data->F[v1], f);
  world_to_root_v3(data, v2, win, winvec[v2]);
  edge_wind_vertex(dir, length, radius2, win, f, dfdx, dfdv);
  add_v3_v3(data->F[v2], f);
}
/* Apply wind directly to a single vertex as a simple drag-like force
 * proportional to the local wind vector (radius is currently unused). */
void SIM_mass_spring_force_vertex_wind(Implicit_Data *data,
                                       int v,
                                       float UNUSED(radius),
                                       const float (*winvec)[3])
{
  const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */
  float wind[3];
  float f[3];
  world_to_root_v3(data, v, wind, winvec[v]);
  mul_v3_v3fl(f, wind, density);
  add_v3_v3(data->F[v], f);
}
/* Position jacobian of a linear spring force:
 * to = -k * ((I - dir*dir^T) * (L / length) - I). */
BLI_INLINE void dfdx_spring(float to[3][3], const float dir[3], float length, float L, float k)
{
  /* dir is unit length direction, rest is spring's restlength, k is spring constant. */
  // return ( (I-outerprod(dir, dir))*Min(1.0f, rest/length) - I) * -k;
  outerproduct(to, dir, dir);
  sub_m3_m3m3(to, I, to);
  mul_m3_fl(to, (L / length));
  sub_m3_m3m3(to, to, I);
  mul_m3_fl(to, k);
}
/* unused */
/* Disabled: position jacobian of the inner spring damping term.
 * Kept for reference; not compiled. */
# if 0
BLI_INLINE void dfdx_damp(float to[3][3],
                          const float dir[3],
                          float length,
                          const float vel[3],
                          float rest,
                          float damping)
{
  /* inner spring damping vel is the relative velocity of the endpoints. */
  // return (I-outerprod(dir, dir)) * (-damping * -(dot(dir, vel)/Max(length, rest)));
  mul_fvectorT_fvector(to, dir, dir);
  sub_fmatrix_fmatrix(to, I, to);
  mul_fmatrix_S(to, (-damping * -(dot_v3v3(dir, vel) / MAX2(length, rest))));
}
# endif
/* Velocity jacobian of the spring damping force: to = -damping * dir*dir^T. */
BLI_INLINE void dfdv_damp(float to[3][3], const float dir[3], float damping)
{
  /* derivative of force wrt velocity */
  outerproduct(to, dir, dir);
  mul_m3_fl(to, -damping);
}
/* Quartic polynomial in x = length/L used by the Choi & Ko bending model
 * to shape the compression force response. */
BLI_INLINE float fb(float length, float L)
{
  float x = length / L;
  float xx = x * x;
  float xxx = xx * x;
  float xxxx = xxx * x;
  return (-11.541f * xxxx + 34.193f * xxx - 39.083f * xx + 23.116f * x - 9.713f);
}
/* Derivative of fb() with respect to x = length/L (same polynomial, one order lower). */
BLI_INLINE float fbderiv(float length, float L)
{
  float x = length / L;
  float xx = x * x;
  float xxx = xx * x;
  return (-46.164f * xxx + 102.579f * xx - 78.166f * x + 23.116f);
}
BLI_INLINE float fbstar(float length, float L, float kb, float cb)
{
  /* Bending force magnitude per Choi & Ko: whichever is larger of the
   * non-linear polynomial term kb*fb and the linear term cb*(length - L). */
  const float nonlinear = kb * fb(length, L);
  const float linear = cb * (length - L);
  return (nonlinear < linear) ? linear : nonlinear;
}
/* Function to calculate the bending spring force derivative (taken from Choi & Ko):
 * returns the slope of whichever branch fbstar() would pick for these inputs. */
BLI_INLINE float fbstar_jacobi(float length, float L, float kb, float cb)
{
  float tempfb_fl = kb * fb(length, L);
  float fbstar_fl = cb * (length - L);
  if (tempfb_fl < fbstar_fl) {
    /* Linear branch active: derivative of cb * (length - L). */
    return -cb;
  }
  /* Non-linear branch active: derivative of kb * fb(length, L). */
  return -kb * fbderiv(length, L);
}
/* calculate elongation */
/* Compute the spring state between vertices i and j: extent vector, unit
 * direction (zero if degenerate), length, and relative velocity.
 * Always returns true; the tearing branch below is disabled. */
BLI_INLINE bool spring_length(Implicit_Data *data,
                              int i,
                              int j,
                              float r_extent[3],
                              float r_dir[3],
                              float *r_length,
                              float r_vel[3])
{
  sub_v3_v3v3(r_extent, data->X[j], data->X[i]);
  sub_v3_v3v3(r_vel, data->V[j], data->V[i]);
  *r_length = len_v3(r_extent);
  if (*r_length > ALMOST_ZERO) {
# if 0
    if (length > L) {
      if ((clmd->sim_parms->flags & CSIMSETT_FLAG_TEARING_ENABLED) &&
          (((length - L) * 100.0f / L) > clmd->sim_parms->maxspringlen)) {
        /* cut spring! */
        s->flags |= CSPRING_FLAG_DEACTIVATE;
        return false;
      }
    }
# endif
    mul_v3_v3fl(r_dir, r_extent, 1.0f / (*r_length));
  }
  else {
    /* Coincident endpoints: no meaningful direction. */
    zero_v3(r_dir);
  }
  return true;
}
/* Accumulate a spring force f and its jacobians into the solver:
 * equal and opposite forces on i and j, jacobians added to both diagonal
 * blocks and subtracted from the shared off-diagonal (i, j) block. */
BLI_INLINE void apply_spring(Implicit_Data *data,
                             int i,
                             int j,
                             const float f[3],
                             const float dfdx[3][3],
                             const float dfdv[3][3])
{
  int block_ij = SIM_mass_spring_add_block(data, i, j);
  add_v3_v3(data->F[i], f);
  sub_v3_v3(data->F[j], f);
  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx);
  add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfdx);
  sub_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfdx);
  add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv);
  add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfdv);
  sub_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfdv);
}
/* Linear structural spring between vertices i and j with separate tension and
 * compression response. Returns false if the spring is inactive for the
 * current configuration (slack and not resisting compression). */
bool SIM_mass_spring_force_spring_linear(Implicit_Data *data,
                                         int i,
                                         int j,
                                         float restlen,
                                         float stiffness_tension,
                                         float damping_tension,
                                         float stiffness_compression,
                                         float damping_compression,
                                         bool resist_compress,
                                         bool new_compress,
                                         float clamp_force)
{
  float extent[3], length, dir[3], vel[3];
  float f[3], dfdx[3][3], dfdv[3][3];
  float damping = 0;
  /* calculate elongation */
  spring_length(data, i, j, extent, dir, &length, vel);
  /* This code computes not only the force, but also its derivative.
   * Zero derivative effectively disables the spring for the implicit solver.
   * Thus length > restlen makes cloth unconstrained at the start of simulation. */
  if ((length >= restlen && length > 0) || resist_compress) {
    /* Tension (or plain linear compression) branch: Hooke's law with
     * optional clamping of the force magnitude. */
    float stretch_force;
    damping = damping_tension;
    stretch_force = stiffness_tension * (length - restlen);
    if (clamp_force > 0.0f && stretch_force > clamp_force) {
      stretch_force = clamp_force;
    }
    mul_v3_v3fl(f, dir, stretch_force);
    dfdx_spring(dfdx, dir, length, restlen, stiffness_tension);
  }
  else if (new_compress) {
    /* This is based on the Choi and Ko bending model,
     * which works surprisingly well for compression. */
    float kb = stiffness_compression;
    float cb = kb; /* cb equal to kb seems to work, but a factor can be added if necessary */
    damping = damping_compression;
    mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb));
    outerproduct(dfdx, dir, dir);
    mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb));
  }
  else {
    /* Slack spring with no compression handling: contributes nothing. */
    return false;
  }
  /* Damping along the spring direction, proportional to closing speed. */
  madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir));
  dfdv_damp(dfdv, dir, damping);
  apply_spring(data, i, j, f, dfdx, dfdv);
  return true;
}
/* See "Stable but Responsive Cloth" (Choi, Ko 2005) */
/* Bending spring that only acts under compression (length < restlen);
 * returns false otherwise. Damping is not supported. */
bool SIM_mass_spring_force_spring_bending(
    Implicit_Data *data, int i, int j, float restlen, float kb, float cb)
{
  float extent[3], length, dir[3], vel[3];
  /* calculate elongation */
  spring_length(data, i, j, extent, dir, &length, vel);
  if (length < restlen) {
    float f[3], dfdx[3][3], dfdv[3][3];
    mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb));
    outerproduct(dfdx, dir, dir);
    mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb));
    /* XXX damping not supported */
    zero_m3(dfdv);
    apply_spring(data, i, j, f, dfdx, dfdv);
    return true;
  }
  return false;
}
BLI_INLINE void poly_avg(lfVector *data, const int *inds, int len, float r_avg[3])
{
  /* Arithmetic mean of the `len` vectors selected by `inds`. */
  const float weight = 1.0f / (float)len;
  zero_v3(r_avg);
  for (int n = 0; n < len; n++) {
    madd_v3_v3fl(r_avg, data[inds[n]], weight);
  }
}
/* Normal of the triangle formed by vertices i, j and the polygon centroid. */
BLI_INLINE void poly_norm(lfVector *data, int i, int j, int *inds, int len, float r_dir[3])
{
  float mid[3];
  poly_avg(data, inds, len, mid);
  normal_tri_v3(r_dir, data[i], data[j], mid);
}
BLI_INLINE void edge_avg(lfVector *data, int i, int j, float r_avg[3])
{
  /* Midpoint of the two vertex vectors, component by component. */
  for (int c = 0; c < 3; c++) {
    r_avg[c] = (data[i][c] + data[j][c]) * 0.5f;
  }
}
/* Unit vector pointing from vertex j to vertex i. */
BLI_INLINE void edge_norm(lfVector *data, int i, int j, float r_dir[3])
{
  sub_v3_v3v3(r_dir, data[i], data[j]);
  normalize_v3(r_dir);
}
BLI_INLINE float bend_angle(const float dir_a[3], const float dir_b[3], const float dir_e[3])
{
  /* Signed angle between the two face normals dir_a and dir_b, measured
   * around the shared edge axis dir_e (sign from the triple product). */
  float axis[3];
  const float cos_phi = dot_v3v3(dir_a, dir_b);
  cross_v3_v3v3(axis, dir_a, dir_b);
  const float sin_phi = dot_v3v3(axis, dir_e);
  return atan2f(sin_phi, cos_phi);
}
/* Gather the state of an angular (bending) spring across edge (i, j):
 * the two adjacent polygon normals, the bend angle between them, and the
 * polygon velocities relative to the edge midpoint velocity. */
BLI_INLINE void spring_angle(Implicit_Data *data,
                             int i,
                             int j,
                             int *i_a,
                             int *i_b,
                             int len_a,
                             int len_b,
                             float r_dir_a[3],
                             float r_dir_b[3],
                             float *r_angle,
                             float r_vel_a[3],
                             float r_vel_b[3])
{
  float dir_e[3], vel_e[3];
  /* Normals of the two polygons flanking the edge. */
  poly_norm(data->X, j, i, i_a, len_a, r_dir_a);
  poly_norm(data->X, i, j, i_b, len_b, r_dir_b);
  edge_norm(data->X, i, j, dir_e);
  *r_angle = bend_angle(r_dir_a, r_dir_b, dir_e);
  /* Polygon velocities relative to the edge velocity. */
  poly_avg(data->V, i_a, len_a, r_vel_a);
  poly_avg(data->V, i_b, len_b, r_vel_b);
  edge_avg(data->V, i, j, vel_e);
  sub_v3_v3(r_vel_a, vel_e);
  sub_v3_v3(r_vel_b, vel_e);
}
/* Angular springs roughly based on the bending model proposed by Baraff and Witkin in "Large Steps
 * in Cloth Simulation". */
/* Distribute a restoring torque (stiffness + damping) over the vertices of the
 * two polygons adjacent to edge (i, j); the edge endpoints receive the
 * counterforce. No jacobians are accumulated (explicit contribution). */
bool SIM_mass_spring_force_spring_angular(Implicit_Data *data,
                                          int i,
                                          int j,
                                          int *i_a,
                                          int *i_b,
                                          int len_a,
                                          int len_b,
                                          float restang,
                                          float stiffness,
                                          float damping)
{
  float angle, dir_a[3], dir_b[3], vel_a[3], vel_b[3];
  float f_a[3], f_b[3], f_e[3];
  float force;
  int x;
  spring_angle(data, i, j, i_a, i_b, len_a, len_b, dir_a, dir_b, &angle, vel_a, vel_b);
  /* spring force */
  force = stiffness * (angle - restang);
  /* damping force */
  force += -damping * (dot_v3v3(vel_a, dir_a) + dot_v3v3(vel_b, dir_b));
  /* Spread the force over each polygon's vertices along its normal. */
  mul_v3_v3fl(f_a, dir_a, force / len_a);
  mul_v3_v3fl(f_b, dir_b, force / len_b);
  for (x = 0; x < len_a; x++) {
    add_v3_v3(data->F[i_a[x]], f_a);
  }
  for (x = 0; x < len_b; x++) {
    add_v3_v3(data->F[i_b[x]], f_b);
  }
  /* Counterforce on the edge endpoints keeps total momentum balanced. */
  mul_v3_v3fl(f_a, dir_a, force * 0.5f);
  mul_v3_v3fl(f_b, dir_b, force * 0.5f);
  add_v3_v3v3(f_e, f_a, f_b);
  sub_v3_v3(data->F[i], f_e);
  sub_v3_v3(data->F[j], f_e);
  return true;
}
/* Jacobian of a direction vector.
 * Basically the part of the differential orthogonal to the direction,
 * inversely proportional to the length of the edge.
 *
 * dD_ij/dx_i = -dD_ij/dx_j = (D_ij * D_ij^T - I) / len_ij
 */
BLI_INLINE void spring_grad_dir(
    Implicit_Data *data, int i, int j, float edge[3], float dir[3], float grad_dir[3][3])
{
  float length;
  sub_v3_v3v3(edge, data->X[j], data->X[i]);
  length = normalize_v3_v3(dir, edge);
  if (length > ALMOST_ZERO) {
    outerproduct(grad_dir, dir, dir);
    sub_m3_m3m3(grad_dir, I, grad_dir);
    mul_m3_fl(grad_dir, 1.0f / length);
  }
  else {
    /* Degenerate edge: jacobian undefined, use zero. */
    zero_m3(grad_dir);
  }
}
/* Hair bending force on vertex k of the (i, j, k) segment pair: a spring
 * pulling edge (j, k) toward the goal vector, plus damping of the velocity
 * component orthogonal to the edge. The q/dx/dv parameters perturb one
 * vertex's position/velocity so this can be reused for finite differences. */
BLI_INLINE void spring_hairbend_forces(Implicit_Data *data,
                                       int i,
                                       int j,
                                       int k,
                                       const float goal[3],
                                       float stiffness,
                                       float damping,
                                       int q,
                                       const float dx[3],
                                       const float dv[3],
                                       float r_f[3])
{
  float edge_ij[3], dir_ij[3];
  float edge_jk[3], dir_jk[3];
  float vel_ij[3], vel_jk[3], vel_ortho[3];
  float f_bend[3], f_damp[3];
  float fk[3];
  float dist[3];
  zero_v3(fk);
  /* Edge i-j, with the perturbation applied to whichever endpoint q names. */
  sub_v3_v3v3(edge_ij, data->X[j], data->X[i]);
  if (q == i) {
    sub_v3_v3(edge_ij, dx);
  }
  if (q == j) {
    add_v3_v3(edge_ij, dx);
  }
  normalize_v3_v3(dir_ij, edge_ij);
  /* Edge j-k, likewise perturbed. */
  sub_v3_v3v3(edge_jk, data->X[k], data->X[j]);
  if (q == j) {
    sub_v3_v3(edge_jk, dx);
  }
  if (q == k) {
    add_v3_v3(edge_jk, dx);
  }
  normalize_v3_v3(dir_jk, edge_jk);
  /* Relative velocities of the two edges, perturbed the same way. */
  sub_v3_v3v3(vel_ij, data->V[j], data->V[i]);
  if (q == i) {
    sub_v3_v3(vel_ij, dv);
  }
  if (q == j) {
    add_v3_v3(vel_ij, dv);
  }
  sub_v3_v3v3(vel_jk, data->V[k], data->V[j]);
  if (q == j) {
    sub_v3_v3(vel_jk, dv);
  }
  if (q == k) {
    add_v3_v3(vel_jk, dv);
  }
  /* bending force */
  sub_v3_v3v3(dist, goal, edge_jk);
  mul_v3_v3fl(f_bend, dist, stiffness);
  add_v3_v3(fk, f_bend);
  /* damping force */
  madd_v3_v3v3fl(vel_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk));
  mul_v3_v3fl(f_damp, vel_ortho, damping);
  sub_v3_v3(fk, f_damp);
  copy_v3_v3(r_f, fk);
}
/* Finite Differences method for estimating the jacobian of the force */
/* Central differences: perturb vertex q's position by +/- delta/2 along each
 * axis and divide the force difference by delta to fill one row of dfdx. */
BLI_INLINE void spring_hairbend_estimate_dfdx(Implicit_Data *data,
                                              int i,
                                              int j,
                                              int k,
                                              const float goal[3],
                                              float stiffness,
                                              float damping,
                                              int q,
                                              float dfdx[3][3])
{
  const float delta = 0.00001f; /* TODO find a good heuristic for this */
  float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3];
  float f[3];
  int a, b;
  /* Rows of dvec_pos/dvec_neg are the +/- delta/2 axis offsets. */
  zero_m3(dvec_null);
  unit_m3(dvec_pos);
  mul_m3_fl(dvec_pos, delta * 0.5f);
  copy_m3_m3(dvec_neg, dvec_pos);
  negate_m3(dvec_neg);
  /* XXX TODO offset targets to account for position dependency */
  for (a = 0; a < 3; a++) {
    spring_hairbend_forces(
        data, i, j, k, goal, stiffness, damping, q, dvec_pos[a], dvec_null[a], f);
    copy_v3_v3(dfdx[a], f);
    spring_hairbend_forces(
        data, i, j, k, goal, stiffness, damping, q, dvec_neg[a], dvec_null[a], f);
    sub_v3_v3(dfdx[a], f);
    for (b = 0; b < 3; b++) {
      dfdx[a][b] /= delta;
    }
  }
}
/* Finite Differences method for estimating the jacobian of the force */
/* Same central-difference scheme as spring_hairbend_estimate_dfdx, but the
 * perturbation is applied to vertex q's velocity to fill dfdv. */
BLI_INLINE void spring_hairbend_estimate_dfdv(Implicit_Data *data,
                                              int i,
                                              int j,
                                              int k,
                                              const float goal[3],
                                              float stiffness,
                                              float damping,
                                              int q,
                                              float dfdv[3][3])
{
  const float delta = 0.00001f; /* TODO find a good heuristic for this */
  float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3];
  float f[3];
  int a, b;
  zero_m3(dvec_null);
  unit_m3(dvec_pos);
  mul_m3_fl(dvec_pos, delta * 0.5f);
  copy_m3_m3(dvec_neg, dvec_pos);
  negate_m3(dvec_neg);
  /* XXX TODO offset targets to account for position dependency */
  for (a = 0; a < 3; a++) {
    spring_hairbend_forces(
        data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_pos[a], f);
    copy_v3_v3(dfdv[a], f);
    spring_hairbend_forces(
        data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_neg[a], f);
    sub_v3_v3(dfdv[a], f);
    for (b = 0; b < 3; b++) {
      dfdv[a][b] /= delta;
    }
  }
}
/* Angular spring that pulls the vertex toward the local target
 * See "Artistic Simulation of Curly Hair" (Pixar technical memo #12-03a)
 */
/* Force on k (with counterforce on j) plus finite-difference jacobians for
 * all vertex pairs of the (i, j, k) hair segment chain. Always returns true. */
bool SIM_mass_spring_force_spring_bending_hair(Implicit_Data *data,
                                               int i,
                                               int j,
                                               int k,
                                               const float target[3],
                                               float stiffness,
                                               float damping)
{
  float goal[3];
  float fj[3], fk[3];
  float dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3];
  float dfj_dvi[3][3], dfj_dvj[3][3], dfk_dvi[3][3], dfk_dvj[3][3], dfk_dvk[3][3];
  const float vecnull[3] = {0.0f, 0.0f, 0.0f};
  /* Off-diagonal blocks for every vertex pair touched by this spring. */
  int block_ij = SIM_mass_spring_add_block(data, i, j);
  int block_jk = SIM_mass_spring_add_block(data, j, k);
  int block_ik = SIM_mass_spring_add_block(data, i, k);
  world_to_root_v3(data, j, goal, target);
  /* Unperturbed force on k. */
  spring_hairbend_forces(data, i, j, k, goal, stiffness, damping, k, vecnull, vecnull, fk);
  negate_v3_v3(fj, fk); /* counterforce */
  /* Position jacobians via finite differences; fj = -fk, so its jacobians
   * are simply negated copies. */
  spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, i, dfk_dxi);
  spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, j, dfk_dxj);
  spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, k, dfk_dxk);
  copy_m3_m3(dfj_dxi, dfk_dxi);
  negate_m3(dfj_dxi);
  copy_m3_m3(dfj_dxj, dfk_dxj);
  negate_m3(dfj_dxj);
  /* Velocity jacobians, likewise. */
  spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, i, dfk_dvi);
  spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, j, dfk_dvj);
  spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, k, dfk_dvk);
  copy_m3_m3(dfj_dvi, dfk_dvi);
  negate_m3(dfj_dvi);
  copy_m3_m3(dfj_dvj, dfk_dvj);
  negate_m3(dfj_dvj);
  /* add forces and jacobians to the solver data */
  add_v3_v3(data->F[j], fj);
  add_v3_v3(data->F[k], fk);
  add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj);
  add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk);
  add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi);
  add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj);
  add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi);
  add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfj_dvj);
  add_m3_m3m3(data->dFdV[k].m, data->dFdV[k].m, dfk_dvk);
  add_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfj_dvi);
  add_m3_m3m3(data->dFdV[block_jk].m, data->dFdV[block_jk].m, dfk_dvj);
  add_m3_m3m3(data->dFdV[block_ik].m, data->dFdV[block_ik].m, dfk_dvi);
  /* XXX analytical calculation of derivatives below is incorrect.
   * This proved to be difficult, but for now just using the finite difference method for
   * estimating the jacobians should be sufficient.
   */
# if 0
  float edge_ij[3], dir_ij[3], grad_dir_ij[3][3];
  float edge_jk[3], dir_jk[3], grad_dir_jk[3][3];
  float dist[3], vel_jk[3], vel_jk_ortho[3], projvel[3];
  float target[3];
  float tmp[3][3];
  float fi[3], fj[3], fk[3];
  float dfi_dxi[3][3], dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3];
  float dfdvi[3][3];
  /* TESTING */
  damping = 0.0f;
  zero_v3(fi);
  zero_v3(fj);
  zero_v3(fk);
  zero_m3(dfi_dxi);
  zero_m3(dfj_dxi);
  zero_m3(dfk_dxi);
  zero_m3(dfk_dxj);
  zero_m3(dfk_dxk);
  /* jacobian of direction vectors */
  spring_grad_dir(data, i, j, edge_ij, dir_ij, grad_dir_ij);
  spring_grad_dir(data, j, k, edge_jk, dir_jk, grad_dir_jk);
  sub_v3_v3v3(vel_jk, data->V[k], data->V[j]);
  /* bending force */
  mul_v3_v3fl(target, dir_ij, restlen);
  sub_v3_v3v3(dist, target, edge_jk);
  mul_v3_v3fl(fk, dist, stiffness);
  /* damping force */
  madd_v3_v3v3fl(vel_jk_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk));
  madd_v3_v3fl(fk, vel_jk_ortho, damping);
  /* XXX this only holds true as long as we assume straight rest shape!
   * eventually will become a bit more involved since the opposite segment
   * gets its own target, under condition of having equal torque on both sides.
   */
  copy_v3_v3(fi, fk);
  /* counterforce on the middle point */
  sub_v3_v3(fj, fi);
  sub_v3_v3(fj, fk);
  /* === derivatives === */
  madd_m3_m3fl(dfk_dxi, grad_dir_ij, stiffness * restlen);
  madd_m3_m3fl(dfk_dxj, grad_dir_ij, -stiffness * restlen);
  madd_m3_m3fl(dfk_dxj, I, stiffness);
  madd_m3_m3fl(dfk_dxk, I, -stiffness);
  copy_m3_m3(dfi_dxi, dfk_dxk);
  negate_m3(dfi_dxi);
  /* dfj_dfi == dfi_dfj due to symmetry,
   * dfi_dfj == dfk_dfj due to fi == fk
   * XXX see comment above on future bent rest shapes
   */
  copy_m3_m3(dfj_dxi, dfk_dxj);
  /* dfj_dxj == -(dfi_dxj + dfk_dxj) due to fj == -(fi + fk) */
  sub_m3_m3m3(dfj_dxj, dfj_dxj, dfj_dxi);
  sub_m3_m3m3(dfj_dxj, dfj_dxj, dfk_dxj);
  /* add forces and jacobians to the solver data */
  add_v3_v3(data->F[i], fi);
  add_v3_v3(data->F[j], fj);
  add_v3_v3(data->F[k], fk);
  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfi_dxi);
  add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj);
  add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk);
  add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi);
  add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj);
  add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi);
# endif
  return true;
}
/* Zero-rest-length goal spring pulling vertex i toward a world-space target
 * position/velocity, with damping along the spring direction. Returns false
 * when the vertex already sits on the goal (degenerate direction). */
bool SIM_mass_spring_force_spring_goal(Implicit_Data *data,
                                       int i,
                                       const float goal_x[3],
                                       const float goal_v[3],
                                       float stiffness,
                                       float damping)
{
  float root_goal_x[3], root_goal_v[3], extent[3], length, dir[3], vel[3];
  float f[3], dfdx[3][3], dfdv[3][3];
  /* goal is in world space */
  world_to_root_v3(data, i, root_goal_x, goal_x);
  world_to_root_v3(data, i, root_goal_v, goal_v);
  sub_v3_v3v3(extent, root_goal_x, data->X[i]);
  sub_v3_v3v3(vel, root_goal_v, data->V[i]);
  length = normalize_v3_v3(dir, extent);
  if (length > ALMOST_ZERO) {
    mul_v3_v3fl(f, dir, stiffness * length);
    /* Ascher & Boxman, p.21: Damping only during elongation
     * something wrong with it. */
    madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir));
    /* Rest length 0 makes the jacobian that of a pure attractor. */
    dfdx_spring(dfdx, dir, length, 0.0f, stiffness);
    dfdv_damp(dfdv, dir, damping);
    add_v3_v3(data->F[i], f);
    add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx);
    add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv);
    return true;
  }
  return false;
}
#endif /* IMPLICIT_SOLVER_BLENDER */
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval` values X and Y, storing X - Y in RESULT.
 * NOTE: Y is normalized in place as a scratch value (classic glibc-manual
 * idiom), so the caller must not rely on *y afterwards.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow a second from y when x's microseconds are smaller, so the
   * microsecond subtraction below cannot go negative. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry whole seconds out of the microsecond field if it overflows. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* With y normalized, the per-field difference is the final answer and
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/*
 * Driver: allocates the double-buffered field A[2][Nz][Ny][Nx] and 13
 * coefficient arrays, fills them with pseudo-random data, then times TESTS
 * runs of the tiled (CLooG-generated) 25-point stencil sweep and reports
 * per-run and best timings.
 */
int main(int argc, char *argv[])
{
  int i, j, k, m, test;
  /* Problem dimensions (each padded by an 8-point halo) and time steps.
   * Defaults apply when too few command-line arguments are given; previously
   * these variables were read uninitialized in that case (undefined behavior). */
  int Nx = 108, Ny = 108, Nz = 108, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 24;
  tile_size[3] = 64;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables
  // NOTE(review): initialization starts at index 1, leaving plane/row/column 0
  // uninitialized even though the stencil can read index 0 (e.g. t8-4 with
  // t8 == 4). Inherited from the original benchmark — confirm intent.
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
       This file is part of the GNU C Library.
       The GNU C Library is free software; you can redistribute it and/or
       modify it under the terms of the GNU Lesser General Public
       License as published by the Free Software Foundation; either
       version 2.1 of the License, or (at your option) any later version.
       The GNU C Library is distributed in the hope that it will be useful,
       but WITHOUT ANY WARRANTY; without even the implied warranty of
       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
       Lesser General Public License for more details.
       You should have received a copy of the GNU Lesser General Public
       License along with the GNU C Library; if not, see
       <http://www.gnu.org/licenses/>. */
    /* This header is separate from features.h so that the compiler can
       include it implicitly at the start of every compilation. It must
       not itself include <features.h> or any other header that includes
       <features.h> because the implicit include comes before any feature
       test macros that may be defined in a source file before it first
       explicitly includes a system header. GCC knows the name of this
       header in order to preinclude it. */
    /* glibc's intent is to support the IEC 559 math functionality, real
       and complex. If the GCC (4.9 and later) predefined macros
       specifying compiler intent are available, use them to determine
       whether the overall intent is to support these features; otherwise,
       presume an older compiler has intent to support these features and
       define these macros by default. */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
       Unicode 6.0. */
    /* We do not support C11 <threads.h>. */
    // Tile/time loop indices generated by the polyhedral tool.
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,3);t1++) {
        lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
        ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-1,2)),ceild(24*t2-Nz-11,24));t3<=min(min(min(floord(4*Nt+Ny-9,24),floord(12*t1+Ny+15,24)),floord(24*t2+Ny+11,24)),floord(24*t1-24*t2+Nz+Ny+13,24));t3++) {
            for (t4=max(max(max(max(0,ceild(3*t1-3*t2-6,8)),ceild(3*t1-14,16)),ceild(24*t2-Nz-51,64)),ceild(24*t3-Ny-51,64));t4<=min(min(min(min(floord(4*Nt+Nx-9,64),floord(12*t1+Nx+15,64)),floord(24*t2+Nx+11,64)),floord(24*t3+Nx+11,64)),floord(24*t1-24*t2+Nz+Nx+13,64));t4++) {
              for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(64*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),6*t3+4),16*t4+14);t5++) {
                for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
                    lbv=max(64*t4,4*t5+4);
                    ubv=min(64*t4+63,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) 
+ (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  // Fix: the top-level pointer arrays and the tile-size list were leaked.
  free(A);
  free(coef);
  free(tile_size);
  return 0;
}
|
pr79428-2.c | /* PR c/79428 */
/* { dg-options "-fopenmp" } */
void
foo ()
{
#pragma omp sections
#pragma omp section /* { dg-error "'#pragma omp section' may only be used in '#pragma omp sections' construct|not allowed" } */
// { dg-error "expected" "end" { target c } .-1 }
// { dg-error "-:expected" "end" { target c++ } .+1 }
|
irbuilder_nested_parallel_for.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-enable-irbuilder -x c++ -emit-llvm %s -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -o - | FileCheck --check-prefixes=CHECK %s
// RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -verify %s -emit-llvm -o - | FileCheck --check-prefixes=CHECK-DEBUG %s
// expected-no-diagnostics
// TODO: Teach the update script to check new functions too.
#ifndef HEADER
#define HEADER
// CHECK-LABEL: @_Z14parallel_for_0v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB1:@.*]])
// CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK: omp_parallel:
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @_Z14parallel_for_0v..omp_par to void (i32*, i32*, ...)*))
// CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
// CHECK: omp.par.outlined.exit:
// CHECK-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK: omp.par.exit.split:
// CHECK-NEXT: ret void
//
// CHECK-DEBUG-LABEL: @_Z14parallel_for_0v(
// CHECK-DEBUG-NEXT: entry:
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB1:@.*]]), [[DBG10:!dbg !.*]]
// CHECK-DEBUG-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK-DEBUG: omp_parallel:
// CHECK-DEBUG-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @_Z14parallel_for_0v..omp_par to void (i32*, i32*, ...)*)), [[DBG11:!dbg !.*]]
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
// CHECK-DEBUG: omp.par.outlined.exit:
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK-DEBUG: omp.par.exit.split:
// CHECK-DEBUG-NEXT: ret void, [[DBG14:!dbg !.*]]
//
// Simplest case exercised by the IRBuilder: one parallel region containing a
// single worksharing loop with an empty body.
void parallel_for_0(void) {
#pragma omp parallel
  {
#pragma omp for
    for (int i = 0; i < 100; ++i) {
    }
  }
}
// CHECK-LABEL: @_Z14parallel_for_1Pfid(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8
// CHECK-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
// CHECK-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB1]])
// CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK: omp_parallel:
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double*, float**)* @_Z14parallel_for_1Pfid..omp_par.1 to void (i32*, i32*, ...)*), i32* [[A_ADDR]], double* [[B_ADDR]], float** [[R_ADDR]])
// CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT19:%.*]]
// CHECK: omp.par.outlined.exit19:
// CHECK-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK: omp.par.exit.split:
// CHECK-NEXT: ret void
//
// CHECK-DEBUG-LABEL: @_Z14parallel_for_1Pfid(
// CHECK-DEBUG-NEXT: entry:
// CHECK-DEBUG-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8
// CHECK-DEBUG-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
// CHECK-DEBUG-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata float** [[R_ADDR]], [[META41:metadata !.*]], metadata !DIExpression()), [[DBG42:!dbg !.*]]
// CHECK-DEBUG-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[A_ADDR]], [[META43:metadata !.*]], metadata !DIExpression()), [[DBG44:!dbg !.*]]
// CHECK-DEBUG-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata double* [[B_ADDR]], [[META45:metadata !.*]], metadata !DIExpression()), [[DBG46:!dbg !.*]]
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB10:@.*]]), [[DBG47:!dbg !.*]]
// CHECK-DEBUG-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK-DEBUG: omp_parallel:
// CHECK-DEBUG-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[GLOB10]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double*, float**)* @_Z14parallel_for_1Pfid..omp_par.1 to void (i32*, i32*, ...)*), i32* [[A_ADDR]], double* [[B_ADDR]], float** [[R_ADDR]]), [[DBG48:!dbg !.*]]
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_OUTLINED_EXIT19:%.*]]
// CHECK-DEBUG: omp.par.outlined.exit19:
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK-DEBUG: omp.par.exit.split:
// CHECK-DEBUG-NEXT: ret void, [[DBG50:!dbg !.*]]
//
// One level of nesting: the outer parallel region spawns an inner parallel
// region whose worksharing loop stores a + b through the shared pointer r.
void parallel_for_1(float *r, int a, double b) {
#pragma omp parallel
  {
#pragma omp parallel
    {
#pragma omp for
      for (int i = 0; i < 100; ++i) {
        *r = a + b;
      }
    }
  }
}
// CHECK-LABEL: @_Z14parallel_for_2Pfid(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: [[DOTOMP_IV212:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP213:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_LB214:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_UB215:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_STRIDE216:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_IS_LAST217:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[I218:%.*]] = alloca i32, align 4
// CHECK-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8
// CHECK-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
// CHECK-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB1]])
// CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK: omp_parallel:
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double*, float**)* @_Z14parallel_for_2Pfid..omp_par.4 to void (i32*, i32*, ...)*), i32* [[A_ADDR]], double* [[B_ADDR]], float** [[R_ADDR]])
// CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT211:%.*]]
// CHECK: omp.par.outlined.exit211:
// CHECK-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK: omp.par.exit.split:
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_LB214]], align 4
// CHECK-NEXT: store i32 99, i32* [[DOTOMP_UB215]], align 4
// CHECK-NEXT: store i32 1, i32* [[DOTOMP_STRIDE216]], align 4
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST217]], align 4
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM219:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB41:@.*]])
// CHECK-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* [[GLOB2:@.*]], i32 [[OMP_GLOBAL_THREAD_NUM219]], i32 34, i32* [[DOTOMP_IS_LAST217]], i32* [[DOTOMP_LB214]], i32* [[DOTOMP_UB215]], i32* [[DOTOMP_STRIDE216]], i32 1, i32 1)
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_UB215]], align 4
// CHECK-NEXT: [[CMP220:%.*]] = icmp sgt i32 [[TMP0]], 99
// CHECK-NEXT: br i1 [[CMP220]], label [[COND_TRUE221:%.*]], label [[COND_FALSE222:%.*]]
// CHECK: cond.true221:
// CHECK-NEXT: br label [[COND_END223:%.*]]
// CHECK: cond.false222:
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_UB215]], align 4
// CHECK-NEXT: br label [[COND_END223]]
// CHECK: cond.end223:
// CHECK-NEXT: [[COND224:%.*]] = phi i32 [ 99, [[COND_TRUE221]] ], [ [[TMP1]], [[COND_FALSE222]] ]
// CHECK-NEXT: store i32 [[COND224]], i32* [[DOTOMP_UB215]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_LB214]], align 4
// CHECK-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_IV212]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND225:%.*]]
// CHECK: omp.inner.for.cond225:
// CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV212]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB215]], align 4
// CHECK-NEXT: [[CMP226:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]]
// CHECK-NEXT: br i1 [[CMP226]], label [[OMP_INNER_FOR_BODY227:%.*]], label [[OMP_INNER_FOR_END236:%.*]]
// CHECK: omp.inner.for.body227:
// CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV212]], align 4
// CHECK-NEXT: [[MUL228:%.*]] = mul nsw i32 [[TMP5]], 1
// CHECK-NEXT: [[ADD229:%.*]] = add nsw i32 0, [[MUL228]]
// CHECK-NEXT: store i32 [[ADD229]], i32* [[I218]], align 4
// CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK-NEXT: [[CONV230:%.*]] = sitofp i32 [[TMP6]] to double
// CHECK-NEXT: [[TMP7:%.*]] = load double, double* [[B_ADDR]], align 8
// CHECK-NEXT: [[ADD231:%.*]] = fadd double [[CONV230]], [[TMP7]]
// CHECK-NEXT: [[CONV232:%.*]] = fptrunc double [[ADD231]] to float
// CHECK-NEXT: [[TMP8:%.*]] = load float*, float** [[R_ADDR]], align 8
// CHECK-NEXT: store float [[CONV232]], float* [[TMP8]], align 4
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE233:%.*]]
// CHECK: omp.body.continue233:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC234:%.*]]
// CHECK: omp.inner.for.inc234:
// CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV212]], align 4
// CHECK-NEXT: [[ADD235:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NEXT: store i32 [[ADD235]], i32* [[DOTOMP_IV212]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND225]]
// CHECK: omp.inner.for.end236:
// CHECK-NEXT: br label [[OMP_LOOP_EXIT237:%.*]]
// CHECK: omp.loop.exit237:
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM238:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB43:@.*]])
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* [[GLOB2]], i32 [[OMP_GLOBAL_THREAD_NUM238]])
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM239:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB1]])
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* [[GLOB7:@.*]], i32 [[OMP_GLOBAL_THREAD_NUM239]])
// CHECK-NEXT: ret void
//
// CHECK-DEBUG-LABEL: @_Z14parallel_for_2Pfid(
// CHECK-DEBUG-NEXT: entry:
// CHECK-DEBUG-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8
// CHECK-DEBUG-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
// CHECK-DEBUG-NEXT: [[DOTOMP_IV212:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[TMP213:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[DOTOMP_LB214:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[DOTOMP_UB215:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[DOTOMP_STRIDE216:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[DOTOMP_IS_LAST217:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[I218:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata float** [[R_ADDR]], [[META77:metadata !.*]], metadata !DIExpression()), [[DBG78:!dbg !.*]]
// CHECK-DEBUG-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[A_ADDR]], [[META79:metadata !.*]], metadata !DIExpression()), [[DBG80:!dbg !.*]]
// CHECK-DEBUG-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata double* [[B_ADDR]], [[META81:metadata !.*]], metadata !DIExpression()), [[DBG82:!dbg !.*]]
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB21:@.*]]), [[DBG83:!dbg !.*]]
// CHECK-DEBUG-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK-DEBUG: omp_parallel:
// CHECK-DEBUG-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[GLOB21]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double*, float**)* @_Z14parallel_for_2Pfid..omp_par.4 to void (i32*, i32*, ...)*), i32* [[A_ADDR]], double* [[B_ADDR]], float** [[R_ADDR]]), [[DBG84:!dbg !.*]]
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_OUTLINED_EXIT211:%.*]]
// CHECK-DEBUG: omp.par.outlined.exit211:
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK-DEBUG: omp.par.exit.split:
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTOMP_IV212]], [[META87:metadata !.*]], metadata !DIExpression()), [[DBG89:!dbg !.*]]
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTOMP_LB214]], [[META90:metadata !.*]], metadata !DIExpression()), [[DBG89]]
// CHECK-DEBUG-NEXT: store i32 0, i32* [[DOTOMP_LB214]], align 4, [[DBG91:!dbg !.*]]
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTOMP_UB215]], [[META92:metadata !.*]], metadata !DIExpression()), [[DBG89]]
// CHECK-DEBUG-NEXT: store i32 99, i32* [[DOTOMP_UB215]], align 4, [[DBG91]]
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTOMP_STRIDE216]], [[META93:metadata !.*]], metadata !DIExpression()), [[DBG89]]
// CHECK-DEBUG-NEXT: store i32 1, i32* [[DOTOMP_STRIDE216]], align 4, [[DBG91]]
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTOMP_IS_LAST217]], [[META94:metadata !.*]], metadata !DIExpression()), [[DBG89]]
// CHECK-DEBUG-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST217]], align 4, [[DBG91]]
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[I218]], [[META95:metadata !.*]], metadata !DIExpression()), [[DBG89]]
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM219:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB79:@.*]])
// CHECK-DEBUG-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* [[GLOB78:@.*]], i32 [[OMP_GLOBAL_THREAD_NUM219]], i32 34, i32* [[DOTOMP_IS_LAST217]], i32* [[DOTOMP_LB214]], i32* [[DOTOMP_UB215]], i32* [[DOTOMP_STRIDE216]], i32 1, i32 1), [[DBG96:!dbg !.*]]
// CHECK-DEBUG-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_UB215]], align 4, [[DBG91]]
// CHECK-DEBUG-NEXT: [[CMP220:%.*]] = icmp sgt i32 [[TMP0]], 99, [[DBG91]]
// CHECK-DEBUG-NEXT: br i1 [[CMP220]], label [[COND_TRUE221:%.*]], label [[COND_FALSE222:%.*]], [[DBG91]]
// CHECK-DEBUG: cond.true221:
// CHECK-DEBUG-NEXT: br label [[COND_END223:%.*]], [[DBG91]]
// CHECK-DEBUG: cond.false222:
// CHECK-DEBUG-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_UB215]], align 4, [[DBG91]]
// CHECK-DEBUG-NEXT: br label [[COND_END223]], [[DBG91]]
// CHECK-DEBUG: cond.end223:
// CHECK-DEBUG-NEXT: [[COND224:%.*]] = phi i32 [ 99, [[COND_TRUE221]] ], [ [[TMP1]], [[COND_FALSE222]] ], [[DBG91]]
// CHECK-DEBUG-NEXT: store i32 [[COND224]], i32* [[DOTOMP_UB215]], align 4, [[DBG91]]
// CHECK-DEBUG-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_LB214]], align 4, [[DBG91]]
// CHECK-DEBUG-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_IV212]], align 4, [[DBG91]]
// CHECK-DEBUG-NEXT: br label [[OMP_INNER_FOR_COND225:%.*]], [[DBG97:!dbg !.*]]
// CHECK-DEBUG: omp.inner.for.cond225:
// CHECK-DEBUG-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV212]], align 4, [[DBG91]]
// CHECK-DEBUG-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB215]], align 4, [[DBG91]]
// CHECK-DEBUG-NEXT: [[CMP226:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]], [[DBG98:!dbg !.*]]
// CHECK-DEBUG-NEXT: br i1 [[CMP226]], label [[OMP_INNER_FOR_BODY227:%.*]], label [[OMP_INNER_FOR_END236:%.*]], [[DBG97]]
// CHECK-DEBUG: omp.inner.for.body227:
// CHECK-DEBUG-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV212]], align 4, [[DBG91]]
// CHECK-DEBUG-NEXT: [[MUL228:%.*]] = mul nsw i32 [[TMP5]], 1, [[DBG99:!dbg !.*]]
// CHECK-DEBUG-NEXT: [[ADD229:%.*]] = add nsw i32 0, [[MUL228]], [[DBG99]]
// CHECK-DEBUG-NEXT: store i32 [[ADD229]], i32* [[I218]], align 4, [[DBG99]]
// CHECK-DEBUG-NEXT: [[TMP6:%.*]] = load i32, i32* [[A_ADDR]], align 4, [[DBG100:!dbg !.*]]
// CHECK-DEBUG-NEXT: [[CONV230:%.*]] = sitofp i32 [[TMP6]] to double, [[DBG100]]
// CHECK-DEBUG-NEXT: [[TMP7:%.*]] = load double, double* [[B_ADDR]], align 8, [[DBG101:!dbg !.*]]
// CHECK-DEBUG-NEXT: [[ADD231:%.*]] = fadd double [[CONV230]], [[TMP7]], [[DBG102:!dbg !.*]]
// CHECK-DEBUG-NEXT: [[CONV232:%.*]] = fptrunc double [[ADD231]] to float, [[DBG100]]
// CHECK-DEBUG-NEXT: [[TMP8:%.*]] = load float*, float** [[R_ADDR]], align 8, [[DBG103:!dbg !.*]]
// CHECK-DEBUG-NEXT: store float [[CONV232]], float* [[TMP8]], align 4, [[DBG104:!dbg !.*]]
// CHECK-DEBUG-NEXT: br label [[OMP_BODY_CONTINUE233:%.*]], [[DBG105:!dbg !.*]]
// CHECK-DEBUG: omp.body.continue233:
// CHECK-DEBUG-NEXT: br label [[OMP_INNER_FOR_INC234:%.*]], [[DBG96]]
// CHECK-DEBUG: omp.inner.for.inc234:
// CHECK-DEBUG-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV212]], align 4, [[DBG91]]
// CHECK-DEBUG-NEXT: [[ADD235:%.*]] = add nsw i32 [[TMP9]], 1, [[DBG98]]
// CHECK-DEBUG-NEXT: store i32 [[ADD235]], i32* [[DOTOMP_IV212]], align 4, [[DBG98]]
// CHECK-DEBUG-NEXT: br label [[OMP_INNER_FOR_COND225]], [[DBG96]], [[LOOP106:!llvm.loop !.*]]
// CHECK-DEBUG: omp.inner.for.end236:
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_EXIT237:%.*]], [[DBG96]]
// CHECK-DEBUG: omp.loop.exit237:
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM238:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB82:@.*]])
// CHECK-DEBUG-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* [[GLOB81:@.*]], i32 [[OMP_GLOBAL_THREAD_NUM238]]), [[DBG107:!dbg !.*]]
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM239:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[GLOB82]]), [[DBG107]]
// CHECK-DEBUG-NEXT: call void @__kmpc_barrier(%struct.ident_t* [[GLOB83:@.*]], i32 [[OMP_GLOBAL_THREAD_NUM239]]), [[DBG107]]
// CHECK-DEBUG-NEXT: ret void, [[DBG108:!dbg !.*]]
//
// Deeply nested case: three levels of parallel regions with worksharing
// loops before, between, and after the nested regions, plus one `omp for`
// after the outermost parallel region ends. Every loop performs the same
// store (*r = a + b); the structure exercises the IRBuilder's handling of
// multiple outlined functions and of `omp for` at several nesting depths.
void parallel_for_2(float *r, int a, double b) {
#pragma omp parallel
  {
#pragma omp for
    for (int i = 0; i < 100; ++i)
      *r = a + b;
#pragma omp parallel
    {
#pragma omp for
      for (int i = 0; i < 100; ++i)
        *r = a + b;
#pragma omp parallel
      {
#pragma omp for
        for (int i = 0; i < 100; ++i)
          *r = a + b;
      }
#pragma omp for
      for (int i = 0; i < 100; ++i)
        *r = a + b;
#pragma omp parallel
      {
#pragma omp for
        for (int i = 0; i < 100; ++i)
          *r = a + b;
      }
#pragma omp for
      for (int i = 0; i < 100; ++i)
        *r = a + b;
    }
#pragma omp for
    for (int i = 0; i < 100; ++i)
      *r = a + b;
  }
  // Worksharing loop outside any enclosing parallel region in this function
  // (binds to the parallel region of the caller, if any).
#pragma omp for
  for (int i = 0; i < 100; ++i)
    *r = a + b;
}
#endif
|
SPHCalcDensityFunctor.h | /**
* @file SPHCalcDensityFunctor.h
* @author seckler
* @date 19.01.18
*/
#pragma once
#include "autopas/pairwiseFunctors/Functor.h"
#include "autopas/particles/OwnershipState.h"
#include "autopas/sph/SPHKernels.h"
namespace autopas::sph {
/**
* Class that defines the density functor.
* It is used to calculate the density based on the given SPH kernel.
* @tparam Particle
* @tparam ParticleCell
*/
template <class Particle>
class SPHCalcDensityFunctor : public Functor<Particle, SPHCalcDensityFunctor<Particle>> {
public:
/// soa arrays type
using SoAArraysType = typename Particle::SoAArraysType;
  /// Default constructor; initializes the base Functor with a cutoff of 0.
  SPHCalcDensityFunctor() : autopas::Functor<Particle, SPHCalcDensityFunctor<Particle>>(0.){};
  /// This functor takes part in auto-tuning measurements.
  bool isRelevantForTuning() override { return true; }
  /// The Newton-3 optimization (one kernel call updates both particles) is supported.
  bool allowsNewton3() override { return true; }
  /// Operation without the Newton-3 optimization is supported as well.
  bool allowsNonNewton3() override { return true; }
  /**
   * Calculates the density contribution of the interaction of particle i and j.
   * It is not symmetric, because the smoothing lengths of the two particles can
   * be different.
   * @param i first particle of the interaction
   * @param j second particle of the interaction
   * @param newton3 defines whether or not to use newton 3
   */
  // NOTE(review): a default argument on a virtual function is fragile — the
  // default is bound statically, so callers through a base pointer may see a
  // different default. Kept unchanged for interface compatibility.
  inline void AoSFunctor(Particle &i, Particle &j, bool newton3 = true) override {
    // Dummy particles take part in no interaction.
    if (i.isDummy() or j.isDummy()) {
      return;
    }
    // Distance vector from i to j.
    const std::array<double, 3> dr = utils::ArrayMath::sub(j.getR(), i.getR());  // ep_j[j].pos - ep_i[i].pos;
    // j's contribution to i's density, evaluated with i's smoothing length.
    const double density =
        j.getMass() * SPHKernels::W(dr, i.getSmoothingLength());  // ep_j[j].mass * W(dr, ep_i[i].smth)
    i.addDensity(density);
    if (newton3) {
      // Newton 3:
      // W is symmetric in dr, so no -dr needed, i.e. we can reuse dr
      const double density2 = i.getMass() * SPHKernels::W(dr, j.getSmoothingLength());
      j.addDensity(density2);
    }
  }
  /**
   * Get the number of floating point operations used in one full kernel call
   * (one interaction evaluated in both directions, i.e. with newton3).
   * @return the number of floating point operations
   */
  static unsigned long getNumFlopsPerKernelCall() {
    unsigned long flops = 0;
    flops += 3;                           // calculating dr (3 subtractions)
    flops += 2 * SPHKernels::getFlopsW();  // flops for calling W, once per direction
    flops += 2 * 1;                       // calculating density (one multiply per direction)
    flops += 2 * 1;                       // adding density (one add per direction)
    return flops;
  }
  /**
   * @copydoc Functor::SoAFunctorSingle(SoAView<SoAArraysType>, bool)
   * This functor ignores the newton3 value, as we do not expect any benefit from disabling newton3.
   * The Newton-3 write-back (densityptr[j]) is therefore performed unconditionally.
   */
  void SoAFunctorSingle(SoAView<SoAArraysType> soa, bool newton3) override {
    if (soa.getNumParticles() == 0) return;
    // Raw __restrict pointers into the SoA buffers; the no-aliasing promise
    // enables auto-vectorization of the inner loop.
    double *const __restrict xptr = soa.template begin<Particle::AttributeNames::posX>();
    double *const __restrict yptr = soa.template begin<Particle::AttributeNames::posY>();
    double *const __restrict zptr = soa.template begin<Particle::AttributeNames::posZ>();
    double *const __restrict densityptr = soa.template begin<Particle::AttributeNames::density>();
    double *const __restrict smthptr = soa.template begin<Particle::AttributeNames::smth>();
    double *const __restrict massptr = soa.template begin<Particle::AttributeNames::mass>();
    const auto *const __restrict ownedStatePtr = soa.template begin<Particle::AttributeNames::ownershipState>();
    // NOTE(review): loop counters are unsigned int while getNumParticles()
    // returns a size_t — would truncate for > UINT_MAX particles.
    size_t numParticles = soa.getNumParticles();
    for (unsigned int i = 0; i < numParticles; ++i) {
      // Skip dummy particles; they take part in no interaction.
      if (ownedStatePtr[i] == OwnershipState::dummy) {
        continue;
      }
      // Accumulate i's density locally; written back once after the j-loop.
      double densacc = 0.;
      // icpc vectorizes this.
      // g++ only with -ffast-math or -funsafe-math-optimizations
#pragma omp simd reduction(+ : densacc)
      for (unsigned int j = i + 1; j < numParticles; ++j) {
        const double drx = xptr[i] - xptr[j];
        const double dry = yptr[i] - yptr[j];
        const double drz = zptr[i] - zptr[j];
        const double drx2 = drx * drx;
        const double dry2 = dry * dry;
        const double drz2 = drz * drz;
        const double dr2 = drx2 + dry2 + drz2;
        // If the second particle is a dummy, mask its contribution to zero
        // (masking instead of branching keeps the loop vectorizable).
        const bool mask = ownedStatePtr[j] != OwnershipState::dummy;
        const double density = mask ? massptr[j] * SPHKernels::W(dr2, smthptr[i]) : 0.;
        densacc += density;
        // Newton 3:
        // W is symmetric in dr, so no -dr needed, i.e. we can reuse dr
        const double density2 = mask ? massptr[i] * SPHKernels::W(dr2, smthptr[j]) : 0.;
        densityptr[j] += density2;
      }
      densityptr[i] += densacc;
    }
  }
  /**
   * @copydoc Functor::SoAFunctorPair(SoAView<SoAArraysType>, SoAView<SoAArraysType>, bool)
   * Accumulates into soa1 unconditionally; the symmetric write into soa2 is
   * performed only when newton3 is enabled.
   */
  void SoAFunctorPair(SoAView<SoAArraysType> soa1, SoAView<SoAArraysType> soa2, bool newton3) override {
    if (soa1.getNumParticles() == 0 || soa2.getNumParticles() == 0) return;
    // Raw __restrict pointers into both SoA buffers; the no-aliasing promise
    // enables auto-vectorization of the inner loop.
    double *const __restrict xptr1 = soa1.template begin<Particle::AttributeNames::posX>();
    double *const __restrict yptr1 = soa1.template begin<Particle::AttributeNames::posY>();
    double *const __restrict zptr1 = soa1.template begin<Particle::AttributeNames::posZ>();
    double *const __restrict densityptr1 = soa1.template begin<Particle::AttributeNames::density>();
    double *const __restrict smthptr1 = soa1.template begin<Particle::AttributeNames::smth>();
    double *const __restrict massptr1 = soa1.template begin<Particle::AttributeNames::mass>();
    double *const __restrict xptr2 = soa2.template begin<Particle::AttributeNames::posX>();
    double *const __restrict yptr2 = soa2.template begin<Particle::AttributeNames::posY>();
    double *const __restrict zptr2 = soa2.template begin<Particle::AttributeNames::posZ>();
    double *const __restrict densityptr2 = soa2.template begin<Particle::AttributeNames::density>();
    double *const __restrict smthptr2 = soa2.template begin<Particle::AttributeNames::smth>();
    double *const __restrict massptr2 = soa2.template begin<Particle::AttributeNames::mass>();
    const auto *const __restrict ownedStatePtr1 = soa1.template begin<Particle::AttributeNames::ownershipState>();
    const auto *const __restrict ownedStatePtr2 = soa2.template begin<Particle::AttributeNames::ownershipState>();
    // NOTE(review): loop counters are unsigned int while getNumParticles()
    // returns a size_t — would truncate for > UINT_MAX particles.
    size_t numParticlesi = soa1.getNumParticles();
    for (unsigned int i = 0; i < numParticlesi; ++i) {
      // Skip dummy particles of soa1; they take part in no interaction.
      if (ownedStatePtr1[i] == OwnershipState::dummy) {
        continue;
      }
      // Accumulate i's density locally; written back once after the j-loop.
      double densacc = 0.;
      size_t numParticlesj = soa2.getNumParticles();
      // icpc vectorizes this.
      // g++ only with -ffast-math or -funsafe-math-optimizations
#pragma omp simd reduction(+ : densacc)
      for (unsigned int j = 0; j < numParticlesj; ++j) {
        const double drx = xptr1[i] - xptr2[j];
        const double dry = yptr1[i] - yptr2[j];
        const double drz = zptr1[i] - zptr2[j];
        const double drx2 = drx * drx;
        const double dry2 = dry * dry;
        const double drz2 = drz * drz;
        const double dr2 = drx2 + dry2 + drz2;
        // If the second particle is a dummy, mask its contribution to zero
        // (masking instead of branching keeps the loop vectorizable).
        const bool mask = ownedStatePtr2[j] != OwnershipState::dummy;
        const double density = mask ? massptr2[j] * SPHKernels::W(dr2, smthptr1[i]) : 0.;
        densacc += density;
        if (newton3) {
          // Newton 3:
          // W is symmetric in dr, so no -dr needed, i.e. we can reuse dr
          const double density2 = mask ? massptr1[i] * SPHKernels::W(dr2, smthptr2[j]) : 0.;
          densityptr2[j] += density2;
        }
      }
      densityptr1[i] += densacc;
    }
  }
// clang-format off
/**
 * @copydoc Functor::SoAFunctorVerlet(SoAView<SoAArraysType> soa, const size_t indexFirst, const std::vector<size_t, autopas::AlignedAllocator<size_t>> &neighborList, bool newton3)
 */
// clang-format on
void SoAFunctorVerlet(SoAView<SoAArraysType> soa, const size_t indexFirst,
                      const std::vector<size_t, autopas::AlignedAllocator<size_t>> &neighborList,
                      bool newton3) override {
  if (soa.getNumParticles() == 0) return;
  const auto *const __restrict ownedStatePtr = soa.template begin<Particle::AttributeNames::ownershipState>();
  // checks whether particle i is owned; dummies contribute nothing.
  if (ownedStatePtr[indexFirst] == OwnershipState::dummy) {
    return;
  }
  double *const __restrict xptr = soa.template begin<Particle::AttributeNames::posX>();
  double *const __restrict yptr = soa.template begin<Particle::AttributeNames::posY>();
  double *const __restrict zptr = soa.template begin<Particle::AttributeNames::posZ>();
  double *const __restrict densityptr = soa.template begin<Particle::AttributeNames::density>();
  double *const __restrict smthptr = soa.template begin<Particle::AttributeNames::smth>();
  double *const __restrict massptr = soa.template begin<Particle::AttributeNames::mass>();
  double densacc = 0;
  // BUGFIX: this declaration was corrupted to "const auto ¤tList"
  // (a mangled "&currentList", HTML entity damage) and did not compile;
  // restore the reference declaration.
  const auto &currentList = neighborList;
  size_t listSize = currentList.size();
  // icpc vectorizes this.
  // g++ only with -ffast-math or -funsafe-math-optimizations
#pragma omp simd reduction(+ : densacc)
  for (unsigned int j = 0; j < listSize; ++j) {
    const double drx = xptr[indexFirst] - xptr[currentList[j]];
    const double dry = yptr[indexFirst] - yptr[currentList[j]];
    const double drz = zptr[indexFirst] - zptr[currentList[j]];
    const double drx2 = drx * drx;
    const double dry2 = dry * dry;
    const double drz2 = drz * drz;
    const double dr2 = drx2 + dry2 + drz2;
    // if second particle is a dummy, we skip the interaction.
    const bool mask = ownedStatePtr[currentList[j]] != OwnershipState::dummy;
    const double density = mask ? massptr[currentList[j]] * SPHKernels::W(dr2, smthptr[indexFirst]) : 0.;
    densacc += density;
    if (newton3) {
      // Newton 3:
      // W is symmetric in dr, so no -dr needed, i.e. we can reuse dr
      const double density2 = mask ? massptr[indexFirst] * SPHKernels::W(dr2, smthptr[currentList[j]]) : 0.;
      densityptr[currentList[j]] += density2;
    }
  }
  densityptr[indexFirst] += densacc;
}
/**
* @copydoc Functor::getNeededAttr()
*/
constexpr static auto getNeededAttr() {
return std::array<typename Particle::AttributeNames, 7>{
Particle::AttributeNames::mass, Particle::AttributeNames::posX, Particle::AttributeNames::posY,
Particle::AttributeNames::posZ, Particle::AttributeNames::smth, Particle::AttributeNames::density,
Particle::AttributeNames::ownershipState};
}
/**
* @copydoc Functor::getNeededAttr(std::false_type)
*/
constexpr static auto getNeededAttr(std::false_type) {
return std::array<typename Particle::AttributeNames, 6>{
Particle::AttributeNames::mass, Particle::AttributeNames::posX, Particle::AttributeNames::posY,
Particle::AttributeNames::posZ, Particle::AttributeNames::smth, Particle::AttributeNames::ownershipState};
}
/**
* @copydoc Functor::getComputedAttr()
*/
constexpr static auto getComputedAttr() {
return std::array<typename Particle::AttributeNames, 1>{Particle::AttributeNames::density};
}
};
} // namespace autopas::sph
|
hardswish_hcl_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: renzun@openailab.com
*/
#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include <math.h>
#include <arm_neon.h>
#include "hardswish_param.h"
/* hardswish keeps no per-node state, so node initialization is a no-op */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* nothing was allocated in init_node, so release is a no-op */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* no shape-dependent precomputation is required before run() */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Computes hardswish: out = x * clamp(alpha * x + beta, 0, 1), element-wise,
 * over an FP32 NCHW tensor. N*C planes are processed in parallel; within a
 * plane the bulk is handled 4 floats at a time with NEON and the remaining
 * chan_size % 4 elements with scalar code. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;
    struct ir_tensor* output_tensor;
    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    struct hardswish_param* hardswish_param = ( struct hardswish_param* )ir_node->op.param_mem;
    float alpha = hardswish_param->alpha;
    float beta = hardswish_param->beta;
    /* clamp thresholds expressed on x itself:
     * alpha*x+beta <= 0  <=>  x <= lower;  alpha*x+beta >= 1  <=>  x >= upper.
     * NOTE(review): this assumes alpha != 0 (division below) and alpha > 0
     * for the inequality directions -- confirm the valid parameter range. */
    float lower = -beta / alpha;
    float upper = (1.f / alpha) + lower;
    int chan_num = (input_tensor->dims[0]) * (input_tensor->dims[1]);
    int chan_size = (input_tensor->dims[2]) * (input_tensor->dims[3]);
    float* pdata = ( float* )input_tensor->data;
    float* pout_data = ( float* )output_tensor->data;
    float32x4_t _zero = vdupq_n_f32(0.f);
    float32x4_t _one = vdupq_n_f32(1.f);
    int num_thread = exec_graph->num_thread;
#pragma omp parallel for num_threads(num_thread)
    for (int j = 0; j < chan_num; j++)
    {
        float* data = pdata + j * chan_size;
        float* out_data = pout_data + j * chan_size;
        /* chan_size & -4 rounds down to a multiple of 4 */
        for (int i = 0; i < (chan_size & -4); i += 4)
        {
            float32x4_t _p = vld1q_f32(data + i);
            float32x4_t _ans = vdupq_n_f32(beta);
            _ans = vmlaq_n_f32(_ans, _p, alpha); /* alpha * x + beta */
            _ans = vmaxq_f32(_ans, _zero);       /* clamp low at 0 */
            _ans = vminq_f32(_ans, _one);        /* clamp high at 1 */
            _ans = vmulq_f32(_ans, _p);          /* times x */
            vst1q_f32(out_data + i, _ans);
        }
        /* scalar tail: same formula written with the x-space thresholds */
        for (int i = chan_size & ~3; i < chan_size; i++)
        {
            if (data[i] < lower)
                out_data[i] = 0.f;
            else if (data[i] > upper)
                out_data[i] = data[i];
            else
                out_data[i] = data[i] * (data[i] * alpha + beta);
        }
    }
    return 0;
}
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
struct ir_node* ir_node = exec_node;
struct ir_graph* ir_graph = ir_node->graph;
struct ir_tensor* input_tensor;
input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
if (input_tensor->data_type != TENGINE_DT_FP32 || input_tensor->layout != TENGINE_LAYOUT_NCHW)
return 0;
return OPS_SCORE_BEST;
}
/* lifecycle table for this op implementation; unimplemented hooks are NULL */
static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};
/* registers this implementation for OP_HARDSWISH (invoked via AUTO_REGISTER_OPS) */
static int reg_hardswish_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_HARDSWISH, &hcl_node_ops);
}
/* removes this implementation again (invoked via AUTO_UNREGISTER_OPS) */
static int unreg_hardswish_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_HARDSWISH, &hcl_node_ops);
}
/* hook registration/unregistration into library load and unload */
AUTO_REGISTER_OPS(reg_hardswish_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_hardswish_hcl_ops);
|
GB_binop__bget_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bget_int16
// A.*B function (eWiseMult): GB_AemultB__bget_int16
// A*D function (colscale): (none)
// D*A function (rowscale):   (none)
// C+=B function (dense accum): GB_Cdense_accumB__bget_int16
// C+=b function (dense accum): GB_Cdense_accumb__bget_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bget_int16
// C=scalar+B GB_bind1st__bget_int16
// C=scalar+B' GB_bind1st_tran__bget_int16
// C=A+scalar GB_bind2nd__bget_int16
// C=A'+scalar GB_bind2nd_tran__bget_int16
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = GB_BITGET (aij, bij, int16_t, 16)
// Type and operator plumbing consumed by the kernel templates included below.
#define GB_ATYPE \
    int16_t
#define GB_BTYPE \
    int16_t
#define GB_CTYPE \
    int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = GB_BITGET (x, y, int16_t, 16) ;
// op is second
#define GB_OP_IS_SECOND \
    0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// ("(none)" means no CBLAS gateway is generated for this operator)
#define GB_CBLAS_AXPY \
    (none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BGET || GxB_NO_INT16 || GxB_NO_BGET_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled placeholder: the dense C += A+B kernel is not generated for BGET.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator.
GrB_Info GB_Cdense_ewise3_noaccum__bget_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // numeric work is done by the shared template, driven by the macros above
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense; the *_slice arrays partition the work across
// ntasks tasks for the included template.
GrB_Info GB_Cdense_accumB__bget_int16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar.
GrB_Info GB_Cdense_accumb__bget_int16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the block above already returned); generator boilerplate
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled placeholder: the colscale (C = A*D) kernel is not generated
// for this operator.
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled placeholder: the rowscale (C = D*B) kernel is not generated for
// this operator. The placeholder name is "(none)" -- every other disabled
// stub in this generated file uses "(none)"; "(node)" was a corruption.
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// workspace cleanup used on exit from GB_AaddB; the slice arrays are
// NULL-initialized below, so freeing is safe whether or not the included
// template allocated them.
#undef GB_FREE_ALL
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

// eWiseAdd: C = A+B or C<M> = A+B
GrB_Info GB_AaddB__bget_int16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
GrB_Info GB_AemultB__bget_int16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    // GB_FREE_ALL (defined above GB_AaddB) is invoked by the template's
    // error paths as well as here on success.
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = op (x, Bx [p]) for every entry p, with the scalar x bound first.
GrB_Info GB_bind1st__bget_int16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // positions where GBB (Bb, p) is false hold no entry; skip them
        if (!GBB (Bb, p)) continue ;
        int16_t bij = Bx [p] ;
        Cx [p] = GB_BITGET (x, bij, int16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = op (Ax [p], y) for every entry p, with the scalar y bound second.
GrB_Info GB_bind2nd__bget_int16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // positions where GBB (Ab, p) is false hold no entry; skip them
        if (!GBB (Ab, p)) continue ;
        int16_t aij = Ax [p] ;
        Cx [p] = GB_BITGET (aij, y, int16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int16_t aij = Ax [pA] ;                         \
    Cx [pC] = GB_BITGET (x, aij, int16_t, 16) ;     \
}

// C = op (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB_bind1st_tran__bget_int16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code after this function
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int16_t aij = Ax [pA] ;                         \
    Cx [pC] = GB_BITGET (aij, y, int16_t, 16) ;     \
}

// C = op (A', y): transpose A and apply the operator with y bound second.
GrB_Info GB_bind2nd_tran__bget_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
rnn_impl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file rnn_impl.h
* \brief
* \author Shu Zhang
*/
#ifndef MXNET_OPERATOR_RNN_IMPL_H_
#define MXNET_OPERATOR_RNN_IMPL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <algorithm>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include "./math.h"
#include "./math_functions-inl.h"
#include "./operator_common.h"
#include "./mshadow_op.h"
#include "./linalg.h"
namespace mxnet {
namespace op {
// Logistic function: sigmoid(x) = 1 / (1 + e^{-x}).
template<typename DType>
inline DType sigmoid(DType x) {
  const DType denom = 1.0f + exp(-x);
  return 1.0f / denom;
}
// ReLU: max(x, 0).
// BUGFIX: the positive branch used static_cast<float>(x), which silently
// truncated double-precision inputs; return x in DType without narrowing.
template<typename DType>
inline DType relu(DType x) {
  return x > static_cast<DType>(0) ? x : static_cast<DType>(0);
}
// Forward pass (training) of one unidirectional LSTM layer.
// ws  : scratch for gate pre-activations (input projection yx and recurrent
//       projection yh, plus the current hidden state h).
// rs  : reserve space keeping every step's cell state and gate activations
//       for the backward pass.
// bid : true for the reversed-in-time direction of a bidirectional layer;
//       its reserve data starts at rs + T*N*H*7 and its output occupies the
//       upper H columns of y.
// T/N/I/H: sequence length, batch size, input size, hidden size.
// hy_ptr/cy_ptr receive the final hidden/cell state when state_outputs.
template<typename DType>
void LstmForwardTrainingSingleLayer(DType* ws,
                                    DType* rs,
                                    bool state_outputs,
                                    bool bid,
                                    const int T,
                                    const int N,
                                    const int I,
                                    const int H,
                                    const Tensor<cpu, 2, DType> &x,
                                    const Tensor<cpu, 2, DType> &hx,
                                    const Tensor<cpu, 2, DType> &cx,
                                    const Tensor<cpu, 3, DType> &y,
                                    DType* w_ptr,
                                    DType* b_ptr,
                                    DType* hy_ptr,
                                    DType* cy_ptr) {
  using namespace mshadow;
  // weights: wx is (4H, I), wh is (4H, H); bx/bh are input/recurrent biases,
  // one row of length H per gate
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
  const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
  const Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, 4 * H));
  const Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, 4 * H));
  const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
  const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
  Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
  DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
  Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
  Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
  const int offset = bid ? H : 0;  // reverse direction fills the upper half of y
  const DType alpha = 1.0;
  const DType beta = 0.0;
  const int cell_size = N * H;
  // input projection for all T steps at once
  linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int i = 0; i < T; ++i) {
    int t = bid ? T - 1 - i : i;  // walk time backwards in the reverse direction
    // recurrent projection of the previous hidden state (hx at step 0)
    linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
#pragma omp parallel for num_threads(omp_threads)
    for (int jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;
      int k = jk % H;
      // standard LSTM cell: input, forget, candidate, output gates
      DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
      DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
      DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
      DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
      DType ct = (i ? c[i-1][j][k] : cx[j][k]) * ft + it * gt;
      DType ht = ot * tanh(ct);
      h[j][k] = ht;
      // reserve everything the backward pass needs
      y[t][j][k + offset] = ht;
      c[i][j][k] = ct;
      ifgo[i][j][k][0] = it;
      ifgo[i][j][k][1] = ft;
      ifgo[i][j][k][2] = gt;
      ifgo[i][j][k][3] = ot;
      if (i == T - 1 && state_outputs) {
        hy_ptr[jk] = ht;
        cy_ptr[jk] = ct;
      }
    }
  }
}
// Forward pass (training) over all L layers (both directions when D == 2).
// rs is partitioned into a dropout-mask region of (L-1)*D*T*N*H followed by
// per-layer reserve blocks (r_size each) consumed by the single-layer kernel;
// each layer's output y lives at offset y_offset within its reserve block.
template <typename DType>
void LstmForwardTraining(DType* ws,
                         DType* rs,
                         bool state_outputs,
                         const int L,
                         const int D,
                         const int T,
                         const int N,
                         const int I,
                         const int H,
                         DType* x_ptr,
                         DType* hx_ptr,
                         DType* cx_ptr,
                         DType* w_ptr,
                         DType* b_ptr,
                         DType* y_ptr,
                         DType* hy_ptr,
                         DType* cy_ptr,
                         const float dropout) {
  DType* dropout_random = rs;
  DType* rs2 = dropout_random + (L - 1) * D * T * N * H;
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  const int b_size = 2 * H * 4;
  const int r_size = D * T * N * H * 6;
  const int y_offset = T * N * H * 5;
  const int cell_size = N * H;
  unsigned int seed_ = 17 + rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
  int idx = 0;  // state & cell state's idx;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int i = 0; i < L; ++i) {
    const int input_size = i ? H * D : I;
    const int w_size = (input_size + H) * H * 4;
    Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 3, DType> y(rs2 + y_offset, Shape3(T, N, H * D));
    LstmForwardTrainingSingleLayer<DType>(ws, rs2, state_outputs, false, T, N, input_size, H, x,
                                          hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    if (D == 2) {
      // bidirectional: advance to the reverse direction's weights/states and
      // run the same layer backwards in time
      w_ptr += w_size;
      b_ptr += b_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
      LstmForwardTrainingSingleLayer<DType>(ws, rs2, state_outputs, true, T, N, input_size, H, x,
                                            hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    }
    if (i != L - 1) {
      w_ptr += w_size;
      b_ptr += b_size;
      if (dropout > 0.0f) {
        // inverted dropout on the layer output; the mask is saved in
        // dropout_random for the backward pass.
        // NOTE(review): seed_ is shared by all OMP threads, so rand_r here is
        // racy and the mask is not reproducible -- confirm this is intended.
#pragma omp parallel for num_threads(omp_threads)
        for (int j = 0; j < T * N * H * D; j++) {
          int rand_data = rand_r(&seed_);
          if (static_cast<float>(rand_data % 1000) < static_cast<float>(1000 * dropout)) {
            dropout_random[i * T * N * H * D + j] = 0;
            y.dptr_[j] = 0;
          } else {
            dropout_random[i * T * N * H * D + j] = 1.0f - dropout;
            y.dptr_[j] = y.dptr_[j] / (1.0f - dropout);
          }
        }
      }
      x_ptr = y.dptr_;  // next layer reads this layer's output
      rs2 += r_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
    }
  }
  // copy the last layer's output out of its reserve block into y_ptr
#pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < T * N * H * D; ++i) {
    y_ptr[i] = (rs2 + y_offset)[i];
  }
}
// Forward pass (inference) of one unidirectional LSTM layer. Unlike the
// training variant, only the previous step's cell state is kept, not the
// whole history. P > 0 enables an LSTMP projection: h is multiplied by whr
// and the projected state r (N x P) becomes both the recurrent input and the
// layer output.
template<typename DType>
void LstmForwardInferenceSingleLayer(DType* ws,
                                     bool state_outputs,
                                     bool bid,
                                     const int T,
                                     const int N,
                                     const int I,
                                     const int H,
                                     const int P,
                                     const Tensor<cpu, 2, DType> &x,
                                     const Tensor<cpu, 2, DType> &hx,
                                     const Tensor<cpu, 2, DType> &cx,
                                     const Tensor<cpu, 3, DType> &y,
                                     DType* w_ptr,
                                     DType* b_ptr,
                                     DType* hy_ptr,
                                     DType* cy_ptr) {
  using namespace mshadow;
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, (P ? P : H)));
  // whr (projection weight) is only meaningful when P > 0; the 1x1 view is a
  // placeholder so the Tensor can be declared unconditionally
  Tensor<cpu, 2, DType> whr(w_ptr, Shape2(1, 1));
  if (P > 0) whr = Tensor<cpu, 2, DType>(wh.dptr_ + P * 4 * H, Shape2(P, H));
  const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
  const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
  Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, H * 4));
  Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, H * 4));
  const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
  const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
  Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
  Tensor<cpu, 2, DType> c(h.dptr_ + N * H, Shape2(N, H));
  // r aliases hy_ptr: the projected state is written straight to the output
  Tensor<cpu, 2, DType> r(hy_ptr, Shape2(1, 1));
  if (P > 0) r = Tensor<cpu, 2, DType>(hy_ptr, Shape2(N, P));
  const int offset = bid ? H : 0;
  const int proj_offset = bid ? P : 0;
  const DType alpha = 1.0;
  const DType beta = 0.0;
  const int cell_size = N * H;
  // input projection for all T steps at once
  linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int i = 0; i < T; ++i) {
    int t = bid ? T - 1 - i : i;
    // recurrent projection: projected state r when LSTMP, else hidden state h
    if (P > 0) {
      linalg_gemm(i ? r : hx, wh, yh_flat, alpha, beta, false, true);
    } else {
      linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
    }
#pragma omp parallel for num_threads(omp_threads)
    for (int jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;
      int k = jk % H;
      DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
      DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
      DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
      DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
      DType ct = (i ? c[j][k] : cx[j][k]) * ft + it * gt;
      DType ht = ot * tanh(ct);
      if (P == 0) y[t][j][k + offset] = ht;
      if (i == T - 1 && state_outputs) {
        // at the last step the final states go to the outputs instead of c
        if (P == 0) hy_ptr[jk] = ht;
        cy_ptr[jk] = ct;
      } else {
        c[j][k] = ct;
      }
      h[j][k] = ht;
    }
    if (P > 0) {
      // LSTMP: project h down to P dims and copy into the output sequence
      linalg_gemm(h, whr, r, alpha, beta, false, true);
#pragma omp parallel for num_threads(omp_threads)
      for (int j = 0; j < N; ++j) {
        std::memcpy(y[t][j].dptr_ + proj_offset, r[j].dptr_, P * sizeof(DType));
      }
    }
  }
}
// Forward pass (inference) over all L layers (both directions when D == 2).
// For bidirectional stacks the per-layer outputs ping-pong between y_ptr and
// a temporary buffer inside ws, with flag initialized so the last layer
// always lands directly in y_ptr.
template <typename DType>
void LstmForwardInference(DType* ws,
                          bool state_outputs,
                          const int L,
                          const int D,
                          const int T,
                          const int N,
                          const int I,
                          const int H,
                          const int P,
                          DType* x_ptr,
                          DType* hx_ptr,
                          DType* cx_ptr,
                          DType* w_ptr,
                          DType* b_ptr,
                          DType* y_ptr,
                          DType* hy_ptr,
                          DType* cy_ptr) {
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, P ? P : H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  const int b_size = 2 * H * 4;
  const int cell_size = N * H;
  const int projection_size = (P ? P : H) * N;
  DType* y_tmp_ptr = ws + (T + 1) * cell_size * 4 + cell_size * 2;
  DType* y_cur_ptr = y_ptr;
  int idx = 0;  // state & cell state's idx;
  bool flag = L % 2 ? false : true;
  for (int i = 0; i < L; ++i) {
    const int input_size = i ? (P ? P : H) * D : I;
    int w_size = (input_size + (P ? P : H)) * H * 4;
    if (P > 0) {
      w_size += P * H;  // extra projection matrix per direction
    }
    // If bidirectional, need space to save current layer output y.
    if (D == 2) {
      y_cur_ptr = flag ? y_tmp_ptr : y_ptr;
      flag = !flag;
    }
    Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 3, DType> y(y_cur_ptr, Shape3(T, N, (P ? P : H) * D));
    LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, false, T, N, input_size, H, P,
                                           x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    // If bidirectional, then calculate the reverse direction's forward result.
    if (D == 2) {
      w_ptr += w_size;
      b_ptr += b_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += projection_size;
        cy_ptr += cell_size;
      }
      LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, true, T, N, input_size, H, P,
                                             x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    }
    // Don't need to move pointer in the last layer.
    if (i != L - 1) {
      w_ptr += w_size;
      b_ptr += b_size;
      x_ptr = y_cur_ptr;  // next layer consumes this layer's output
      ++idx;
      if (state_outputs) {
        hy_ptr += projection_size;
        cy_ptr += cell_size;
      }
    }
  }
}
template <typename DType>
void LstmBackwardSingleLayer(DType* ws,
DType* rs,
DType* tmp_buf,
bool bid,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
const Tensor<cpu, 2, DType> &cx,
const Tensor<cpu, 3, DType> &y,
const Tensor<cpu, 3, DType> &dy,
const Tensor<cpu, 2, DType> &dx,
const Tensor<cpu, 2, DType> &dhx,
const Tensor<cpu, 2, DType> &dcx,
DType* dhy_ptr,
DType* dcy_ptr,
DType* w_ptr,
DType* dw_ptr,
DType* db_ptr,
int req_data,
int req_params,
int req_state,
int req_statecell) {
using namespace mshadow;
const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
Tensor<cpu, 2, DType> dwx(dw_ptr, Shape2(H * 4, I));
Tensor<cpu, 2, DType> dwh(dw_ptr + I * H * 4, Shape2(H * 4, H));
Tensor<cpu, 1, DType> dbx(db_ptr, Shape1(H * 4));
Tensor<cpu, 1, DType> dbh(dbx.dptr_ + H * 4, Shape1(H * 4));
DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
const Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
const Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (req_params != kNullOp && req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H * 4 * H; ++i) {
dwh.dptr_[i] = 0;
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 4 * H; ++i) {
dbx.dptr_[i] = 0;
dbh.dptr_[i] = 0;
}
}
Tensor<cpu, 4, DType> difgo(ws, Shape4(T, N, 4, H));
Tensor<cpu, 2, DType> dh(ws + T * N * H * 4, Shape2(N, H));
Tensor<cpu, 2, DType> dc(dh.dptr_ + N * H, Shape2(N, H));
Tensor<cpu, 2, DType> htmp(dc.dptr_ + N * H, Shape2(N, H));
const int offset = bid ? H : 0;
const DType alpha = 1.0;
const DType beta0 = 0.0;
const DType beta1 = 1.0;
const DType beta2 = 2.0;
const int cell_size = N * H;
if (dhy_ptr != nullptr) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < cell_size; ++i) {
dh.dptr_[i] = dhy_ptr[i];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < cell_size; ++i) {
dh.dptr_[i] = 0;
}
}
if (dcy_ptr != nullptr) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < cell_size; ++i) {
dc.dptr_[i] = dcy_ptr[i];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < cell_size; ++i) {
dc.dptr_[i] = 0;
}
}
for (int i = T - 1; i >= 0; --i) {
int t = bid ? T - 1 - i : i;
int tnext = bid ? t + 1 : t - 1;
const Tensor<cpu, 2, DType>& dhnext = i ? dh : dhx;
const Tensor<cpu, 2, DType>& dcnext = i ? dc : dcx;
const Tensor<cpu, 2, DType>& hnext = i ? htmp : hx;
const Tensor<cpu, 2, DType>& cnext = i ? c[i - 1] : cx;
#pragma omp parallel for num_threads(omp_threads)
for (int jk = 0; jk < cell_size; ++jk) {
int j = jk / H;
int k = jk % H;
DType tc = tanh(c[i][j][k]);
DType it = ifgo[i][j][k][0];
DType ft = ifgo[i][j][k][1];
DType gt = ifgo[i][j][k][2];
DType ot = ifgo[i][j][k][3];
dh[j][k] += dy[t][j][k + offset];
dc[j][k] += dh[j][k] * ot * (1 - tc * tc);
difgo[t][j][0][k] = dc[j][k] * gt * it * (1 - it);
difgo[t][j][1][k] = dc[j][k] * cnext[j][k] * ft * (1 - ft);
difgo[t][j][2][k] = dc[j][k] * it * (1 - gt * gt);
difgo[t][j][3][k] = dh[j][k] * tc * ot * (1 - ot);
if (req_statecell != kNullOp || i > 0) {
dcnext[j][k] = dc[j][k] * ft;
}
if (i) {
htmp[j][k] = y[tnext][j][k + offset];
}
}
Tensor<cpu, 2, DType> dyh(difgo[t].dptr_, Shape2(N, H * 4));
if (req_state != kNullOp || i > 0) {
linalg_gemm(dyh, wh, dhnext, alpha, beta0, false, false);
}
if (req_params != kNullOp) {
if (req_params != kAddTo) {
linalg_gemm(dyh, hnext, dwh, alpha, beta1, true, false);
} else {
linalg_gemm(dyh, hnext, dwh, alpha, beta2, true, false);
// generate dwx every time step for AddTo
Tensor<cpu, 2, DType> x_t(x.dptr_ + i * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> dyx_t(difgo.dptr_ + i * N * H * 4, Shape2(N, H * 4));
linalg_gemm(dyx_t, x_t, dwx, alpha, beta2, true, false);
}
}
}
Tensor<cpu, 2, DType> dyx(difgo.dptr_, Shape2(T * N, H * 4));
if (req_data != kNullOp) {
linalg_gemm(dyx, wx, dx, alpha, bid ? beta1 : beta0, false, false);
}
if (req_params != kNullOp && req_params != kAddTo) {
linalg_gemm(dyx, x, dwx, alpha, beta0, true, false);
}
const int row = T * N;
const int col = H * 4;
if (req_params != kNullOp) {
if (req_params != kAddTo) {
for (int i = 0; i < row; ++i) {
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < col; ++j) {
dbx[j] += dyx[i][j];
dbh[j] = dbx[j];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf, Shape2(col, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + col * T, Shape2(col, T));
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < col * T; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (int t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < col; ++j) {
for (int i = 0; i < N; ++i) {
tmp_dbx[j][t] += dyx[t * N + i][j];
tmp_dbh[j][t] = tmp_dbx[j][t];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < col; ++j) {
dbx[j] += tmp_dbx[j][t] + dbx[j];
dbh[j] += tmp_dbh[j][t] + dbh[j];
}
}
}
}
}
// Backward pass over a stacked (optionally bidirectional) LSTM.
// Walks layers from top (i = L - 1) down to the input layer, invoking
// LstmBackwardSingleLayer once per direction, then undoes the forward
// dropout scaling on the inter-layer gradient before it becomes the next
// (lower) layer's dy.
//   ws / rs        : workspace / reserved space written by the forward pass
//   L, D, T, N     : layers, directions (1 or 2), sequence length, batch
//   I, H           : input size (first layer only) and hidden size
//   req_*          : op request flags; kNullOp skips that output,
//                    kAddTo accumulates into it
template <typename DType>
void LstmBackward(DType* ws,
                  DType* rs,
                  const int L,
                  const int D,
                  const int T,
                  const int N,
                  const int I,
                  const int H,
                  DType* x_ptr,
                  DType* hx_ptr,
                  DType* cx_ptr,
                  DType* w_ptr,
                  DType* y_ptr,
                  DType* dy_ptr,
                  DType* dhy_ptr,
                  DType* dcy_ptr,
                  DType* dx_ptr,
                  DType* dhx_ptr,
                  DType* dcx_ptr,
                  DType* dw_ptr,
                  DType* db_ptr,
                  int req_data,
                  int req_params,
                  int req_state,
                  int req_statecell,
                  const float dropout) {
  // NOTE(review): dropout_random and rs2 start at the same address; the
  // dropout masks apparently share the head of the reserved space with the
  // per-layer state region -- confirm against the forward-pass layout.
  DType* dropout_random = rs + (L - 1) * D * T * N * H;
  DType* rs2 = rs + (L - 1) * D * T * N * H;
  DType* tmp_buf = ws;
  DType* ws2 = tmp_buf + 8 * T * H;
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> dhx(dhx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> dcx(dcx_ptr, Shape3(total_layers, N, H));
  const int b_size = 2 * H * 4;            // bx + bh, four gates each
  const int r_size = D * T * N * H * 6;    // reserved space per layer
  const int y_offset = T * N * H * 5;      // offset of y inside a layer's reserve
  const int w_size1 = (I + H) * H * 4;     // first layer
  const int w_size2 = (D * H + H) * H * 4;  // other layers
  const int cell_size = N * H;
  DType* dy_tmp_ptr = ws2 + T * cell_size * 4 + cell_size * 3;
  for (int i = L - 1; i >= 0; --i) {
    const int input_size = i ? H * D : I;
    const int w_size = i ? w_size2 : w_size1;
    int idx = i * D;  // index into the (total_layers, N, H) state tensors
    DType* w_cur_ptr = i ? w_ptr + (w_size1 + (i - 1) * w_size2) * D : w_ptr;
    DType* dw_cur_ptr = i ? dw_ptr + (w_size1 + (i - 1) * w_size2) * D : dw_ptr;
    DType* db_cur_ptr = db_ptr + i * b_size * D;
    DType* rs_cur_ptr = rs2 + i * r_size;
    DType* dhy_cur_ptr = dhy_ptr ? dhy_ptr + i * cell_size * D : nullptr;
    DType* dcy_cur_ptr = dcy_ptr ? dcy_ptr + i * cell_size * D : nullptr;
    Tensor<cpu, 3, DType> y(rs_cur_ptr + y_offset, Shape3(T, N, H * D));
    Tensor<cpu, 3, DType> dy(dy_ptr, Shape3(T, N, H * D));
    // For layers above the first, the layer input is the previous layer's
    // saved output (one r_size block lower in the reserve).
    Tensor<cpu, 2, DType> x(i ? y.dptr_ - r_size : x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 2, DType> dx(i ? dy_tmp_ptr : dx_ptr, Shape2(T * N, input_size));
    LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, false, T, N, input_size, H,
                                   x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
                                   dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr,
                                   req_data, req_params, req_state, req_statecell);
    if (D == 2) {
      // Second (reverse) direction: advance all per-direction pointers.
      w_cur_ptr += w_size;
      dw_cur_ptr += w_size;
      db_cur_ptr += b_size;
      ++idx;
      dhy_cur_ptr = dhy_ptr ? dhy_cur_ptr + cell_size : nullptr;
      dcy_cur_ptr = dcy_ptr ? dcy_cur_ptr + cell_size : nullptr;
      LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, true, T, N, input_size, H,
                                     x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
                                     dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr,
                                     req_data, req_params, req_state, req_statecell);
    }
    // Invert the forward dropout: zero where the mask was zero, otherwise
    // rescale by 1/(1 - dropout).
    if (dropout > 0.0f && i > 0 && req_data != kNullOp) {
      dropout_random = dropout_random - T * N * D * H;
      const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
      #pragma omp parallel for num_threads(omp_threads)
      for (int j = 0; j < T * N * D * H; j++) {
        if (dropout_random[j] == 0) {
          dx.dptr_[j] = 0;
        } else {
          dx.dptr_[j] = dx.dptr_[j] / (1.0f - dropout);
        }
      }
    }
    // This layer's input gradient is the next (lower) layer's output gradient.
    dy_ptr = dx.dptr_;
  }
}
// Forward inference for a single GRU layer (both directions when D == 2).
// Precomputes x * wx^T for all time steps with one gemm, then iterates over
// time computing the reset (r), update (z) and candidate (n) gates and the
// output h_t, writing outputs interleaved as (T, N, D * H) into y_ptr.
// When state_outputs is set, the final step's state is copied into hy_ptr
// in (D, N, H) layout.
template<typename DType>
void GruForwardInferenceSingleLayer(DType* ws,
                                    DType* tmp_buf,
                                    bool state_outputs,
                                    const int D,
                                    const int T,
                                    const int N,
                                    const int I,
                                    const int H,
                                    const Tensor<cpu, 2, DType> &x,
                                    const Tensor<cpu, 2, DType> &hx,
                                    DType* wx_ptr,
                                    DType* wh_ptr,
                                    DType* bx_ptr,
                                    DType* bh_ptr,
                                    DType* y_ptr,
                                    DType* hy_ptr) {
  // ht walks forward through y; back_ht walks backward from the last step
  // (reverse direction occupies the second H-slice of each (N, D*H) row).
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T-1) * N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;  // [D, T, N, 3 * H]
  DType* gemmC2 = gemmC1 + D * T * N * 3 * H;  // N * 3 * H
  DType* rt = gemmC2 + N * 3 * H;
  DType* zt = rt + N * H;
  DType* nt = zt + N * H;
  // Reverse-direction weights/biases follow the forward ones in the blob.
  DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
  DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
  DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + 3 * H * 2 : nullptr;
  DType* back_bh_ptr = (bh_ptr != nullptr)? bh_ptr + 3 * H * 2: nullptr;
  DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed the first output rows with the initial hidden state(s).
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));
  // x * wx.T : [T * N, I] * [I, 3 * H]
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (int t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      // For D == 2 the previous state is interleaved with the reverse
      // direction; transpose/reshape into (D, H, N) to extract slice [0].
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                                              Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    gemmC1_t = gemmC1 + t * N * 3 * H;
    // GRU cell: r/z are sigmoids of the summed input/hidden projections
    // plus biases; n applies r inside the hidden part (cuDNN-style GRU).
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        int rtb = i * 3 * H;
        int ztb = i * 3 * H + H;
        int ntb = i * 3 * H + 2 * H;
        rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
            + bx[0][j] + bh[0][j]);
        zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
            + bx[1][j] + bh[1][j]);
        nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j] +
            rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
        ht[i * D * H + j] = (1-zt[i * H + j]) * nt[i * H + j] +
            zt[i * H + j] * ht_1[i * D * H + j];
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction
    if (D == 2) {
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int rtb = i * 3 * H;
          int ztb = i * 3 * H + H;
          int ntb = i * 3 * H + 2 * H;
          rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] +
              gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]);
          zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] +
              gemmC2[ztb + j] + back_bx[1][j]+ back_bh[1][j]);
          nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j]
              + rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
          back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j]
              + zt[i * H + j] * back_ht_1[i * D * H + j];
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}
template <typename DType>
void GruForwardInference(DType* ws,
bool state_outputs,
const int L,
const int D,
const int T,
const int N,
int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* hy_ptr) {
DType* wx = w_ptr;
DType* wh = wx + I * H * 3;
DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
+ (L - 1) * ((D + 1) * H) * H * 3 * D;
DType* bh = bx + H * 3;
DType* y_tmp = ws;
DType* y_l = x_ptr;
DType* tmp_buf = y_tmp + D * T * N * H;
DType* ws2 = y_tmp + D * T * N * H + D * H * N;
DType* wx_l = wx;
DType* wh_l = wh;
DType* bx_l = bx;
DType* bh_l = bh;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
DType* hy_l = hy_ptr;
for (int l = 0; l < L; l++) {
Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I));
if ((L + l) % 2) {
y_l = y_ptr;
} else {
y_l = y_tmp;
}
Tensor<cpu, 2, DType> hx_l = hx[D * l];
GruForwardInferenceSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l, hy_l);
hy_l = hy_l + D * N * H;
bx_l = bx_l + 3 * H * D * 2;
bh_l = bh_l + 3 * H * D * 2;
wx_l = wx_l + I * H * 3 * D + H * H * 3 * D;
if (l == 0) {
I = D * H;
}
wh_l = wx_l + I * 3 * H;
}
}
// Forward training pass for a single GRU layer (both directions when
// D == 2).  Identical math to the inference version, but additionally
// records the gate activations (gateR/gateZ/gateN) and the hidden part of
// the candidate pre-activation (Mnh) for reuse in the backward pass.
template<typename DType>
void GruForwardTrainingSingleLayer(DType* ws,
                                   DType* tmp_buf,
                                   bool state_outputs,
                                   const int D,
                                   const int T,
                                   const int N,
                                   const int I,
                                   const int H,
                                   const Tensor<cpu, 2, DType> &x,
                                   const Tensor<cpu, 2, DType> &hx,
                                   DType* wx_ptr,
                                   DType* wh_ptr,
                                   DType* bx_ptr,
                                   DType* bh_ptr,
                                   DType* gateR,
                                   DType* gateZ,
                                   DType* gateN,
                                   DType* Mnh,
                                   DType* y_ptr,
                                   DType* hy_ptr) {
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T - 1)* N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;  // [D, T, N, 3 * H]
  DType* gemmC2 = gemmC1 + D * T * N * 3 * H;  // N * 3 * H
  // Gate outputs are written straight into the caller-provided reserve.
  DType* rt = gateR;
  DType* zt = gateZ;
  DType* nt = gateN;
  // Reverse-direction weights/biases/reserve slices follow the forward ones.
  DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
  DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
  DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + 3 * H * 2 : nullptr;
  DType* back_bh_ptr = (bh_ptr != nullptr)? bh_ptr + 3 * H * 2 : nullptr;
  DType* back_gateR = gateR + T * N * H;
  DType* back_gateZ = gateZ + T * N * H;
  DType* back_gateN = gateN + T * N * H;
  DType* back_Mnh = Mnh + T * N * H;
  DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed the first output rows with the initial hidden state(s).
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));
  // x * wx.T : [T * N, I] * [I, 3 * H]
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (int t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                                              Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    rt = gateR + t * N * H;
    zt = gateZ + t * N * H;
    nt = gateN + t * N * H;
    gemmC1_t = gemmC1 + t * N * 3 * H;
    DType* Mnht = Mnh + t * N * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        int rtb = i * 3 * H;
        int ztb = i * 3 * H + H;
        int ntb = i * 3 * H + 2 * H;
        // Save the hidden candidate pre-activation for the backward pass.
        Mnht[i * H + j] = gemmC2[ntb + j] + bh[2][j];
        rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
            + bx[0][j] + bh[0][j]);
        zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
            + bx[1][j] + bh[1][j]);
        nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j] +
            rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
        ht[i * D * H + j] = (1-zt[i * H + j]) * nt[i * H + j] +
            zt[i * H + j] * ht_1[i * D * H + j];
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction
    if (D == 2) {
      rt = back_gateR + (T - 1 - t) * N * H;
      zt = back_gateZ + (T - 1 - t) * N * H;
      nt = back_gateN + (T - 1 - t) * N * H;
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
      DType* back_Mnht = back_Mnh + (T - 1 - t) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int rtb = i * 3 * H;
          int ztb = i * 3 * H + H;
          int ntb = i * 3 * H + 2 * H;
          back_Mnht[i * H + j] = gemmC2[ntb + j] + back_bh[2][j];
          rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] +
              gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]);
          zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] +
              gemmC2[ztb + j] + back_bx[1][j] + back_bh[1][j]);
          nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j]
              + rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
          back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j]
              + zt[i * H + j] * back_ht_1[i * D * H + j];
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}
// Multi-layer GRU training driver.  Applies inverted dropout between
// layers (recording the mask in the reserve space for the backward pass),
// then runs GruForwardTrainingSingleLayer per layer and finally copies
// the top layer's output into y_ptr.
template <typename DType>
void GruForwardTraining(DType* ws,
                        DType* rs,
                        bool state_outputs,
                        const int L,
                        const int D,
                        const int T,
                        const int N,
                        int I,
                        const int H,
                        DType* x_ptr,
                        DType* hx_ptr,
                        DType* w_ptr,
                        DType* y_ptr,
                        DType* hy_ptr,
                        const float dropout) {
  // Weight blob: all wx/wh matrices first, then the bias region.
  DType* wx = w_ptr;
  DType* wh = wx + I * H * 3;
  DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
      + (L - 1) * ((D + 1) * H) * H * 3 * D;
  DType* bh = bx + H * 3;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  // Reserve-space layout: gate activations, layer outputs, candidate
  // pre-activations, dropout masks, then scratch/workspace.
  DType* gateR_l = rs;
  DType* gateZ_l = gateR_l + L * T * D * N * H;
  DType* gateN_l = gateZ_l + L * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* Mnh_l = y_l + L * T * N * H * D;
  DType* dropout_random = Mnh_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* ws2 = tmp_buf + D * N * H;
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  DType* y_tmp = x_ptr;
  unsigned int seed_ = 17 + rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
  for (int l = 0; l < L; l++) {
    if (l != 0) {
      y_tmp = y_l;
      y_l = y_l + T * N * H * D;
    }
    if (dropout > 0.0f && l > 0) {
      const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
      // NOTE(review): seed_ is shared by all OMP threads, so the rand_r
      // calls race and the drawn mask is not deterministic across runs --
      // confirm whether that is acceptable here.
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * I; i++) {
        int rand_data = rand_r(&seed_);
        if (static_cast<float>(rand_data % 1000) < static_cast<float>(1000 * dropout)) {
          // Drop this activation; remember the decision for backward.
          dropout_random[(l - 1) * T * N * I + i] = 0;
          y_tmp[i] = 0;
        } else {
          // Inverted dropout: scale kept activations by 1/(1 - dropout).
          dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout;
          y_tmp[i] = y_tmp[i] / (1.0f - dropout);
        }
      }
    }
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    GruForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
                                         x_l, hx_l, wx_l, wh_l, bx_l, bh_l,
                                         gateR_l, gateZ_l, gateN_l, Mnh_l, y_l, hy_l);
    // Advance reserve-space and parameter pointers to the next layer.
    gateR_l = gateR_l + T * D * N * H;
    gateZ_l = gateZ_l + T * D * N * H;
    gateN_l = gateN_l + T * D * N * H;
    Mnh_l = Mnh_l + T * D * N * H;
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + 3 * H * D * 2;
    bh_l = bh_l + 3 * H * D * 2;
    wx_l = wx_l + I * H * 3 * D + H * H * 3 * D;
    if (l == 0) {
      I = D * H;  // layers above the first consume the previous layer's output
    }
    wh_l = wx_l + I * 3 * H;
  }
  // After the loop y_l points at the top layer's output inside the reserve;
  // copy it to the user-visible output buffer.
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < T * N * H * D; ++i) {
    y_ptr[i] = y_l[i];
  }
}
// Backward pass for a single GRU layer (both directions when D == 2).
// Consumes the gate activations (gateR/gateZ/gateN) and candidate hidden
// pre-activations (Mnh) recorded by the forward training pass, and produces
// dx, dhx and the parameter gradients dwx/dwh/dbx/dbh.  req_* follow the
// op-request convention: kNullOp skips an output, kAddTo accumulates.
template <typename DType>
void GruBackwardSingleLayer(DType* ws,
                            DType* tmp_buf,
                            const int D,
                            const int T,
                            const int N,
                            const int I,
                            const int H,
                            const Tensor<cpu, 2, DType> &x,
                            const Tensor<cpu, 2, DType> &hx,
                            DType* wx_ptr,
                            DType* wh_ptr,
                            DType* y_ptr,
                            DType* dy_ptr,
                            DType* dhy_ptr,
                            DType* gateR,
                            DType* gateZ,
                            DType* gateN,
                            DType* Mnh,
                            DType* dx,
                            DType* dhx,
                            DType* dwx,
                            DType* dwh,
                            DType* dbx,
                            DType* dbh,
                            int req_data,
                            int req_params,
                            int req_state) {
  DType* dyt;
  DType* ht1;  // [N, D, H]
  DType* rt;
  DType* zt;
  DType* nt;
  DType* dat;
  DType* dart;
  // Workspace carve-up: da holds gate gradients w.r.t. the input path,
  // dar the (reset-scaled) gradients for the hidden path.
  DType* dar = ws;  // [T, N, 3 * H]
  DType* da = dar + T * N * 3 * H;  // [T, N, 3 * H]
  DType* dht1 = da + T * N * 3 * H;  // [D, N, H]
  DType* hx_ = dht1 + D * N * H;  // [N, D, H]
  DType* Mnht = Mnh;
  DType* back_ht1;
  DType* back_dht1 = dht1 + N * H;  // [N, H]
  DType* back_Mnht = Mnh + T * N * H;
  DType* back_gateR = gateR + T * N * H;
  DType* back_gateZ = gateZ + T * N * H;
  DType* back_gateN = gateN + T * N * H;
  // Reverse-direction parameters/gradients follow the forward ones.
  DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
  DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
  DType* back_dwx = dwx + I * 3 * H + H * 3 * H;
  DType* back_dwh = dwh + I * 3 * H + H * 3 * H;
  DType* back_dbx = dbx + 3 * H * 2;
  DType* back_dbh = dbh + 3 * H * 2;
  DType alpha = 1.0;
  DType beta = 0.0;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Fresh (non-AddTo) parameter gradients start from zero.
  if (req_params != kNullOp && req_params != kAddTo) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < D * H * 3 * H; ++i) {
      dwh[i] = 0;
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < D * 3 * H; ++i) {
      dbx[i] = 0;
      dbh[i] = 0;
    }
  }
  // Seed the running hidden gradient with dhy (or zero).
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < N * H; ++i) {
    if (dhy_ptr) {
      dht1[i] = dhy_ptr[i];
    } else {
      dht1[i] = 0;
    }
  }
  // Re-lay the initial hidden state as (N, D, H) for step-0 access.
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < N; ++i) {
    for (int j = 0; j < H; ++j) {
      hx_[i * D * H + j] = hx[i][j];
    }
  }
  if (D == 2) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N * H; ++i) {
      if (dhy_ptr) {
        back_dht1[i] = dhy_ptr[N * H + i];
      } else {
        back_dht1[i] = 0;
      }
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        hx_[i * D * H + H + j] = hx[N + i][j];
      }
    }
  }
  // Forward-direction BPTT: iterate time steps in reverse.
  for (int t = T - 1; t >= 0; --t) {
    if (t) {
      ht1 = y_ptr + (t - 1) * N * D * H;
    } else {
      ht1 = hx_;
    }
    // add dy[T, N, D, H] to dhy[D, N, H]
    dyt = dy_ptr + t * N * D * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        dht1[i * H + j] += dyt[i * D * H + j];
      }
    }
    rt = gateR + t * N * H;
    zt = gateZ + t * N * H;
    nt = gateN + t * N * H;
    Mnht = Mnh + t * N * H;
    dat = da + t * N * 3 * H;
    dart = dar + t * N * 3 * H;
    // Gate gradients from the GRU cell equations; dart differs from dat
    // only in the candidate slot, which is scaled by the reset gate for
    // the hidden-path gemm.
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        int nid = i * 3 * H + 2 * H + j;
        int zid = i * 3 * H + H + j;
        int rid = i * 3 * H + j;
        int id = i * H + j;
        dat[nid] = dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
        dart[zid] = dat[zid] = dht1[id] * (ht1[i * D * H + j] - nt[id]) *
            zt[id] * (1 - zt[id]);
        dart[rid] = dat[rid] = dat[nid] * Mnht[id] * rt[id] *
            (1 - rt[id]);
        dart[nid] = dat[nid] * rt[id];
        dht1[id] = dht1[id] * zt[id];
      }
    }
    if (req_params != kNullOp) {
      alpha = 1.0;
      beta = 1.0;
      // dht1 = dart * wh    [N, H] = [N, 3 * H] * [3 * H, H]
      Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H));
      Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
      linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false);
      if (req_params == kAddTo) {
        beta = 2.0;
        // dwx = da.T * x    [3 * H, I] = [3 * H, N] * [N, I] for AddTo
        Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
        Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
        Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
        linalg_gemm(d_dat, d_xt, d_dwx, alpha, beta, true, false);
      }
      // dwh = dart.T * ht1    [3 * H, H] = [3 * H, N] * [N, H]
      Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H));
      Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(3 * H, H));
      Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N));
      linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true);
    }
  }
  if (req_params != kNullOp) {
    // dbx = e * da       [1, 3 * H] = [1, N] * [N, 3 * H]
    if (req_params != kAddTo) {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < 3 * H; ++i) {
        for (int j = 0; j < N * T; ++j) {
          dbx[i] += da[j * 3 * H + i];
          dbh[i] += dar[j * 3 * H + i];
        }
      }
    } else {
      const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
      const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T));
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < H * T * 3; ++i) {
        tmp_dbx.dptr_[i] = 0;
        tmp_dbh.dptr_[i] = 0;
      }
      for (int t = T - 1; t >= 0; --t) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < 3 * H; ++i) {
          for (int j = 0; j < N; ++j) {
            tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
            tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
          }
        }
        // NOTE(review): this folds dbx into itself each step
        // (dbx <- 2*dbx + column sum), mirroring the beta = 2.0 gemm trick
        // used above for kAddTo -- verify against the intended AddTo math.
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < 3 * H; ++i) {
          dbx[i] += tmp_dbx[i][t] + dbx[i];
          dbh[i] += tmp_dbh[i][t] + dbh[i];
        }
      }
    }
  }
  alpha = 1.0;
  beta = 0.0;
  // dx = da * wx    [T * N, I] = [T * N, 3 * H] * [3 * H, I]
  Tensor<cpu, 2, DType> d_da(da, Shape2(T * N, 3 * H));
  if (req_data != kNullOp) {
    Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
    linalg_gemm(d_da, wx, d_dx, alpha, beta, false, false);
  }
  // dwx = da.T * x    [3 * H, I] = [3 * H, T * N] * [T * N, I]
  if (req_params != kNullOp && req_params != kAddTo) {
    Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
    linalg_gemm(d_da, x, d_dwx, alpha, beta, true, false);
  }
  // Reverse-direction BPTT: same structure, iterating t forward since the
  // reverse direction was computed back-to-front.
  if (D == 2) {
    for (int t = 0; t < T; ++t) {
      if (t == T-1) {
        back_ht1 = hx_;
      } else {
        back_ht1 = y_ptr + (t + 1) * N * D * H;
      }
      // add dy[T, N, D, H] to dhy[D, N, H]
      dyt = dy_ptr + t * N * D * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          back_dht1[i * H + j] += dyt[i * D * H + H + j];
        }
      }
      rt = back_gateR + t * N * H;
      zt = back_gateZ + t * N * H;
      nt = back_gateN + t * N * H;
      back_Mnht = Mnh + (T + t) * N * H;
      dat = da + t * N * 3 * H;
      dart = dar + t * N * 3 * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int nid = i * 3 * H + 2 * H + j;
          int zid = i * 3 * H + H + j;
          int rid = i * 3 * H + j;
          int id = i * H + j;
          dat[nid] = back_dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
          dart[zid] = dat[zid] = back_dht1[id] * (back_ht1[i * D * H + H + j] -
              nt[id]) * zt[id] * (1 - zt[id]);
          dart[rid] = dat[rid] = dat[nid] * back_Mnht[id] * rt[id] *
              (1 - rt[id]);
          dart[nid] = dat[nid] * rt[id];
          back_dht1[id] = back_dht1[id] * zt[id];
        }
      }
      if (req_params != kNullOp) {
        alpha = 1.0;
        beta = 1.0;
        // dht1 = da * wh    [N, H] = [N, 3 * H] * [3 * H, H]
        Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
        Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
        linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
        // dwh = da.T * ht1    [3 * H, H] = [3 * H, N] * [N, H]
        Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(3 * H, H));
        Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
        Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>
            (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
        d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
        if (req_params == kAddTo) {
          beta = 2.0;
          // dwx = da.T * x    [3 * H, I] = [3 * H, N] * [N, I] for AddTo
          Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
          Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
          Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
          linalg_gemm(d_dat, d_xt, d_back_dwx, alpha, beta, true, false);
        }
        linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true);
      }
    }
    if (req_params != kNullOp) {
      // dbx = e * da       [1, 3 * H] = [1, N] * [N, 3 * H]
      if (req_params != kAddTo) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < 3 * H; ++i) {
          for (int j = 0; j < N * T; ++j) {
            back_dbx[i] += da[j * 3 * H + i];
            back_dbh[i] += dar[j * 3 * H + i];
          }
        }
      } else {
        const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
        const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T));
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < H * T * 3; ++i) {
          tmp_dbx.dptr_[i] = 0;
          tmp_dbh.dptr_[i] = 0;
        }
        for (int t = T - 1; t >= 0; --t) {
          #pragma omp parallel for num_threads(omp_threads)
          for (int i = 0; i < 3 * H; ++i) {
            for (int j = 0; j < N; ++j) {
              tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
              tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
            }
          }
          #pragma omp parallel for num_threads(omp_threads)
          for (int i = 0; i < 3 * H; ++i) {
            back_dbx[i] += tmp_dbx[i][t] + back_dbx[i];
            back_dbh[i] += tmp_dbh[i][t] + back_dbh[i];
          }
        }
      }
    }
    alpha = 1.0;
    beta = 1.0;  // accumulate onto the forward direction's dx
    // dxt = da * wx    [T * N, I] = [T * N, 3 * H] * [3 * H, I]
    Tensor<cpu, 2, DType> d_da2(da, Shape2(T * N, 3 * H));
    if (req_data != kNullOp) {
      Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
      linalg_gemm(d_da2, back_wx, d_dx, alpha, beta, false, false);
    }
    alpha = 1.0;
    beta = 0.0;
    // dwx = da.T * x    [3 * H, I] = [3 * H, T * N] * [T * N, I]
    if (req_params != kNullOp && req_params != kAddTo) {
      Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
      linalg_gemm(d_da2, x, d_back_dwx, alpha, beta, true, false);
    }
  }
  // Export the accumulated initial-state gradient.
  if (req_state != kNullOp) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N * H * D; ++i) {
      dhx[i] = dht1[i];
    }
  }
}
// Backward pass over a stacked (optionally bidirectional) GRU.
// Walks layers from top (l = L - 1) down to the input layer, invoking
// GruBackwardSingleLayer per layer, undoing inter-layer dropout scaling,
// and stepping all reserve-space / parameter-gradient pointers backward.
template <typename DType>
void GruBackward(DType* ws,
                 DType* rs,
                 const int L,
                 const int D,
                 const int T,
                 const int N,
                 int I,
                 const int H,
                 DType* x_ptr,
                 DType* hx_ptr,
                 DType* w_ptr,
                 DType* dy_ptr,
                 DType* dhy_ptr,
                 DType* dx_ptr,
                 DType* dhx_ptr,
                 DType* dw_ptr,
                 int req_data,
                 int req_params,
                 int req_state,
                 const float dropout) {
  DType* wx = w_ptr;
  DType* dwx = dw_ptr;
  DType* dwh = dwx + I * H * 3;
  // Bias gradients start after all weight-matrix gradients in the blob.
  DType* dbx = dwh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
      + (L - 1) * ((D + 1) * H) * H * 3 * D;
  // Reserve-space pointers start positioned at the top layer's slices
  // (same layout GruForwardTraining wrote).
  DType* gateR_l = rs + (L - 1) * T * D * N * H;
  DType* gateZ_l = gateR_l + L * T * D * N * H;
  DType* gateN_l = gateZ_l + L * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* Mnh_l = y_l + L * T * N * H * D;
  DType* dropout_random = Mnh_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* dx_l = tmp_buf + T * N * D * H + 3 * H * T * 2;
  DType* ws2 = dx_l + T * N * D * H;
  // Top layer's weight pointers (special-cased for L == 1, where the
  // first layer's smaller wx block applies).
  DType* wx_l = (L == 1)? wx : wx + (L - 2) * D * (D + 1) * H * 3 * H
      + D * I * 3 * H + D * H * 3 * H;
  DType* wh_l = wx_l;
  if (L == 1) {
    wh_l = wh_l + I * H * 3;
  } else {
    wh_l = wh_l + (D * H) * H * 3;
  }
  DType* dhy_l = nullptr;
  if (dhy_ptr)
    dhy_l = dhy_ptr + (L - 1) * D * N * H;
  DType* dwx_l = (L == 1)? dwx : dwx + (L - 2) * D * (D + 1) * H * 3 * H
      + D * I * 3 * H + D * H * 3 * H;
  DType* dwh_l = nullptr;
  if (L == 1) {
    dwh_l = dwx_l + I * H * 3;
  } else {
    dwh_l = dwx_l + (D * H) * H * 3;
  }
  DType* dbx_l = dbx + (L - 1) * D * 3 * H * 2;
  DType* dbh_l = dbx_l + 3 * H;
  DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
  DType* dy_l = dy_ptr;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
  int inputsize = I;  // remember the layer-0 input width; I is reused below
  DType* y_tmp = y_l - T * N * H * D;  // layer input = previous layer's output
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int l = L - 1; l >= 0; --l) {
    if (l == 0) {
      I = inputsize;
      y_tmp = x_ptr;
      dx_l = dx_ptr;  // bottom layer writes the user-visible dx
    } else {
      I = D * H;
    }
    Tensor<cpu, 2, DType> hx_l = hx[l];
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    GruBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, y_l, dy_l,
                                  dhy_l, gateR_l, gateZ_l, gateN_l, Mnh_l, dx_l, dhx_l,
                                  dwx_l, dwh_l, dbx_l, dbh_l, req_data, req_params, req_state);
    // Invert the forward dropout on the gradient flowing to the layer below.
    if (dropout > 0.0f && l > 0 && req_data != kNullOp) {
      dropout_random = dropout_random - T * N * D * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * I; i++) {
        if (dropout_random[i] == 0) {
          dx_l[i] = 0;
        } else {
          dx_l[i] = dx_l[i] / (1.0f - dropout);
        }
      }
    }
    if (l > 0) {
      // This layer's input gradient becomes the next layer's dy.
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * H * D; ++i) {
        dy_l[i] = dx_l[i];
      }
      // Step all reserve-space and gradient pointers one layer down.
      gateR_l = gateR_l - T * D * N * H;
      gateZ_l = gateZ_l - T * D * N * H;
      gateN_l = gateN_l - T * D * N * H;
      Mnh_l = Mnh_l - T * D * N * H;
      dhx_l = dhx_l - D * N * H;
      if (dhy_l)
        dhy_l = dhy_l - D * N * H;
      y_l = y_l - T * N * H * D;
      y_tmp = y_l;
      // Layer 0 has a different weight-block size (input width = inputsize).
      if (l == 1) {
        wx_l = wx_l - (inputsize + H) * H * 3 * D;
        wh_l = wx_l + inputsize * 3 * H;
        dwx_l = dwx_l - (inputsize + H) * H * 3 * D;
        dwh_l = dwx_l + inputsize * 3 * H;
      } else {
        wx_l = wx_l - (I + H) * H * 3 * D;
        wh_l = wx_l + I * 3 * H;
        dwx_l = dwx_l - (I + H) * H * 3 * D;
        dwh_l = dwx_l + I * 3 * H;
      }
      dbx_l = dbx_l - D * 3 * H * 2;
      dbh_l = dbx_l + 3 * H;
    }
  }
}
// Forward inference for ONE vanilla-RNN layer (tanh or ReLU cell).
//
// Parameters (grounded in the indexing below):
//   ws            workspace: gemmC1 [D, T, N, H] followed by gemmC2 [N, H]
//   tmp_buf       scratch used to transpose ht-1 when D == 2
//   state_outputs if true, the final hidden state is copied into hy_ptr
//   D             number of directions; only D == 1 and D == 2 paths exist
//   T, N, I, H    seq length, batch size, input size, hidden size
//   x             layer input, viewed as [T * N, I]
//   hx            initial hidden state; hx[i][j] forward, hx[N + i][j] reverse
//   wx_ptr/wh_ptr input-to-hidden / hidden-to-hidden weights; the reverse
//                 direction's weights follow at offset I * H + H * H
//   bx_ptr/bh_ptr biases (may be nullptr; only offsets are derived from them
//                 when null — the bx/bh Tensors are then never dereferenced
//                 because callers pass non-null biases.  TODO confirm.)
//   y_ptr         output, laid out as [T, N, D, H]
//   hy_ptr        final state output, laid out as [D, N, H]
//   mode          1 selects tanh activation, anything else selects relu
template<typename DType>
void VanillaRNNForwardInferenceSingleLayer(DType* ws,
                                           DType* tmp_buf,
                                           bool state_outputs,
                                           const int D,
                                           const int T,
                                           const int N,
                                           const int I,
                                           const int H,
                                           const Tensor<cpu, 2, DType> &x,
                                           const Tensor<cpu, 2, DType> &hx,
                                           DType* wx_ptr,
                                           DType* wh_ptr,
                                           DType* bx_ptr,
                                           DType* bh_ptr,
                                           DType* y_ptr,
                                           DType* hy_ptr,
                                           int mode) {
  // ht walks forward through y; back_ht walks backward from the last step,
  // offset by H so the reverse direction writes the second half of each
  // [D, H] slot.
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T-1) * N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;  // [D, T, N, H]
  DType* gemmC2 = gemmC1 + D * T * N * H;  // N * H
  DType* back_wx_ptr = wx_ptr + I * H + H * H;
  DType* back_wh_ptr = wh_ptr + I * H + H * H;
  DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + H * 2 : nullptr;
  DType* back_bh_ptr = (bh_ptr != nullptr)? bh_ptr + H * 2: nullptr;
  DType* back_gemmC1 = gemmC1 + T * N * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed step 0 (and, for D == 2, the reverse chain's step T-1) of y with the
  // initial hidden state hx, so the t-loop can read ht-1 straight out of y.
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H));
  // Input projection for ALL time steps at once:
  // x * wx.T : [T * N, I] * [I, H]
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (int t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      // For D == 2 the forward H-slice is interleaved inside [N, D*H];
      // transpose into tmp_buf so slice [0] is contiguous for the gemm.
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                                              Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    gemmC1_t = gemmC1 + t * N * H;
    // ht = act(x_t*Wx + bx + ht-1*Wh + bh); mode 1 => tanh, else relu.
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        int tb = i * H;
        if (mode == 1) {
          ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j] +
                                   gemmC2[tb + j] + bh[0][j]);
        } else {
          ht[i * D * H + j] = relu(gemmC1_t[tb + j] + bx[0][j] +
                                   gemmC2[tb + j] + bh[0][j]);
        }
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction (iterates time step T-1-t)
    if (D == 2) {
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int tb = i * H;
          if (mode == 1) {
            back_ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + back_bx[0][j]
                                          + gemmC2[tb + j] + back_bh[0][j]);
          } else {
            back_ht[i * D * H + j] = relu(gemmC1_t[tb + j] + back_bx[0][j]
                                          + gemmC2[tb + j] + back_bh[0][j]);
          }
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      // Forward direction's last state is at t = T-1; the reverse
      // direction's last state is at t = 0 (offset H within each slot).
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}
// Multi-layer driver for vanilla-RNN forward inference.
//
// Stacks L layers, alternating ("ping-pong") between the user-visible output
// buffer y_ptr and a scratch buffer y_tmp so that the FINAL layer always
// lands in y_ptr: (L + l) % 2 selects y_ptr exactly when l has the same
// parity as the last layer index.  I is intentionally non-const: after layer
// 0 the input size of every subsequent layer becomes D * H.
//
//   ws      workspace, carved into y_tmp [D,T,N,H], tmp_buf, and ws2 (the
//           per-layer workspace handed to the single-layer kernel)
//   L/D/T/N/I/H  layers, directions, seq len, batch, input size, hidden size
//   x_ptr   network input, first layer's x
//   hx_ptr  initial states, viewed as [D * L, N, H]
//   w_ptr   packed weights; bx/bh offsets below mirror the packing used by
//           the training/backward paths
//   y_ptr   final output [T, N, D, H]
//   hy_ptr  final states (consumed layer-by-layer, D * N * H apart)
//   mode    1 = tanh, else relu (forwarded to the single-layer kernel)
template <typename DType>
void VanillaRNNForwardInference(DType* ws,
                                bool state_outputs,
                                const int L,
                                const int D,
                                const int T,
                                const int N,
                                int I,
                                const int H,
                                DType* x_ptr,
                                DType* hx_ptr,
                                DType* w_ptr,
                                DType* y_ptr,
                                DType* hy_ptr,
                                int mode) {
  DType* wx = w_ptr;
  DType* wh = wx + I * H;
  // Biases live after ALL layers' wx/wh blocks in the packed weight buffer.
  DType* bx = wh + H * H + (D - 1) * (H * H + I * H)
              + (L - 1) * ((D + 1) * H) * H * D;
  DType* bh = bx + H;
  DType* y_tmp = ws;
  DType* y_l = x_ptr;
  DType* tmp_buf = y_tmp + D * T * N * H;
  DType* ws2 = y_tmp + D * T * N * H + D * H * N;
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  for (int l = 0; l < L; l++) {
    // NOTE: x_l must be constructed BEFORE y_l is re-pointed — it views the
    // previous layer's output (or x_ptr on the first iteration).
    Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I));
    if ((L + l) % 2) {
      y_l = y_ptr;
    } else {
      y_l = y_tmp;
    }
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    VanillaRNNForwardInferenceSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
                                                 x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l,
                                                 hy_l, mode);
    // Advance per-layer pointers.  The wx_l advance uses the CURRENT I, so
    // it must happen before I is widened to D * H after layer 0.
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + H * D * 2;
    bh_l = bh_l + H * D * 2;
    wx_l = wx_l + I * H * D + H * H * D;
    if (l == 0) {
      I = D * H;
    }
    wh_l = wx_l + I * H;
  }
}
// Forward TRAINING pass for one vanilla-RNN layer.
//
// Identical recurrence to the inference kernel, but additionally records the
// per-step pre-activation / activation values into gateN ([D, T, N, H]) so
// the backward pass can compute the activation gradient:
//   mode == 1 (tanh): gateN stores the activated value tanh(...) itself
//   otherwise (relu): gateN stores the PRE-activation sum (relu's input)
// See VanillaRNNBackwardSingleLayer, which uses (1 - n^2) for tanh and
// (n > 0) for relu accordingly.
//
// Parameter layout matches VanillaRNNForwardInferenceSingleLayer:
// y [T, N, D, H], hy [D, N, H], reverse-direction weights at +I*H + H*H.
template<typename DType>
void VanillaRNNForwardTrainingSingleLayer(DType* ws,
                                          DType* tmp_buf,
                                          bool state_outputs,
                                          const int D,
                                          const int T,
                                          const int N,
                                          const int I,
                                          const int H,
                                          const Tensor<cpu, 2, DType> &x,
                                          const Tensor<cpu, 2, DType> &hx,
                                          DType* wx_ptr,
                                          DType* wh_ptr,
                                          DType* bx_ptr,
                                          DType* bh_ptr,
                                          DType* gateN,
                                          DType* y_ptr,
                                          DType* hy_ptr,
                                          int mode) {
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T - 1)* N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;  // [D, T, N, H]
  DType* gemmC2 = gemmC1 + D * T * N * H;  // N * H
  DType* nt = gateN;
  DType* back_wx_ptr = wx_ptr + I * H + H * H;
  DType* back_wh_ptr = wh_ptr + I * H + H * H;
  DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + H * 2 : nullptr;
  DType* back_bh_ptr = (bh_ptr != nullptr)? bh_ptr + H * 2 : nullptr;
  DType* back_gateN = gateN + T * N * H;
  DType* back_gemmC1 = gemmC1 + T * N * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 1, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 1, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed y with the initial hidden state(s) so the loop reads ht-1 from y.
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H));
  // Input projection for all steps at once:
  // x * wx.T : [T * N, I] * [I, H]
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (int t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                                              Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    nt = gateN + t * N * H;
    gemmC1_t = gemmC1 + t * N * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        int tb = i * H;
        if (mode == 1) {
          // tanh: record the activation itself (backward uses 1 - n^2).
          nt[tb + j] = ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j] +
                                                gemmC2[tb + j] + bh[0][j]);
        } else {
          // relu: record the PRE-activation sum (backward uses n > 0).
          nt[tb + j] = gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j];
          ht[i * D * H + j] = relu(nt[tb + j]);
        }
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction (time step T-1-t)
    if (D == 2) {
      nt = back_gateN + (T - 1 - t) * N * H;
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int tb = i * H;
          if (mode == 1) {
            nt[tb + j] = back_ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + back_bx[0][j]
                                                       + gemmC2[tb + j] + back_bh[0][j]);
          } else {
            nt[tb + j] = gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j];
            back_ht[i * D * H + j] = relu(nt[tb + j]);
          }
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}
// Multi-layer driver for vanilla-RNN forward TRAINING.
//
// Differences from the inference driver:
//  * every layer's output y_l is kept (stored consecutively in rs after the
//    gateN records) because backward needs all intermediate activations;
//    the last layer's output is then copied into y_ptr at the end;
//  * inter-layer dropout: for l > 0 each input element is zeroed with
//    probability `dropout` and survivors are scaled by 1/(1-dropout)
//    (inverted dropout); the 0 / (1-dropout) mask is saved in
//    dropout_random for reuse by the backward pass.
//
// NOTE(review): the dropout mask is drawn via rand_r inside an OMP parallel
// loop with a single shared seed_ — concurrent rand_r calls race on seed_,
// so the mask is not reproducible across runs. Presumably acceptable for
// dropout; confirm before relying on determinism.
template <typename DType>
void VanillaRNNForwardTraining(DType* ws,
                               DType* rs,
                               bool state_outputs,
                               const int L,
                               const int D,
                               const int T,
                               const int N,
                               int I,
                               const int H,
                               DType* x_ptr,
                               DType* hx_ptr,
                               DType* w_ptr,
                               DType* y_ptr,
                               DType* hy_ptr,
                               const float dropout,
                               int mode) {
  DType* wx = w_ptr;
  DType* wh = wx + I * H;
  // Biases live after all layers' wx/wh blocks in the packed weight buffer.
  DType* bx = wh + H * H + (D - 1) * (H * H + I * H)
              + (L - 1) * ((D + 1) * H) * H * D;
  DType* bh = bx + H;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  // Reserved-space (rs) layout: gateN [L,T,D,N,H] | y [L,D,T,N,H] |
  // dropout masks [(L-1),D,T,N,H] — all persisted for backward.
  DType* gateN_l = rs;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* dropout_random = y_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* ws2 = tmp_buf + D * N * H;
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  DType* y_tmp = x_ptr;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  unsigned int seed_ = 17 + rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
  for (int l = 0; l < L; l++) {
    if (l != 0) {
      // Feed the previous layer's output forward; keep y_l for backward.
      y_tmp = y_l;
      y_l = y_l + T * N * H * D;
    }
    if (dropout > 0.0f && l > 0) {
      // Inverted dropout applied IN PLACE to the previous layer's output.
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * I; i++) {
        int rand_data = rand_r(&seed_);
        if (static_cast<float>(rand_data % 1000) < static_cast<float>(1000 * dropout)) {
          dropout_random[(l - 1) * T * N * I + i] = 0;
          y_tmp[i] = 0;
        } else {
          dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout;
          y_tmp[i] = y_tmp[i] / (1.0f - dropout);
        }
      }
    }
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    VanillaRNNForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
                                                x_l, hx_l, wx_l, wh_l, bx_l, bh_l,
                                                gateN_l, y_l, hy_l, mode);
    // Advance per-layer pointers; wx_l uses the CURRENT I, so it is bumped
    // before I widens to D * H after layer 0.
    gateN_l = gateN_l + T * D * N * H;
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + H * D * 2;
    bh_l = bh_l + H * D * 2;
    wx_l = wx_l + I * H * D + H * H * D;
    if (l == 0) {
      I = D * H;
    }
    wh_l = wx_l + I * H;
  }
  // Publish the last layer's output as the op's output.
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < T * N * H * D; ++i) {
    y_ptr[i] = y_l[i];
  }
}
// Backward pass for one vanilla-RNN layer (tanh or ReLU cell).
//
// Consumes the gateN record written by the training forward pass:
//   mode == 1 (tanh): gateN holds tanh(.) so d(act) = 1 - n^2
//   otherwise (relu): gateN holds the pre-activation so d(act) = (n > 0)
//
// Gradients produced (subject to the req_* write modes, which are
// kNullOp = skip / kAddTo = accumulate / otherwise overwrite):
//   dx  [T*N, I]   via req_data
//   dwx, dwh, dbx, dbh         via req_params
//   dhx [D, N, H]  via req_state
//
// Workspace ws is carved into: dar [T, N, H] (per-step activation grads),
// dht1 [D, N, H] (running state grad), hx_ [N, D, H] (hx re-packed into the
// interleaved y layout so it can stand in for y at t == -1 / t == T).
template <typename DType>
void VanillaRNNBackwardSingleLayer(DType* ws,
                                   DType* tmp_buf,
                                   const int D,
                                   const int T,
                                   const int N,
                                   const int I,
                                   const int H,
                                   const Tensor<cpu, 2, DType> &x,
                                   const Tensor<cpu, 2, DType> &hx,
                                   DType* wx_ptr,
                                   DType* wh_ptr,
                                   DType* y_ptr,
                                   DType* dy_ptr,
                                   DType* dhy_ptr,
                                   DType* gateN,
                                   DType* dx,
                                   DType* dhx,
                                   DType* dwx,
                                   DType* dwh,
                                   DType* dbx,
                                   DType* dbh,
                                   int req_data,
                                   int req_params,
                                   int req_state,
                                   int mode) {
  DType* dyt;
  DType* ht1;  // [N, D, H]
  DType* dart;
  DType* nt;
  DType* dar = ws;  // [T, N, H]
  DType* dht1 = dar + T * N * H;  // [D, N, H]
  DType* hx_ = dht1 + D * N * H;  // [N, D, H]
  DType* back_ht1;
  DType* back_dht1 = dht1 + N * H;  // [N, H]
  DType* back_gateN = gateN + T * N * H;
  DType* back_wx_ptr = wx_ptr + I * H + H * H;
  DType* back_wh_ptr = wh_ptr + I * H + H * H;
  DType* back_dwx = dwx + I * H + H * H;
  DType* back_dwh = dwh + I * H + H * H;
  DType* back_dbx = dbx + H * 2;
  DType* back_dbh = dbh + H * 2;
  DType alpha = 1.0;
  DType beta = 0.0;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Overwrite mode: clear the accumulators that the loops below add into.
  if (req_params != kNullOp && req_params != kAddTo) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < D * H * H; ++i) {
      dwh[i] = 0;
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < D * H; ++i) {
      dbx[i] = 0;
      dbh[i] = 0;
    }
  }
  // Seed the running state gradient from dhy (or zero if absent).
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < N * H; ++i) {
    if (dhy_ptr) {
      dht1[i] = dhy_ptr[i];
    } else {
      dht1[i] = 0;
    }
  }
  // Re-pack hx into the interleaved [N, D, H] layout used by y.
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < N; ++i) {
    for (int j = 0; j < H; ++j) {
      hx_[i * D * H + j] = hx[i][j];
    }
  }
  if (D == 2) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N * H; ++i) {
      if (dhy_ptr) {
        back_dht1[i] = dhy_ptr[N * H + i];
      } else {
        back_dht1[i] = 0;
      }
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        hx_[i * D * H + H + j] = hx[N + i][j];
      }
    }
  }
  // Forward direction: walk time backwards, chaining dht1 through steps.
  for (int t = T - 1; t >= 0; --t) {
    if (t) {
      ht1 = y_ptr + (t - 1) * N * D * H;
    } else {
      ht1 = hx_;  // step 0's "previous state" is the initial state
    }
    // add dy[T, N, D, H] to dhy[D, N, H]
    dyt = dy_ptr + t * N * D * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        dht1[i * H + j] += dyt[i * D * H + j];
      }
    }
    nt = gateN + t * N * H;
    dart = dar + t * N * H;
    // Backprop through the activation; dht1 is consumed and reset here,
    // then rebuilt below as dart * wh for the next (earlier) step.
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        int id = i * H + j;
        if (mode == 1) {
          dart[id] = dht1[id] * (1 - nt[id] * nt[id]);
        } else {
          // NOTE(review): the float cast narrows double gradients here —
          // presumably harmless; confirm for double-precision builds.
          dart[id] = nt[id] > 0.0f ? static_cast<float>(dht1[id]) : 0.0f;
        }
        dht1[id] = 0;
      }
    }
    if (req_params != kNullOp) {
      alpha = 1.0;
      beta = 1.0;
      // dht1 = dart * wh [N, H] = [N, H] * [H, H]
      Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H));
      Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H));
      linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false);
      if (req_params == kAddTo) {
        // NOTE(review): beta = 2.0 doubles the existing dwx contribution on
        // every step of the kAddTo path — verify against upstream intent.
        beta = 2.0;
        // dwx = da.T * x [H, I] = [H, N] * [N, I] for AddTo
        Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
        Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I));
        linalg_gemm(d_dart, d_xt, d_dwx, alpha, beta, true, false);
      }
      // dwh = dart.T * ht1 [H, H] = [H, N] * [N, H]
      Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H));
      Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(H, H));
      Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N));
      linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true);
    }
  }
  // Reduce dar over (T, N) to get the bias gradients.
  if (req_params != kNullOp) {
    // dbx = e * da [1, H] = [1, N] * [N, H]
    if (req_params != kAddTo) {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < H; ++i) {
        for (int j = 0; j < N * T; ++j) {
          dbx[i] += dar[j * H + i];
          dbh[i] = dbx[i];
        }
      }
    } else {
      const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T));
      const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T));
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < H * T; ++i) {
        tmp_dbx.dptr_[i] = 0;
        tmp_dbh.dptr_[i] = 0;
      }
      for (int t = T - 1; t >= 0; --t) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < H; ++i) {
          for (int j = 0; j < N; ++j) {
            tmp_dbx[i][t] += dar[t * N * H + j * H + i];
            tmp_dbh[i][t] = tmp_dbx[i][t];
          }
        }
        // NOTE(review): "dbx[i] += tmp + dbx[i]" adds the prior dbx[i] in
        // again each step (doubling); mirrors the beta = 2.0 quirk above —
        // verify against upstream intent before changing.
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < H; ++i) {
          dbx[i] += tmp_dbx[i][t] + dbx[i];
          dbh[i] = dbx[i];
        }
      }
    }
  }
  alpha = 1.0;
  beta = 0.0;
  // dx = da * wx [T * N, I] = [T * N, H] * [H, I]
  Tensor<cpu, 2, DType> d_dar(dar, Shape2(T * N, H));
  if (req_data != kNullOp) {
    Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
    linalg_gemm(d_dar, wx, d_dx, alpha, beta, false, false);
  }
  // dwx = da.T * x [H, I] = [H, T * N] * [T * N, I]
  if (req_params != kNullOp && req_params != kAddTo) {
    Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I));
    linalg_gemm(d_dar, x, d_dwx, alpha, beta, true, false);
  }
  // Reverse direction: same scheme, walking time FORWARDS (its recurrence
  // ran backwards), reusing dar and accumulating into the back_* buffers.
  if (D == 2) {
    for (int t = 0; t < T; ++t) {
      if (t == T-1) {
        back_ht1 = hx_;
      } else {
        back_ht1 = y_ptr + (t + 1) * N * D * H;
      }
      // add dy[T, N, D, H] to dhy[D, N, H]
      dyt = dy_ptr + t * N * D * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          back_dht1[i * H + j] += dyt[i * D * H + H + j];
        }
      }
      nt = back_gateN + t * N * H;
      dart = dar + t * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int id = i * H + j;
          if (mode == 1) {
            dart[id] = back_dht1[id] * (1 - nt[id] * nt[id]);
          } else {
            dart[id] = nt[id] > 0.0f ? static_cast<float>(back_dht1[id]) : 0.0f;
          }
          back_dht1[id] = 0;
        }
      }
      if (req_params != kNullOp) {
        alpha = 1.0;
        beta = 1.0;
        // dht1 = da * wh [N, H] = [N, H] * [H, H]
        Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H));
        Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
        linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
        // dwh = da.T * ht1 [H, H] = [H, N] * [N, H]
        Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(H, H));
        Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
        Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>
            (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
        d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
        if (req_params == kAddTo) {
          // NOTE(review): beta = 2.0 again — see forward-direction note.
          beta = 2.0;
          // dwx = da.T * x [ H, I] = [H, N] * [N, I] for AddTo
          Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
          Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I));
          linalg_gemm(d_dart, d_xt, d_back_dwx, alpha, beta, true, false);
        }
        linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true);
      }
    }
    if (req_params != kNullOp) {
      // dbx = e * da [1, H] = [1, N] * [N, H]
      if (req_params != kAddTo) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < H; ++i) {
          for (int j = 0; j < N * T; ++j) {
            back_dbx[i] += dar[j * H + i];
            back_dbh[i] = back_dbx[i];
          }
        }
      } else {
        const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T));
        const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T));
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < H * T; ++i) {
          tmp_dbx.dptr_[i] = 0;
          tmp_dbh.dptr_[i] = 0;
        }
        for (int t = T - 1; t >= 0; --t) {
          #pragma omp parallel for num_threads(omp_threads)
          for (int i = 0; i < H; ++i) {
            for (int j = 0; j < N; ++j) {
              tmp_dbx[i][t] += dar[t * N * H + j * H + i];
              tmp_dbh[i][t] = tmp_dbx[i][t];
            }
          }
          #pragma omp parallel for num_threads(omp_threads)
          for (int i = 0; i < H; ++i) {
            back_dbx[i] += tmp_dbx[i][t] + back_dbx[i];
            back_dbh[i] = back_dbx[i];
          }
        }
      }
    }
    alpha = 1.0;
    beta = 1.0;
    // dxt = da * wx [T * N, I] = [T * N, H] * [H, I]
    // (beta = 1: accumulates onto the forward direction's dx)
    Tensor<cpu, 2, DType> d_dar2(dar, Shape2(T * N, H));
    if (req_data != kNullOp) {
      Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
      linalg_gemm(d_dar2, back_wx, d_dx, alpha, beta, false, false);
    }
    alpha = 1.0;
    beta = 0.0;
    // dwx = da.T * x [H, I] = [H, T * N] * [T * N, I]
    if (req_params != kNullOp && req_params != kAddTo) {
      Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I));
      linalg_gemm(d_dar2, x, d_back_dwx, alpha, beta, true, false);
    }
  }
  // The surviving dht1 (both directions) is the initial-state gradient.
  if (req_state != kNullOp) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N * H * D; ++i) {
      dhx[i] = dht1[i];
    }
  }
}
// Multi-layer driver for vanilla-RNN backward.
//
// Walks layers from top (L-1) down to 0. For each layer it replays the
// records persisted by VanillaRNNForwardTraining out of rs (gateN, the
// per-layer outputs y, and the dropout masks), calls the single-layer
// backward, then feeds the resulting input gradient dx_l in as the next
// (lower) layer's output gradient dy_l. Weight/bias gradient pointers are
// decremented through the same packed layout the forward pass used; the
// l == 1 step uses the ORIGINAL input size (inputsize) because layer 0's
// wx has shape [H, inputsize] rather than [H, D*H].
//
// req_data / req_params / req_state are the usual kNullOp / kAddTo /
// overwrite write modes, forwarded to the single-layer kernel.
template <typename DType>
void VanillaRNNBackward(DType* ws,
                        DType* rs,
                        const int L,
                        const int D,
                        const int T,
                        const int N,
                        int I,
                        const int H,
                        DType* x_ptr,
                        DType* hx_ptr,
                        DType* w_ptr,
                        DType* dy_ptr,
                        DType* dhy_ptr,
                        DType* dx_ptr,
                        DType* dhx_ptr,
                        DType* dw_ptr,
                        int req_data,
                        int req_params,
                        int req_state,
                        const float dropout,
                        int mode) {
  DType* wx = w_ptr;
  DType* dwx = dw_ptr;
  DType* dwh = dwx + I * H;
  // Bias gradients live after all layers' dwx/dwh blocks.
  DType* dbx = dwh + H * H + (D - 1) * (H * H + I * H)
               + (L - 1) * ((D + 1) * H) * H * D;
  // rs layout (must match VanillaRNNForwardTraining): gateN | y | dropout.
  // Pointers start at the TOP layer's slice.
  DType* gateN_l = rs + (L - 1) * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* dropout_random = y_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* dx_l = tmp_buf + T * N * D * H + H * T * 2;
  DType* ws2 = dx_l + T * N * D * H;
  // Weight pointers for the top layer: layer 0 has wx [H, I]; layers >= 1
  // have wx [H, D*H], hence the two-case offset arithmetic.
  DType* wx_l = (L == 1)? wx : wx + (L - 2) * D * (D + 1) * H * H
                + D * I * H + D * H * H;
  DType* wh_l = wx_l;
  if (L == 1) {
    wh_l = wh_l + I * H;
  } else {
    wh_l = wh_l + (D * H) * H;
  }
  DType* dhy_l = nullptr;
  if (dhy_ptr)
    dhy_l = dhy_ptr + (L - 1) * D * N * H;
  DType* dwx_l = (L == 1)? dwx : dwx + (L - 2) * D * (D + 1) * H * H
                 + D * I * H + D * H * H;
  DType* dwh_l = nullptr;
  if (L == 1) {
    dwh_l = dwx_l + I * H;
  } else {
    dwh_l = dwx_l + (D * H) * H;
  }
  DType* dbx_l = dbx + (L - 1) * D * H * 2;
  DType* dbh_l = dbx_l + H;
  DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
  DType* dy_l = dy_ptr;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
  int inputsize = I;
  DType* y_tmp = y_l - T * N * H * D;  // layer l's INPUT = layer l-1's output
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int l = L - 1; l >= 0; --l) {
    if (l == 0) {
      // Bottom layer: input is the op's input, dx goes to the caller.
      I = inputsize;
      y_tmp = x_ptr;
      dx_l = dx_ptr;
    } else {
      I = D * H;
    }
    Tensor<cpu, 2, DType> hx_l = hx[l];
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    VanillaRNNBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l, wx_l, wh_l,
                                         y_l, dy_l, dhy_l, gateN_l, dx_l, dhx_l, dwx_l, dwh_l,
                                         dbx_l, dbh_l, req_data, req_params, req_state, mode);
    if (dropout > 0.0f && l > 0 && req_data != kNullOp) {
      // Replay the saved dropout mask on the input gradient: zero where the
      // forward pass dropped, rescale by 1/(1-dropout) where it kept.
      dropout_random = dropout_random - T * N * D * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * I; i++) {
        if (dropout_random[i] == 0) {
          dx_l[i] = 0;
        } else {
          dx_l[i] = dx_l[i] / (1.0f - dropout);
        }
      }
    }
    if (l > 0) {
      // This layer's input gradient becomes the next layer's output gradient.
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * H * D; ++i) {
        dy_l[i] = dx_l[i];
      }
      // Step every per-layer pointer down to layer l-1.
      gateN_l = gateN_l - T * D * N * H;
      dhx_l = dhx_l - D * N * H;
      if (dhy_l)
        dhy_l = dhy_l - D * N * H;
      y_l = y_l - T * N * H * D;
      y_tmp = y_l;
      if (l == 1) {
        // Stepping onto layer 0, whose wx is sized by the original input.
        wx_l = wx_l - (inputsize + H) * H * D;
        wh_l = wx_l + inputsize * H;
        dwx_l = dwx_l - (inputsize + H) * H * D;
        dwh_l = dwx_l + inputsize * H;
      } else {
        wx_l = wx_l - (I + H) * H * D;
        wh_l = wx_l + I * H;
        dwx_l = dwx_l - (I + H) * H * D;
        dwh_l = dwx_l + I * H;
      }
      dbx_l = dbx_l - D * H * 2;
      dbh_l = dbx_l + H;
    }
  }
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_RNN_IMPL_H_
|
GB_binop__bxnor_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bxnor_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bxnor_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_int64)
// C=scalar+B GB (_bind1st__bxnor_int64)
// C=scalar+B' GB (_bind1st_tran__bxnor_int64)
// C=A+scalar GB (_bind2nd__bxnor_int64)
// C=A'+scalar GB (_bind2nd_tran__bxnor_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = ~((aij) ^ (bij))
// Kernel-configuration macros consumed by the #include'd templates below.
// C, A, and B are all int64_t for the BXNOR_INT64 operator.
#define GB_ATYPE \
    int64_t
#define GB_BTYPE \
    int64_t
#define GB_CTYPE \
    int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator: bitwise XNOR, z = ~(x XOR y)
#define GB_BINOP(z,x,y,i,j) \
    z = ~((x) ^ (y)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BXNOR || GxB_NO_INT64 || GxB_NO_BXNOR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled (#if 0): the dense C += A+B kernel only exists for arithmetic
// accumulators (MIN/MAX/PLUS/...), not for bitwise ops such as BXNOR, so
// the generator emitted no symbol ("(none)") for it.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the actual loop comes from the
// #include'd template, specialized by the GB_* macros above.  Returns
// GrB_NO_VALUE when this operator/type is compiled out (GB_DISABLE), which
// tells the caller to fall back to the generic kernel.
GrB_Info GB (_Cdense_ewise3_noaccum__bxnor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense C, parallelized via the
// B_ek_slicing task decomposition.  Body supplied by the #include'd
// template; GrB_NO_VALUE signals fallback to the generic kernel.
GrB_Info GB (_Cdense_accumB__bxnor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into a dense
// matrix.  The inner block unpacks the scalar and runs the template.
// NOTE(review): the second "return (GrB_SUCCESS)" is unreachable — an
// artifact of the code generator; harmless, and this file is generated
// ("do not edit"), so it is left as-is.
GrB_Info GB (_Cdense_accumb__bxnor_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled (#if 0): C = A*D column-scale kernel not generated for BXNOR
// (no "(none)" symbol emitted); the generic path handles it instead.
#if 0
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled (#if 0): C = D*B row-scale kernel not generated for BXNOR;
// the generic path handles it instead.
#if 0
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (optionally masked C<M> / C<!M>) for BXNOR on int64.
// The task list and C_to_* maps come from the symbolic phase; the numeric
// loop is the #include'd template.  The GB_WERK_DECLAREd slicings are
// workspace the template may allocate and GB_FREE_WORK releases.
GrB_Info GB (_AaddB__bxnor_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (masked variants included) when C is sparse/hypersparse
// — "method 08" of GB_emult.  Numeric work done by the #include'd meta
// template, driven by the precomputed TaskList.
GrB_Info GB (_AemultB_08__bxnor_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult kernel (method 02) for bxnor/int64: A sparse/hyper, B bitmap/full.
// GB_FLIPPED selects whether the template computes fmult(x,y) or fmult(y,x);
// which branch is compiled depends on GB_BINOP_FLIP for this operator.
GrB_Info GB (_AemultB_02__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult kernel (method 04) for bxnor/int64: M sparse/hyper, A and B
// bitmap/full.  The loop is provided by GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult kernel for bxnor/int64 when C is bitmap; the loop is provided by
// GB_bitmap_emult_template.c, specialized via the earlier GB_* macros.
GrB_Info GB (_AemultB_bitmap__bxnor_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = bxnor (x, Bx [p]): apply the binary op with the scalar bound to
// the first argument.  Entries absent from the bitmap Bb are skipped.
GrB_Info GB (_bind1st__bxnor_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    const int64_t x = (*((int64_t *) x_input)) ;
    const int64_t *Bx = (const int64_t *) Bx_input ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only entries present in B contribute to C
        if (GBB (Bb, p))
        {
            int64_t bij = GBX (Bx, p, false) ;
            Cx [p] = ~((x) ^ (bij)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = bxnor (Ax [p], y): apply the binary op with the scalar bound to
// the second argument.  Entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__bxnor_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    const int64_t *Ax = (const int64_t *) Ax_input ;
    const int64_t y = (*((int64_t *) y_input)) ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in A contribute to C
        if (GBB (Ab, p))
        {
            int64_t aij = GBX (Ax, p, false) ;
            Cx [p] = ~((aij) ^ (y)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: cij = bxnor (x, aij).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((x) ^ (aij)) ; \
}
// C = op (x, A'): transpose A and apply the op with the scalar bound first.
// The transpose loop is provided by GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__bxnor_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// generated epilogue: restore GB_ATYPE (same definition) for later kernels
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: cij = bxnor (aij, y).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((aij) ^ (y)) ; \
}
// C = op (A', y): transpose A and apply the op with the scalar bound second.
// The transpose loop is provided by GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
data.h | /*!
* Copyright (c) 2015 by Contributors
* \file data.h
* \brief The input data structure of xgboost.
* \author Tianqi Chen
*/
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <dmlc/serializer.h>
#include <xgboost/base.h>
#include <xgboost/span.h>
#include <xgboost/host_device_vector.h>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
namespace xgboost {
// forward declare dmatrix.
class DMatrix;
/*! \brief data type accepted by xgboost interface */
enum class DataType : uint8_t {
kFloat32 = 1,
kDouble = 2,
kUInt32 = 3,
kUInt64 = 4,
kStr = 5
};
enum class FeatureType : uint8_t {
kNumerical,
kCategorical
};
/*!
* \brief Meta information about dataset, always sit in memory.
*/
class MetaInfo {
public:
/*! \brief number of data fields in MetaInfo */
static constexpr uint64_t kNumField = 11;
/*! \brief number of rows in the data */
uint64_t num_row_{0}; // NOLINT
/*! \brief number of columns in the data */
uint64_t num_col_{0}; // NOLINT
/*! \brief number of nonzero entries in the data */
uint64_t num_nonzero_{0}; // NOLINT
/*! \brief label of each instance */
HostDeviceVector<bst_float> labels_; // NOLINT
/*!
* \brief the index of begin and end of a group
* needed when the learning task is ranking.
*/
std::vector<bst_group_t> group_ptr_; // NOLINT
/*! \brief weights of each instance, optional */
HostDeviceVector<bst_float> weights_; // NOLINT
/*!
* \brief initialized margins,
* if specified, xgboost will start from this init margin
* can be used to specify initial prediction to boost from.
*/
HostDeviceVector<bst_float> base_margin_; // NOLINT
/*!
* \brief lower bound of the label, to be used for survival analysis (censored regression)
*/
HostDeviceVector<bst_float> labels_lower_bound_; // NOLINT
/*!
* \brief upper bound of the label, to be used for survival analysis (censored regression)
*/
HostDeviceVector<bst_float> labels_upper_bound_; // NOLINT
/*!
* \brief Name of type for each feature provided by users. Eg. "int"/"float"/"i"/"q"
*/
std::vector<std::string> feature_type_names;
/*!
* \brief Name for each feature.
*/
std::vector<std::string> feature_names;
/*
* \brief Type of each feature. Automatically set when feature_type_names is specified.
*/
HostDeviceVector<FeatureType> feature_types;
/*
* \brief Weight of each feature, used to define the probability of each feature being
* selected when using column sampling.
* NOTE(review): member name is misspelled ("weigths") but is public API here,
* so it cannot be renamed without breaking callers elsewhere.
*/
HostDeviceVector<float> feature_weigths;
/*! \brief default constructor */
MetaInfo() = default;
// Move-only: copying is deleted below, so ownership of the (potentially
// large) label/weight vectors always transfers rather than duplicates.
MetaInfo(MetaInfo&& that) = default;
MetaInfo& operator=(MetaInfo&& that) = default;
MetaInfo& operator=(MetaInfo const& that) = delete;
/*!
* \brief Validate all metainfo.
*/
void Validate(int32_t device) const;
/*! \brief Extract a new MetaInfo containing only the given row indices. */
MetaInfo Slice(common::Span<int32_t const> ridxs) const;
/*!
* \brief Get weight of each instances.
* \param i Instance index.
* \return The weight; 1.0 when no weights were provided.
*/
inline bst_float GetWeight(size_t i) const {
return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
}
/*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
inline const std::vector<size_t>& LabelAbsSort() const {
// cached result is reused as long as the label count has not changed
if (label_order_cache_.size() == labels_.Size()) {
return label_order_cache_;
}
label_order_cache_.resize(labels_.Size());
std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
const auto& l = labels_.HostVector();
XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
[&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
return label_order_cache_;
}
/*! \brief clear all the information */
void Clear();
/*!
* \brief Load the Meta info from binary stream.
* \param fi The input stream
*/
void LoadBinary(dmlc::Stream* fi);
/*!
* \brief Save the Meta info to binary stream
* \param fo The output stream.
*/
void SaveBinary(dmlc::Stream* fo) const;
/*!
* \brief Set information in the meta info.
* \param key The key of the information.
* \param dptr The data pointer of the source array.
* \param dtype The type of the source data.
* \param num Number of elements in the source array.
*/
void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
/*!
* \brief Set information in the meta info with array interface.
* \param key The key of the information.
* \param interface_str String representation of json format array interface.
*
* [ column_0, column_1, ... column_n ]
*
* Right now only 1 column is permitted.
*/
void SetInfo(const char* key, std::string const& interface_str);
void GetInfo(char const* key, bst_ulong* out_len, DataType dtype,
const void** out_dptr) const;
void SetFeatureInfo(const char *key, const char **info, const bst_ulong size);
void GetFeatureInfo(const char *field, std::vector<std::string>* out_str_vecs) const;
/*
* \brief Extend with other MetaInfo.
*
* \param that The other MetaInfo object.
*
* \param accumulate_rows Whether rows need to be accumulated in this function. If
* client code knows number of rows in advance, set this parameter to false.
*/
void Extend(MetaInfo const& that, bool accumulate_rows);
private:
/*! \brief argsort of labels; mutable so the const LabelAbsSort() can cache it */
mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector */
struct Entry {
/*! \brief feature index */
bst_feature_t index;
/*! \brief feature value */
bst_float fvalue;
/*! \brief default constructor */
Entry() = default;
/*!
* \brief constructor with index and value
* \param index The feature or row index.
* \param fvalue The feature value.
*/
XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {}
/*! \brief compare entries by feature value, ascending (used by SortRows; the
* historical name notwithstanding, this is a plain less-than comparator) */
inline static bool CmpValue(const Entry& a, const Entry& b) {
return a.fvalue < b.fvalue;
}
/*! \brief exact equality on both index and value (float compare is exact) */
inline bool operator==(const Entry& other) const {
return (this->index == other.index && this->fvalue == other.fvalue);
}
};
/*!
* \brief Parameters for constructing batches.
*/
struct BatchParam {
  /*! \brief The GPU device to use; -1 means no device selected.
   *
   * Previously left uninitialized by the default constructor, so
   * `BatchParam{} != other` read an indeterminate value (UB). */
  int gpu_id{-1};
  /*! \brief Maximum number of bins per feature for histograms. */
  int max_bin{0};
  /*! \brief Page size for external memory mode (0 = default).
   *
   * Also previously uninitialized after default construction. */
  size_t gpu_page_size{0};
  BatchParam() = default;
  /*!
   * \brief Construct with explicit device, bin count and optional page size.
   * \param device        GPU ordinal (or -1 for none).
   * \param max_bin       Maximum number of histogram bins per feature.
   * \param gpu_page_size Page size for external memory mode.
   */
  BatchParam(int32_t device, int32_t max_bin, size_t gpu_page_size = 0)
      : gpu_id{device}, max_bin{max_bin}, gpu_page_size{gpu_page_size} {}
  /*! \brief Field-wise inequality; used to detect a parameter change. */
  inline bool operator!=(const BatchParam& other) const {
    return gpu_id != other.gpu_id || max_bin != other.max_bin ||
           gpu_page_size != other.gpu_page_size;
  }
};
// Non-owning CSR view over a SparsePage's host-side buffers: `offset` holds
// the row boundaries, `data` the concatenated entries.
struct HostSparsePageView {
  using Inst = common::Span<Entry const>;
  common::Span<bst_row_t const> offset;
  common::Span<Entry const> data;
  // Return a view of row i: entries in [offset[i], offset[i+1]).
  Inst operator[](size_t i) const {
    auto const* row = offset.data() + i;
    auto const begin = *row;
    auto const length = *(row + 1) - begin;
    return {data.data() + begin, static_cast<Inst::index_type>(length)};
  }
  // Number of rows; an empty offset array means an empty page.
  size_t Size() const { return offset.size() ? offset.size() - 1 : 0; }
};
/*!
* \brief In-memory storage unit of sparse batch, stored in CSR format.
*/
class SparsePage {
public:
// Offset for each row.
HostDeviceVector<bst_row_t> offset;
/*! \brief the data of the segments */
HostDeviceVector<Entry> data;
// First row id covered by this page (nonzero for external-memory pages).
size_t base_rowid {0};
/*! \brief an instance of sparse vector in the batch */
using Inst = common::Span<Entry const>;
/*! \brief Build a non-owning host-side CSR view over offset/data. */
HostSparsePageView GetView() const {
return {offset.ConstHostSpan(), data.ConstHostSpan()};
}
/*! \brief constructor */
SparsePage() {
this->Clear();
}
/*! \return Number of instances in the page. */
inline size_t Size() const {
return offset.Size() == 0 ? 0 : offset.Size() - 1;
}
/*! \return estimation of memory cost of this page */
inline size_t MemCostBytes() const {
return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
}
/*! \brief clear the page; leaves the single sentinel 0 in offset */
inline void Clear() {
base_rowid = 0;
auto& offset_vec = offset.HostVector();
offset_vec.clear();
offset_vec.push_back(0);
data.HostVector().clear();
}
/*! \brief Set the base row id for this page. */
inline void SetBaseRowId(size_t row_id) {
base_rowid = row_id;
}
SparsePage GetTranspose(int num_columns) const;
/*! \brief Sort the entries of every segment by feature value, in parallel.
* NOTE(review): the loop bound is named `ncol` but it is this->Size(), i.e.
* the number of segments (rows for CSR pages, columns for CSC pages). */
void SortRows() {
auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for default(none) shared(ncol) schedule(dynamic, 1)
for (bst_omp_uint i = 0; i < ncol; ++i) {
if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
std::sort(
this->data.HostVector().begin() + this->offset.HostVector()[i],
this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
Entry::CmpValue);
}
}
}
/**
* \brief Pushes external data batch onto this page
*
* \tparam AdapterBatchT
* \param batch
* \param missing
* \param nthread
*
* \return The maximum number of columns encountered in this input batch. Useful when pushing many adapter batches to work out the total number of columns.
*/
template <typename AdapterBatchT>
uint64_t Push(const AdapterBatchT& batch, float missing, int nthread);
/*!
* \brief Push a sparse page
* \param batch the row page
*/
void Push(const SparsePage &batch);
/*!
* \brief Push a SparsePage stored in CSC format
* \param batch The row batch to be pushed
*/
void PushCSC(const SparsePage& batch);
};
// SparsePage in CSC layout; a distinct type so BatchSet/GetBatches can
// dispatch on the page kind at compile time.
class CSCPage: public SparsePage {
public:
CSCPage() : SparsePage() {}
explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
// CSC-layout SparsePage whose segments are sorted by feature value; distinct
// type so BatchSet/GetBatches can dispatch on the page kind.
class SortedCSCPage : public SparsePage {
public:
SortedCSCPage() : SparsePage() {}
explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
class EllpackPageImpl;
/*!
* \brief A page stored in ELLPACK format.
*
* This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid
* including CUDA-specific implementation details in the header.
*/
class EllpackPage {
public:
/*!
* \brief Default constructor.
*
* This is used in the external memory case. An empty ELLPACK page is constructed with its content
* set later by the reader.
*/
EllpackPage();
/*!
* \brief Constructor from an existing DMatrix.
*
* This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix
* in CSR format.
*/
explicit EllpackPage(DMatrix* dmat, const BatchParam& param);
/*! \brief Destructor. */
~EllpackPage();
// Move-only: the unique_ptr member implicitly deletes copy operations.
EllpackPage(EllpackPage&& that);
/*! \return Number of instances in the page. */
size_t Size() const;
/*! \brief Set the base row id for this page. */
void SetBaseRowId(size_t row_id);
/*! \brief Access the CUDA-side implementation (PImpl). */
const EllpackPageImpl* Impl() const { return impl_.get(); }
EllpackPageImpl* Impl() { return impl_.get(); }
private:
std::unique_ptr<EllpackPageImpl> impl_;
};
// Abstract interface implemented by each page source; BatchIterator forwards
// its iterator operations to an instance of this class.
template<typename T>
class BatchIteratorImpl {
public:
virtual ~BatchIteratorImpl() = default;
virtual T& operator*() = 0;
virtual const T& operator*() const = 0;
virtual void operator++() = 0;
virtual bool AtEnd() const = 0;
};
// Forward iterator over pages, delegating to a shared BatchIteratorImpl.
// Designed for single-pass range-for consumption via BatchSet.
template<typename T>
class BatchIterator {
public:
using iterator_category = std::forward_iterator_tag; // NOLINT
// Takes ownership of the raw impl pointer.
explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }
void operator++() {
CHECK(impl_ != nullptr);
++(*impl_);
}
T& operator*() {
CHECK(impl_ != nullptr);
return *(*impl_);
}
const T& operator*() const {
CHECK(impl_ != nullptr);
return *(*impl_);
}
// NOTE: the right-hand side is ignored; "not equal" only means "not at
// end", which is sufficient for the begin()/end() range-for protocol.
bool operator!=(const BatchIterator&) const {
CHECK(impl_ != nullptr);
return !impl_->AtEnd();
}
bool AtEnd() const {
CHECK(impl_ != nullptr);
return impl_->AtEnd();
}
private:
std::shared_ptr<BatchIteratorImpl<T>> impl_;
};
// Lightweight range wrapper so callers can write range-for over pages;
// end() is a sentinel iterator holding a null impl.
template<typename T>
class BatchSet {
public:
explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(std::move(begin_iter)) {}
BatchIterator<T> begin() { return begin_iter_; } // NOLINT
BatchIterator<T> end() { return BatchIterator<T>(nullptr); } // NOLINT
private:
BatchIterator<T> begin_iter_;
};
struct XGBAPIThreadLocalEntry;
/*!
* \brief Internal data structured used by XGBoost during training.
*/
class DMatrix {
public:
/*! \brief default constructor */
DMatrix() = default;
/*! \brief meta information of the dataset */
virtual MetaInfo& Info() = 0;
// Default SetInfo overloads forward to the MetaInfo instance.
virtual void SetInfo(const char *key, const void *dptr, DataType dtype,
size_t num) {
this->Info().SetInfo(key, dptr, dtype, num);
}
virtual void SetInfo(const char* key, std::string const& interface_str) {
this->Info().SetInfo(key, interface_str);
}
/*! \brief meta information of the dataset */
virtual const MetaInfo& Info() const = 0;
/*! \brief Get thread local memory for returning data from DMatrix. */
XGBAPIThreadLocalEntry& GetThreadLocal() const;
/**
* \brief Gets batches. Use range based for loop over BatchSet to access individual batches.
*/
template<typename T>
BatchSet<T> GetBatches(const BatchParam& param = {});
template <typename T>
bool PageExists() const;
// the following are column meta data, should be able to answer them fast.
/*! \return Whether the data columns single column block. */
virtual bool SingleColBlock() const = 0;
/*! \brief virtual destructor */
virtual ~DMatrix();
/*! \brief Whether the matrix is dense (every row has an entry for every column). */
bool IsDense() const {
return Info().num_nonzero_ == Info().num_row_ * Info().num_col_;
}
/*!
* \brief Load DMatrix from URI.
* \param uri The URI of input.
* \param silent Whether print information during loading.
* \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
* \param file_format The format type of the file, used for dmlc::Parser::Create.
* By default "auto" will be able to load local binary files.
* \param page_size Page size for external memory.
* \return The created DMatrix.
*/
static DMatrix* Load(const std::string& uri,
bool silent,
bool load_row_split,
const std::string& file_format = "auto",
size_t page_size = kPageSize);
/**
* \brief Creates a new DMatrix from an external data adapter.
*
* \tparam AdapterT Type of the adapter.
* \param [in,out] adapter View onto an external data.
* \param missing Values to count as missing.
* \param nthread Number of threads for construction.
* \param cache_prefix (Optional) The cache prefix for external memory.
* \param page_size (Optional) Size of the page.
*
* \return a Created DMatrix.
*/
template <typename AdapterT>
static DMatrix* Create(AdapterT* adapter, float missing, int nthread,
const std::string& cache_prefix = "",
size_t page_size = kPageSize);
/**
* \brief Create a new Quantile based DMatrix used for histogram based algorithm.
*
* \tparam DataIterHandle External iterator type, defined in C API.
* \tparam DMatrixHandle DMatrix handle, defined in C API.
* \tparam DataIterResetCallback Callback for reset, prototype defined in C API.
* \tparam XGDMatrixCallbackNext Callback for next, prototype defined in C API.
*
* \param iter External data iterator
* \param proxy A handle to ProxyDMatrix
* \param reset Callback for reset
* \param next Callback for next
* \param missing Value that should be treated as missing.
* \param nthread number of threads used for initialization.
* \param max_bin Maximum number of bins.
*
* \return A created quantile based DMatrix.
*/
template <typename DataIterHandle, typename DMatrixHandle,
typename DataIterResetCallback, typename XGDMatrixCallbackNext>
static DMatrix *Create(DataIterHandle iter, DMatrixHandle proxy,
DataIterResetCallback *reset,
XGDMatrixCallbackNext *next, float missing,
int nthread,
int max_bin);
virtual DMatrix *Slice(common::Span<int32_t const> ridxs) = 0;
/*! \brief Number of rows per page in external memory. Approximately 100MB per page for
* dataset with 100 features. */
static const size_t kPageSize = 32UL << 12UL;
protected:
// Per-page-kind batch sources; the GetBatches<T> specializations below
// route each page type to the matching getter.
virtual BatchSet<SparsePage> GetRowBatches() = 0;
virtual BatchSet<CSCPage> GetColumnBatches() = 0;
virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0;
virtual bool EllpackExists() const = 0;
virtual bool SparsePageExists() const = 0;
};
// Explicit specializations: map each page type to the corresponding
// protected getter on DMatrix (declared above).
template<>
inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) {
return GetRowBatches();
}
template<>
inline bool DMatrix::PageExists<EllpackPage>() const {
return this->EllpackExists();
}
template<>
inline bool DMatrix::PageExists<SparsePage>() const {
return this->SparsePageExists();
}
template<>
inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) {
return GetColumnBatches();
}
template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) {
return GetSortedColumnBatches();
}
// Only the ELLPACK source consumes the BatchParam.
template<>
inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) {
return GetEllpackBatches(param);
}
} // namespace xgboost
namespace dmlc {
// Declare xgboost::Entry as POD so dmlc can use trivial copy semantics.
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
namespace serializer {
// Field-by-field (de)serialization of Entry: index first, then fvalue.
template <>
struct Handler<xgboost::Entry> {
inline static void Write(Stream* strm, const xgboost::Entry& data) {
strm->Write(data.index);
strm->Write(data.fvalue);
}
inline static bool Read(Stream* strm, xgboost::Entry* data) {
// short-circuits: fvalue is not read if the index read fails
return strm->Read(&data->index) && strm->Read(&data->fvalue);
}
};
} // namespace serializer
} // namespace dmlc
#endif // XGBOOST_DATA_H_
|
GB_unop__identity_uint16_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__(none))
// op(A') function: GB (_unop_tran__identity_uint16_uint16)
// C type: uint16_t
// A type: uint16_t
// cast: uint16_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
#if 0
// Compiled out in this generated file: the identity apply on uint16 is not
// emitted as a dedicated kernel here; kept for reference only.
GrB_Info GB (_unop_apply__(none))
(
uint16_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/sparse case: every position 0..anz-1 holds an entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
uint16_t z = aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
uint16_t z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (uint16 (A')): transpose with the identity op; the transpose
// loop is provided by GB_unop_transpose.c via the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_uint16_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
log.c | /*-
* Copyright (c) 2012-2017 Ilya Kaliman
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <stdarg.h>
#include <stdio.h>
#ifdef EFP_USE_MPI
#include <mpi.h>
#endif
#include "log.h"
/* Default log handler: prefix the message with "LIBEFP: " and print it to
 * stderr.  Installed as the initial value of _log_cb. */
static void
efp_default_log_cb(const char *msg)
{
fprintf(stderr, "LIBEFP: %s\n", msg);
}
static efp_log_cb _log_cb = efp_default_log_cb;
/* Format a printf-style message (truncated to 511 chars) and hand it to the
 * installed callback.  Under OpenMP only the master thread logs; under MPI
 * only rank 0 logs, so parallel runs emit each message once.  No-op when the
 * callback has been cleared with efp_set_log_cb(NULL). */
void
efp_log(const char *fmt, ...)
{
va_list ap;
char msg[512];
if (_log_cb == NULL)
return;
#ifdef _OPENMP
#pragma omp master
#endif
{
#ifdef EFP_USE_MPI
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank == 0) {
va_start(ap, fmt);
vsnprintf(msg, sizeof(msg), fmt, ap);
_log_cb(msg);
va_end(ap);
}
#else
va_start(ap, fmt);
vsnprintf(msg, sizeof(msg), fmt, ap);
_log_cb(msg);
va_end(ap);
#endif
}
}
/* Install a new log callback; NULL disables logging entirely. */
void
efp_set_log_cb(efp_log_cb log_cb)
{
_log_cb = log_cb;
}
/* Return the currently installed log callback (may be NULL). */
efp_log_cb
efp_get_log_cb(void)
{
return _log_cb;
}
|
nstream-alloc-target.c | ///
/// Copyright (c) 2019, Intel Corporation
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions
/// are met:
///
/// * Redistributions of source code must retain the above copyright
/// notice, this list of conditions and the following disclaimer.
/// * Redistributions in binary form must reproduce the above
/// copyright notice, this list of conditions and the following
/// disclaimer in the documentation and/or other materials provided
/// with the distribution.
/// * Neither the name of Intel Corporation nor the names of its
/// contributors may be used to endorse or promote products
/// derived from this software without specific prior written
/// permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
/// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
/// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
/// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
/// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
/// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
/// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
/// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
/// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
/// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
//////////////////////////////////////////////////////////////////////
///
/// NAME: nstream
///
/// PURPOSE: To compute memory bandwidth when adding a vector of a given
/// number of double precision values to the scalar multiple of
/// another vector of the same length, and storing the result in
/// a third vector.
///
/// USAGE: The program takes as input the number
/// of iterations to loop over the triad vectors, the length of the
/// vectors, and the offset between vectors
///
/// <progname> <# iterations> <vector length> <offset>
///
/// The output consists of diagnostics to make sure the
/// algorithm worked, and of timing statistics.
///
/// NOTES: Bandwidth is determined as the number of words read, plus the
/// number of words written, times the size of the words, divided
/// by the execution time. For a vector length of N, the total
/// number of words read and written is 4*N*sizeof(double).
///
///
/// HISTORY: This code is loosely based on the Stream benchmark by John
/// McCalpin, but does not follow all the Stream rules. Hence,
/// reported results should not be associated with Stream in
/// external publications
///
/// Converted to C++11 by Jeff Hammond, November 2017.
/// Converted to C11 by Jeff Hammond, February 2019.
///
//////////////////////////////////////////////////////////////////////
#pragma omp requires unified_address
#include "prk_util.h"
#include "prk_openmp.h"
int main(int argc, char * argv[])
{
  printf("Parallel Research Kernels version %d\n", PRKVERSION );
  printf("C11/OpenMP TARGET STREAM triad: A = B + scalar * C\n");

  //////////////////////////////////////////////////////////////////////
  /// Read and test input parameters
  //////////////////////////////////////////////////////////////////////

  if (argc < 3) {
    printf("Usage: <# iterations> <vector length>\n");
    return 1;
  }

  int iterations = atoi(argv[1]);
  if (iterations < 1) {
    printf("ERROR: iterations must be >= 1\n");
    return 1;
  }

  // Parse the vector length into a signed type first: assigning a negative
  // atol() result directly to size_t would wrap to a huge positive value and
  // silently pass the "greater than 0" check.
  long length_input = atol(argv[2]);
  if (length_input <= 0) {
    printf("ERROR: Vector length must be greater than 0\n");
    return 1;
  }
  size_t length = (size_t)length_input;

  // Optional third argument selects the OpenMP device; default is the host.
  int device = (argc > 3) ? atol(argv[3]) : omp_get_initial_device();
  if ( (device < 0 || omp_get_num_devices() <= device ) && (device != omp_get_initial_device()) ) {
    printf("ERROR: device number %d is not valid.\n", device);
    return 1;
  }

  printf("Number of iterations = %d\n", iterations);
  printf("Vector length = %zu\n", length);
  printf("OpenMP Device = %d\n", device);

  //////////////////////////////////////////////////////////////////////
  // Allocate space and perform the computation
  //////////////////////////////////////////////////////////////////////

  double nstream_time = 0.0;

  size_t bytes = length*sizeof(double);
  double * restrict A = omp_target_alloc(bytes, device);
  double * restrict B = omp_target_alloc(bytes, device);
  double * restrict C = omp_target_alloc(bytes, device);
  // omp_target_alloc returns NULL on failure; without this check the first
  // target region below would dereference a null device pointer.
  if (A == NULL || B == NULL || C == NULL) {
    printf("ERROR: omp_target_alloc failed\n");
    if (C != NULL) omp_target_free(C, device);
    if (B != NULL) omp_target_free(B, device);
    if (A != NULL) omp_target_free(A, device);
    return 1;
  }

  double scalar = 3.0;

  // Initialize the vectors on the device.
  #pragma omp target teams distribute parallel for simd schedule(static) device(device) is_device_ptr(A,B,C)
  for (size_t i=0; i<length; i++) {
    A[i] = 0.0;
    B[i] = 2.0;
    C[i] = 2.0;
  }

  {
    // iter == 0 is a warmup pass; the clock starts at the top of iter 1, so
    // exactly `iterations` timed passes are measured.
    for (int iter = 0; iter<=iterations; iter++) {
      if (iter==1) nstream_time = omp_get_wtime();
      #pragma omp target teams distribute parallel for simd schedule(static) device(device) is_device_ptr(A,B,C)
      for (size_t i=0; i<length; i++) {
        A[i] += B[i] + scalar * C[i];
      }
    }
    nstream_time = omp_get_wtime() - nstream_time;
  }

  omp_target_free(C, device);
  omp_target_free(B, device);

  //////////////////////////////////////////////////////////////////////
  /// Analyze and output results
  //////////////////////////////////////////////////////////////////////

  // Reference checksum: replay all iterations+1 updates (warmup included) on
  // scalars, then scale by the vector length.
  double ar = 0.0;
  double br = 2.0;
  double cr = 2.0;
  for (int i=0; i<=iterations; i++) {
    ar += br + scalar * cr;
  }
  ar *= length;

  double asum = 0.0;
  #pragma omp target teams distribute parallel for reduction(+:asum) device(device) is_device_ptr(A)
  for (size_t i=0; i<length; i++) {
    asum += fabs(A[i]);
  }

  omp_target_free(A, device);

  double epsilon=1.e-8;
  if (fabs(ar-asum)/asum > epsilon) {
    printf("Failed Validation on output array\n"
           " Expected checksum: %lf\n"
           " Observed checksum: %lf\n"
           "ERROR: solution did not validate\n", ar, asum);
    return 1;
  } else {
    printf("Solution validates\n");
    double avgtime = nstream_time/iterations;
    // 4 words moved per element per iteration (read B, read C, read+write A).
    double nbytes = 4.0 * length * sizeof(double);
    printf("Rate (MB/s): %lf Avg time (s): %lf\n", 1.e-6*nbytes/avgtime, avgtime);
  }

  return 0;
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 *
 * IMPROVEMENT over the classic glibc example: the subtraction is done on
 * local copies, so the caller's *y is no longer clobbered as a side effect
 * of the carry normalization. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  struct timeval xx = *x;
  struct timeval yy = *y;

  /* Perform the carry for the later subtraction by updating yy. */
  if (xx.tv_usec < yy.tv_usec)
  {
    int nsec = (yy.tv_usec - xx.tv_usec) / 1000000 + 1;
    yy.tv_usec -= 1000000 * nsec;
    yy.tv_sec += nsec;
  }
  if (xx.tv_usec - yy.tv_usec > 1000000)
  {
    int nsec = (xx.tv_usec - yy.tv_usec) / 1000000;
    yy.tv_usec += 1000000 * nsec;
    yy.tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = xx.tv_sec - yy.tv_sec;
  result->tv_usec = xx.tv_usec - yy.tv_usec;

  /* Return 1 if result is negative. */
  return xx.tv_sec < yy.tv_sec;
}
/* Order-1, 3D 7-point variable-coefficient stencil, diamond-tiled by
 * PLUTO/CLooG.  Runs TESTS timed sweeps and reports the per-test time. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* BUG FIX: the original assigned Nx/Ny/Nz only when argc > 3 and Nt only
   * when argc > 4, then used all four unconditionally -- reading
   * uninitialized memory when arguments were missing. */
  if (argc < 5) {
    printf("Usage: %s <Nx> <Ny> <Nz> <Nt>\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+2;   /* +2 adds the halo layer on each side */
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);

  // allocate the arrays; A is double-buffered in time (first index 0/1)
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // one coefficient field per point of the 7-point stencil
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 8;
  tile_size[3] = 32;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;

  // initialize variables with a fixed seed so runs are reproducible
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  // num_threads is reported by the PRINT_RESULTS macro (print_utils.h)
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* The tiled sweep below was generated by PLUTO/CLooG and is kept
     * verbatim: the loop bounds encode the legality of the diamond tiling. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,4);t1++) {
    lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
    ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(t1-1,2)),ceild(8*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(4*t1+Ny+5,8)),floord(8*t2+Ny+4,8)),floord(8*t1-8*t2+Nz+Ny+3,8));t3++) {
        for (t4=max(max(max(0,ceild(t1-7,8)),ceild(8*t2-Nz-28,32)),ceild(8*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(4*t1+Nx+5,32)),floord(8*t2+Nx+4,32)),floord(8*t3+Nx+4,32)),floord(8*t1-8*t2+Nz+Nx+3,32));t4++) {
          for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),8*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),8*t3+6),32*t4+30),8*t1-8*t2+Nz+5);t5++) {
            for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
                lbv=max(32*t4,t5+1);
                ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  (void) ts_return; /* sign of the elapsed time is not used */
  (void) t;

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays.  LEAK FIX: the original released the inner
   * levels but leaked the top-level A, coef, and tile_size buffers. */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
matrixstrassen.h | /**
* @file matrixstrassen.h matrix strassen operations.
* @author TPOC: palisade@njit.edu
*
* @copyright Copyright (c) 2017, New Jersey Institute of Technology (NJIT)
* All rights reserved.
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef LBCRYPTO_MATH_MATRIXSTRASSEN_H
#define LBCRYPTO_MATH_MATRIXSTRASSEN_H
#include "matrix.h"
namespace lbcrypto {
// Matrix templated over a generic ring Element, with Strassen (CAPS-style)
// multiplication support.
// NOTE(review): ScalarMult/Add/Sub/ExtractRow dereference stored entries
// with '*', while Equal() and operator() use them as plain values.  This
// only works if Element supports both, and looks like a leftover from a
// smart-pointer storage layout -- confirm against matrix.h before relying
// on either convention.
template<class Element>
class MatrixStrassen { // FIXME : public Serializable {
public:
// Row-major storage: a vector of rows, each row a vector of Element.
typedef vector<vector<Element>> data_t;
typedef vector<Element> lineardata_t;
typedef typename vector<Element>::iterator it_lineardata_t;
// Factory callback producing a freshly constructed "zero" Element.
typedef std::function<Element(void)> alloc_func;
/**
* Constructor that initializes matrix values using a zero allocator
*
* @param &allocZero lambda function for zero initialization.
* @param &rows number of rows.
* @param &cols number of columns.
*/
MatrixStrassen(alloc_func allocZero, size_t rows, size_t cols) : data(), rows(rows), cols(cols), allocZero(allocZero) {
data.resize(rows);
for (auto row = data.begin(); row != data.end(); ++row) {
for (size_t col = 0; col < cols; ++col) {
row->push_back(allocZero());
}
}
}
/**
* Constructor that initializes matrix values using a distribution generation allocator
*
* @param &allocZero lambda function for zero initialization (used for initializing derived matrix objects)
* @param &rows number of rows.
* @param &cols number of columns.
* @param &allocGen lambda function for initialization using a distribution generator.
*/
MatrixStrassen(alloc_func allocZero, size_t rows, size_t cols, alloc_func allocGen);
/**
* Constructor of an empty matrix; SetSize must be called on this matrix to use it
* Basically this exists to support deserializing
*
* @param &allocZero lambda function for zero initialization.
*/
MatrixStrassen(alloc_func allocZero) : data(), rows(0), cols(0), allocZero(allocZero) {}
// Sizes an empty matrix and fills it with zero elements; rejects resizing
// a matrix that already has a size.
void SetSize(size_t rows, size_t cols) {
if( this->rows != 0 || this->cols != 0 )
throw std::logic_error("You cannot SetSize on a non-empty matrix");
this->rows = rows;
this->cols = cols;
data.resize(rows);
for (auto row = data.begin(); row != data.end(); ++row) {
for (size_t col = 0; col < cols; ++col) {
row->push_back(allocZero());
}
}
}
/**
* Copy constructor
*
* @param &other the matrix object to be copied
*/
MatrixStrassen(const MatrixStrassen<Element>& other) : data(), rows(other.rows), cols(other.cols), allocZero(other.allocZero) {
deepCopyData(other.data);
}
/**
* Assignment operator
*
* @param &other the matrix object whose values are to be copied
* @return the resulting matrix
*/
inline MatrixStrassen<Element>& operator=(const MatrixStrassen<Element>& other);
/**
* In-place change of the current matrix to a matrix of all ones
*
* @return the resulting matrix
*/
inline MatrixStrassen<Element>& Ones();
/**
* Fill matrix using the same element
*
* @param &val the element the matrix is filled by
*
* @return the resulting matrix
*/
inline MatrixStrassen<Element>& Fill(const Element &val);
/**
* In-place change of the current matrix to Identity matrix
*
* @return the resulting matrix
*/
inline MatrixStrassen<Element>& Identity();
/**
* Sets the first row to be powers of two
*
* @return the resulting matrix
*/
inline MatrixStrassen<Element> GadgetVector(int32_t base = 2) const;
/**
* Computes the infinity norm
*
* @return the norm in double format
*/
inline double Norm() const;
/**
* Operator for matrix multiplication
*
* @param &other the multiplier matrix
* @return the result of multiplication
*/
inline MatrixStrassen<Element> operator*(MatrixStrassen<Element> const& other) const {
return Mult(other);
}
/**
* Multiplication of matrix by a scalar
*
* @param &other the multiplier element
* @return the result of multiplication
*/
inline MatrixStrassen<Element> ScalarMult(Element const& other) const {
MatrixStrassen<Element> result(*this);
// NOTE(review): loop indices are int32_t while rows/cols are size_t
// (signed/unsigned comparison; would truncate for dimensions >= 2^31).
// Entries are dereferenced with '*', unlike Equal() -- confirm Element's
// intended semantics.
#pragma omp parallel for
for (int32_t col = 0; col < result.cols; ++col) {
for (int32_t row = 0; row < result.rows; ++row) {
*result.data[row][col] = *result.data[row][col] * other;
}
}
return result;
}
/**
* Operator for scalar multiplication
*
* @param &other the multiplier element
* @return the result of multiplication
*/
inline MatrixStrassen<Element> operator*(Element const& other) const {
return ScalarMult(other);
}
/**
* Equality check
*
* @param &other the matrix object to compare to
* @return the boolean result
*/
inline bool Equal(MatrixStrassen<Element> const& other) const {
if (rows != other.rows || cols != other.cols) {
return false;
}
// Compares entries directly (no dereference) -- see class-level note.
for (size_t i = 0; i < rows; ++i) {
for (size_t j = 0; j < cols; ++j) {
if (data[i][j] != other.data[i][j]) {
return false;
}
}
}
return true;
}
/**
* Operator for equality check
*
* @param &other the matrix object to compare to
* @return the boolean result
*/
inline bool operator==(MatrixStrassen<Element> const& other) const {
return Equal(other);
}
/**
* Operator for non-equality check
*
* @param &other the matrix object to compare to
* @return the boolean result
*/
inline bool operator!=(MatrixStrassen<Element> const& other) const {
return !Equal(other);
}
/**
* Get property to access the data as a vector of vectors
*
* @return the data as vector of vectors
*/
const data_t& GetData() const {
return data;
}
/**
* Get property to access the number of rows in the matrix
*
* @return the number of rows
*/
size_t GetRows() const {
return rows;
}
/**
* Get property to access the number of columns in the matrix
*
* @return the number of columns
*/
size_t GetCols() const {
return cols;
}
/**
* Get property to access the zero allocator for the matrix
*
* @return the lambda function corresponding to the element zero allocator
*/
alloc_func GetAllocator() const {
return allocZero;
}
/**
* Sets the evaluation or coefficient representation for all ring elements that support the SetFormat method
*
* @param &format the enum value corresponding to coefficient or evaluation representation
*/
void SetFormat(Format format);
/**
* MatrixStrassen addition
*
* @param &other the matrix to be added
* @return the resulting matrix
*/
inline MatrixStrassen<Element> Add(MatrixStrassen<Element> const& other) const {
if (rows != other.rows || cols != other.cols) {
throw invalid_argument("Addition operands have incompatible dimensions");
}
MatrixStrassen<Element> result(*this);
// NOTE(review): int32_t counters vs size_t dims -- see ScalarMult.
#pragma omp parallel for
for (int32_t j = 0; j < cols; ++j) {
for (int32_t i = 0; i < rows; ++i) {
*result.data[i][j] += *other.data[i][j];
}
}
return result;
}
/**
* Operator for matrix addition
*
* @param &other the matrix to be added
* @return the resulting matrix
*/
inline MatrixStrassen<Element> operator+(MatrixStrassen<Element> const& other) const {
return this->Add(other);
}
/**
* Operator for in-place addition
*
* @param &other the matrix to be added
* @return the resulting matrix (same object)
*/
inline MatrixStrassen<Element>& operator+=(MatrixStrassen<Element> const& other);
/**
* MatrixStrassen substraction
*
* @param &other the matrix to be substracted
* @return the resulting matrix
*/
inline MatrixStrassen<Element> Sub(MatrixStrassen<Element> const& other) const {
if (rows != other.rows || cols != other.cols) {
throw invalid_argument("Subtraction operands have incompatible dimensions");
}
MatrixStrassen<Element> result(allocZero, rows, other.cols);
// NOTE(review): int32_t counters vs size_t dims -- see ScalarMult.
#pragma omp parallel for
for (int32_t j = 0; j < cols; ++j) {
for (int32_t i = 0; i < rows; ++i) {
*result.data[i][j] = *data[i][j] - *other.data[i][j];
}
}
return result;
}
/**
* Operator for matrix substraction
*
* @param &other the matrix to be substracted
* @return the resulting matrix
*/
inline MatrixStrassen<Element> operator-(MatrixStrassen<Element> const& other) const {
return this->Sub(other);
}
/**
* Operator for in-place matrix substraction
*
* @param &other the matrix to be substracted
* @return the resulting matrix (same object)
*/
inline MatrixStrassen<Element>& operator-=(MatrixStrassen<Element> const& other);
/**
* MatrixStrassen transposition
*
* @return the resulting matrix
*/
inline MatrixStrassen<Element> Transpose() const;
// YSP The signature of this method needs to be changed in the future
/**
* MatrixStrassen determinant - found using Laplace formula with complexity O(d!), where d is the dimension
*
* @param *result where the result is stored
*/
inline void Determinant(Element *result) const;
/**
* Cofactor matrix - the matrix of determinants of the minors A_{ij} multiplied by -1^{i+j}
*
* @return the cofactor matrix for the given matrix
*/
inline MatrixStrassen<Element> CofactorMatrixStrassen() const;
/**
* Add rows to bottom of the matrix
*
* @param &other the matrix to be added to the bottom of current matrix
* @return the resulting matrix
*/
inline MatrixStrassen<Element>& VStack(MatrixStrassen<Element> const& other);
/**
* Add columns the right of the matrix
*
* @param &other the matrix to be added to the right of current matrix
* @return the resulting matrix
*/
inline MatrixStrassen<Element>& HStack(MatrixStrassen<Element> const& other);
/**
* MatrixStrassen indexing operator - writeable instance of the element
*
* @param &row row index
* @param &col column index
* @return the element at the index
*/
inline Element& operator()(size_t row, size_t col) {
return data[row][col];
}
/**
* MatrixStrassen indexing operator - read-only instance of the element
*
* @param &row row index
* @param &col column index
* @return the element at the index
*/
inline Element const& operator()(size_t row, size_t col) const {
return data[row][col];
}
/**
* MatrixStrassen row extractor
*
* @param &row row index
* @return the row at the index
*/
inline MatrixStrassen<Element> ExtractRow(size_t row) const {
MatrixStrassen<Element> result(this->allocZero,1,this->cols);
int i = 0;
// NOTE(review): '**elem' double-dereferences the stored entry,
// consistent with ScalarMult but not with Equal() -- see class note.
for (auto elem = this->GetData()[row].begin(); elem != this->GetData()[row].end(); ++elem) {
result(0,i) = **elem;
i++;
}
return result;
//return *this;
}
/**
* Call switch format for each (ring) element
*
*/
inline void SwitchFormat();
/**
* MatrixStrassen multiplication
*
* @param &other the multiplier matrix
* @return the result of multiplication
*/
MatrixStrassen<Element> Mult(const MatrixStrassen<Element>& other, int nrec=0, int pad = -1) const;
/*
* Multiply the matrix by a vector whose elements are all 1's. This causes the elements of each
* row of the matrix to be added and placed into the corresponding position in the output vector.
*/
MatrixStrassen<Element> MultByUnityVector() const;
/*
* Multiply the matrix by a vector of random 1's and 0's, which is the same as adding select
* elements in each row together.
* Return a vector that is a rows x 1 matrix.
*/
MatrixStrassen<Element> MultByRandomVector(std::vector<int> ranvec) const;
// /**
// * Serialize the object into a Serialized
// * @param serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
// * @return true if successfully serialized
// */
//bool Serialize(Serialized* serObj) const;
// /**
// * Populate the object from the deserialization of the Serialized
// * @param serObj contains the serialized object
// * @return true on success
// */
//bool Deserialize(const Serialized& serObj);
private:
// Descriptor of a (sub)matrix used by the CAPS Strassen routines.
struct MatDescriptor {
int lda;
int nrec;
int nproc;
int nprocr;
int nprocc;
int nproc_summa;
int bs;
};
const int DESC_SIZE = 7; // number of ints that make up a MatDescriptor
const int rank=0, base=0;
// Many members are 'mutable' so that logically-const operations
// (e.g. Mult) can use them as scratch state and counters.
mutable data_t data;
size_t rows;
mutable int rowpad = 0;
size_t cols;
mutable int colpad = 0;
alloc_func allocZero;
mutable char *pattern = NULL;
mutable int numAdd = 0;
mutable int numMult = 0;
mutable int numSub = 0;
mutable MatDescriptor desc;
mutable Element zeroUniquePtr = allocZero();
mutable int NUM_THREADS = 1;
void multiplyInternalCAPS( it_lineardata_t A, it_lineardata_t B, it_lineardata_t C, MatDescriptor desc, it_lineardata_t work ) const;
void strassenDFSCAPS( it_lineardata_t A, it_lineardata_t B, it_lineardata_t C, MatDescriptor desc, it_lineardata_t workPassThrough ) const;
void block_multiplyCAPS( it_lineardata_t A, it_lineardata_t B, it_lineardata_t C, MatDescriptor d, it_lineardata_t workPassThrough ) const;
void LinearizeDataCAPS(lineardata_t *lineardataPtr) const;
void UnlinearizeDataCAPS(lineardata_t *lineardataPtr) const;
int getRank() const;
void verifyDescriptor( MatDescriptor desc );
long long numEntriesPerProc( MatDescriptor desc ) const;
//deep copy of data - used for copy constructor
void deepCopyData(data_t const& src);
void getData(const data_t &Adata, const data_t &Bdata, const data_t &Cdata, int row, int inner, int col) const;
void smartSubtractionCAPS(it_lineardata_t result, it_lineardata_t A, it_lineardata_t B) const;
void smartAdditionCAPS(it_lineardata_t result, it_lineardata_t A, it_lineardata_t B) const;
void addMatricesCAPS( int numEntries, it_lineardata_t C, it_lineardata_t A, it_lineardata_t B ) const;
void addSubMatricesCAPS(int numEntries, it_lineardata_t T1, it_lineardata_t S11, it_lineardata_t S12, it_lineardata_t T2,
it_lineardata_t S21, it_lineardata_t S22 ) const;
void subMatricesCAPS( int numEntries, it_lineardata_t C, it_lineardata_t A, it_lineardata_t B ) const;
void tripleAddMatricesCAPS(int numEntries, it_lineardata_t T1, it_lineardata_t S11, it_lineardata_t S12, it_lineardata_t T2,
it_lineardata_t S21, it_lineardata_t S22, it_lineardata_t T3, it_lineardata_t S31, it_lineardata_t S32) const;
void tripleSubMatricesCAPS(int numEntries, it_lineardata_t T1, it_lineardata_t S11, it_lineardata_t S12, it_lineardata_t T2,
it_lineardata_t S21, it_lineardata_t S22, it_lineardata_t T3, it_lineardata_t S31, it_lineardata_t S32) const ;
void distributeFrom1ProcCAPS( MatDescriptor desc, it_lineardata_t O, it_lineardata_t I ) const;
void collectTo1ProcCAPS( MatDescriptor desc, it_lineardata_t O, it_lineardata_t I ) const;
void sendBlockCAPS( int rank, int target, it_lineardata_t O, int bs, int source, it_lineardata_t I, int ldi ) const;
void receiveBlockCAPS( int rank, int target, it_lineardata_t O, int bs, int source, it_lineardata_t I, int ldo ) const;
void distributeFrom1ProcRecCAPS( MatDescriptor desc, it_lineardata_t O, it_lineardata_t I, int ldi ) const;
void collectTo1ProcRecCAPS( MatDescriptor desc, it_lineardata_t O, it_lineardata_t I, int ldo ) const;
};
/**
 * Scalar-on-the-left multiplication, e * M.
 * Delegates to MatrixStrassen::ScalarMult so both operand orders share a
 * single implementation.
 *
 * @param &e the scalar multiplier
 * @param &M the matrix being scaled
 * @return a new matrix holding the scaled entries
 */
template<class Element>
inline MatrixStrassen<Element> operator*(Element const& e, MatrixStrassen<Element> const& M) {
    MatrixStrassen<Element> scaled = M.ScalarMult(e);
    return scaled;
}
/**
* Generates a matrix of rotations. See pages 7-8 of https://eprint.iacr.org/2013/297
*
* @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
* @return the resulting matrix of big binary integers
*/
inline MatrixStrassen<BigInteger> Rotate(MatrixStrassen<Poly> const& inMat);
/**
* Each element becomes a square matrix with columns of that element's
* rotations in coefficient form. See pages 7-8 of https://eprint.iacr.org/2013/297
*
* @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
* @return the resulting matrix of big binary integers
*/
inline MatrixStrassen<BigVector> RotateVecResult(MatrixStrassen<Poly> const& inMat);
/**
* Stream output operator
*
* @param &os stream
* @param &m matrix to be outputted
* @return the chained stream
*/
template<class Element>
inline std::ostream& operator<<(std::ostream& os, const MatrixStrassen<Element>& m);
/**
* Gives the Choleshky decomposition of the input matrix.
* The assumption is that covariance matrix does not have large coefficients because it is formed by
* discrete gaussians e and s; this implies int32_t can be used
* This algorithm can be further improved - see the Darmstadt paper section 4.4
* http://eprint.iacr.org/2013/297.pdf
*
* @param &input the matrix for which the Cholesky decomposition is to be computed
* @return the resulting matrix of floating-point numbers
*/
inline MatrixStrassen<double> Cholesky(const MatrixStrassen<int32_t> &input);
/**
* Convert a matrix of integers from BigInteger to int32_t
* Convert from Z_q to [-q/2, q/2]
*
* @param &input the input matrix
* @param &modulus the ring modulus
* @return the resulting matrix of int32_t
*/
inline MatrixStrassen<int32_t> ConvertToInt32(const MatrixStrassen<BigInteger> &input, const BigInteger& modulus);
/**
* Convert a matrix of BigVector to int32_t
* Convert from Z_q to [-q/2, q/2]
*
* @param &input the input matrix
* @param &modulus the ring modulus
* @return the resulting matrix of int32_t
*/
inline MatrixStrassen<int32_t> ConvertToInt32(const MatrixStrassen<BigVector> &input, const BigInteger& modulus);
/**
* Split a vector of int32_t into a vector of ring elements with ring dimension n
*
* @param &other the input matrix
* @param &n the ring dimension
* @param &params Poly element params
* @return the resulting matrix of Poly
*/
inline MatrixStrassen<Poly> SplitInt32IntoPolyElements(MatrixStrassen<int32_t> const& other, size_t n, const shared_ptr<ILParams> params);
/**
* Another method for splitting a vector of int32_t into a vector of ring elements with ring dimension n
*
* @param &other the input matrix
* @param &n the ring dimension
* @param &params Poly element params
* @return the resulting matrix of Poly
*/
inline MatrixStrassen<Poly> SplitInt32AltIntoPolyElements(MatrixStrassen<int32_t> const& other, size_t n, const shared_ptr<ILParams> params);
}
#endif // LBCRYPTO_MATH_MATRIXSTRASSEN_H
|
main.c | /*************** 2D LBM-DEM Code **************************/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#include <assert.h>
#include "visit_writer.h"
#ifdef _OPENMP
#include <omp.h>
#endif
// Switch on or off FLUID
#define _FLUIDE_
// Maximum number of soil grains
#ifndef nbgrainsMax
#define nbgrainsMax 40000
#endif
// Dimension of the LBM Fluid domain
#ifndef scale
#define scale 1.
#endif
#ifndef lx
#define lx 7826
#endif
#ifndef ly
#define ly 2325
#endif
#ifdef SINGLE_PRECISION
typedef float real;
#define FLOAT_FORMAT "%e"
#else
typedef double real;
#define FLOAT_FORMAT "%le"
#endif
#define pi 3.14159265358979
#define rhoS 2650 // Density of solids
#define rhoW 1000 // Density of water
#define duration 1.5 // Duration of simulation
//********************* Data LBM ************************
#define Q 9
int nbgrains;
// width of LBM grid size, time step, lattice speed
real dx, dtLB, c, c_squ;
// Lattice weights, ordered per the direction diagram below (the values
// match the standard D2Q9 weight set 4/9, 1/9, 1/36).
real _w[Q] = {4. / 9, 1. / 36, 1. / 9, 1. / 36, 1. / 9,
1. / 36, 1. / 9, 1. / 36, 1. / 9};
real * restrict w = _w;
// distribution functions, indexed f[x][y][q] (x extent lx is fixed by the
// pointer-to-array type)
real (* restrict f)[ly][Q];
// ************************************
// * *
// * e2 e9 e8 *
// * \ | / *
// * \ | / *
// * e3--- e1 ---e7 *
// * / | \ *
// * / | \ *
// * e4 e5 e6 *
// * *
// ************************************
int ex[Q] = {0, -1, -1, -1, 0, 1, 1, 1, 0};
int ey[Q] = {0, 1, 0, -1, -1, -1, 0, 1, 1};
// average fluid density
real rho_moy = 1000; // air density =1 or water =1000 or 999.7 at 20
real rho_outlet, q_outlet;
// relaxation parameter
real tau = 0.504;
// MRT relaxation rates per moment -- s4/s6 absent; presumably tied to
// conserved moments (TODO confirm against the collision routine)
real s2 = 1.5, s3 = 1.4, s5 = 1.5, s7 = 1.5, s8 = 1.9841,
s9 = 1.9841; // s8=1.6666667,s9=1.6666667;
// obstacle array
int (* restrict obst)[ly];
// obstacle activity array
int (* restrict act)[ly];
// per-link quantity for obstacle nodes (NOTE(review): likely the wall
// distance/bounce-back fraction -- confirm in the streaming step)
real (* restrict delta)[ly][Q];
// LB diameter for the smallest disk (in nodes number)
real rMin_LB = 10.;
// Fluid kinematic viscosity
real nu = 1e-6; // 15.5e-6 for air and 1e-6 for water at 293K or 20C
real (* restrict press)[ly];
real reductionR = 0.85; // LBM reduced grain diameter
//*********** Data DEM ********************
// Gravity magnitude and tilt angle of the gravity vector
real G = 9.81;
real angleG = 0.0;
// Gravity components (set from G and angleG elsewhere)
real xG, yG;
// DEM time step and its square
real dt; //=5.e-8;
real dt2;
// Spring stiffness
real km = 3e+6, kg = 1.6e+6; // 2e8 1.6e8
real kt = 1.0e+6, ktm = 2e+6; /// changed to higher for lesser
/// interpenetration in sample generation 1.3e8
// Normal/tangential viscous damping coefficients
real nug = 6.4e+1; // 1.1e1
real num = 8.7e+1, numb = 8.7e+1; // 1.5e1
real nuf = 1.5e-1, nugt = 5e-1; // frictionless packing nugt
// Friction coefficients (grain-grain, assembling, grain-wall, rolling)
real mu = .5317;
real mug = 0.0; // Mu for assembling
real mum = .466, mumb = .466; // 0.466 //0.53 0.51 0.43
real murf = 0.01; // 0.01
// Reference grain radius and Verlet-list parameters
real r = 1e-3; // 5e-4;v
real distVerlet = 5e-4; // changed to 1e-6 from 1e-3 for error
long UpdateVerlet = 100.;
real dtt = 0.; // Time after which wall to prepare sample is removed
real iterDEM = 100.; // number of DEM iterations per LBM
// Tracked stats
// EPE-Effective potential energy
// Total Wall Friction - WF,
// SE- Strain Energy ESE & IFR- Total Internal Friction
// (energie_cin/_x/_y/_teta: kinetic energy totals -- French naming)
real xfront, height, energie_cin, energie_x, energie_y, energie_teta,
energy_p, energy_EPE, zmean, SE, ESE, WF,
IFR;
real TSE = 0.0, TBW = 0.0, INCE = 0.0, TSLIP = 0.0,
TRW =
0.0; // Total Body Work and Strain Energy TRW_ Total Rotational Work
real pf = 0., pft = 0., pff = 0.; // previous force
real ic = 0;
// ******** Control parameters *************
// Number of DEM steps in LB
int npDEM;
// Per-grain radii expressed in LB lattice units
real *rLB;
// Output cadence (in time steps) for the various dump channels
int stepView = 400;
int stepPrint = 800;
int stepConsole = 400;
int stepStrob = 4000; //visualisation steps
int stepFilm = 8000;
FILE* s_stats;
int nFile = 0; // Nth File saves LB
int cptFlash;
// Verlet neighbour lists: cumul indexes into the flat neighbours array
int *cumul;
int * restrict neighbours;
// NeighbourWall Bottom, Right, Left & Top
int * restrict neighbourWallB;
int * restrict neighbourWallR;
int * restrict neighbourWallL;
int * restrict neighbourWallT;
int nNeighWallb, nNeighWallt, nNeighWallL, nNeighWallR;
int start = 0;
long nbsteps = 0;
// Optional wall vibration (amplitude/frequency) and simulation clock
int vib = 0;
real freq = 5;
real amp = 4.e-4;
real t = 0;
// Luding Friction Model
// One grain-grain contact: participants, contact normal, and the force
// decomposed in the local (normal/tangential) frame.
struct contact {
int i, j;
real nx, ny; // normal vector from j to i
real fn, ft; // force component in local frame
};
// struct contact (*c1)[nbgrainsMax];
// struct contact *c2;
// Hydrodynamic force/torque triplet acting on one grain
struct force {
real f1, f2, f3;
};
struct force * restrict fhf;
real * restrict fhf1, * restrict fhf2, * restrict fhf3;
// Full per-grain state: position, velocity, acceleration, geometry/mass,
// and the tracked energy/contact statistics.
struct grain {
real x1, x2, x3;
real v1, v2, v3;
real a1, a2, a3;
real r, m, mw, It;
real p; // Pressure on grain
real s; // shear
real f1, f2; // force
real ifm, fm; // friction mobility
real fr, ifr; // frictional energy wall and Internal
real M11, M12, M21, M22; // Moments M11, M12, M21, M22
real ice, slip,
rw; // Inelastic Collisional Energy, slip, & Rotational work
int z; // number of contacts
int zz; // number of contacts sans the Walls
};
struct grain * restrict g;
// Wall
// Positions of the bottom (b), left (g), top (h) and right (d) walls --
// French initials: bas/gauche/haut/droite (presumably; confirm in solver)
static real Mby = 0.;
static real Mgx = 0.;
static real Mhy = 0.;
static real Mdx = 0.;
// initial gap ("ecart" = gap); NOTE(review): confirm exact use at call site
real ecart_ini = 1.0;
// ***************************************************************************
// * utilities
// ***************************************************************************
// Thresholded selection: returns y unless x < y, in which case 0.
// (Despite the name this is not a maximum; it zeroes the rolling torque
// when the first term falls below the second.)
real Maxt(real x, real y) {
  return (x < y) ? 0. : y;
}
//---------------------------------------------------
//---------------------------------------------------
// Smallest radius over all grains.  Assumes nbgrains >= 1.
real minimum_grain_radius(int nbgrains, struct grain g[nbgrains]) {
  real smallest = g[0].r;
  for (int k = 1; k < nbgrains; ++k)
    smallest = fmin(smallest, g[k].r);
  return smallest;
}
//----------------------------------------------------
//----------------------------------------------------
// Exchange the values pointed to by a and b.
void swap(real* a, real* b) {
  real held = *a;
  *a = *b;
  *b = held;
}
// *******************************************************************
// * Output files *
// *******************************************************************
// Dump one time step as five rectilinear-mesh files (grain pressure,
// velocity, acceleration; fluid pressure, velocity) via
// write_rectilinear_mesh().  Nodes covered by a grain carry that grain's
// values and zero fluid fields; fluid nodes carry grain_pressure = -1 and
// fluid fields reconstructed from the distributions f.
void write_vtk(int nx, int ny, real f[nx][ny][Q], int nbgrains, struct grain g[nbgrains]) {
  // One output base name per field, all numbered by the global nFile.
  char filename[255];
  sprintf(filename, "lbm-dem_%.6i", nFile);
  char gpress[255];
  sprintf(gpress, "grain_pressure_%.6i", nFile);
  char gvel[255];
  sprintf(gvel, "grain_velocity_%.6i", nFile);
  char gacc[255];
  sprintf(gacc, "grain_acceleration_%.6i", nFile);
  char fpress[255];
  sprintf(fpress, "fluid_pressure_%.6i", nFile);
  char fvel[255];
  sprintf(fvel, "fluid_velocity_%.6i", nFile);
  int dims[] = {nx, ny, 1};
  // Node coordinates, normalised by the x-extent (square cells).
  float *xs = malloc(sizeof(float)*nx);
  float *ys = malloc(sizeof(float)*ny);
  float *zs = malloc(sizeof(float)*1);
  float pasxyz = 1. / nx;
  for (int i = 0; i < nx; i++) xs[i] = i*pasxyz;
  for (int i = 0; i < ny; i++) ys[i] = i*pasxyz;
  *zs = 0;
  int nvars = 5; // (only used by the commented-out single-file call below)
  int vardims[5][1] = {{1}, {3}, {3}, {1}, {3}};
  int centering[5][1] = {{1}, {1}, {1}, {1}, {1}};
  char* varnames[5][1] = {{"grain_pressure"},
                          {"grain_velocity"},
                          {"grain_acceleration"},
                          {"fluid_pressure"},
                          {"fluid_velocity"}};
  char* filenames[] = {gpress, gvel, gacc, fpress, fvel};
  // Field buffers, ny rows of nx columns (pointer-to-row-array idiom so
  // they can be indexed as [y][x]).
  float (*grain_pressure )[nx] = malloc(sizeof(float)*nx*ny);
  float (*grain_velocity )[nx][3] = malloc(sizeof(float)*nx*ny*3);
  float (*grain_acceleration)[nx][3] = malloc(sizeof(float)*nx*ny*3);
  float (*fluid_pressure )[nx] = malloc(sizeof(float)*nx*ny);
  float (*fluid_velocity )[nx][3] = malloc(sizeof(float)*nx*ny*3);
  float* vars[5][1] = {{(float*)grain_pressure},
                       {(float*)grain_velocity},
                       {(float*)grain_acceleration},
                       {(float*)fluid_pressure},
                       {(float*)fluid_velocity}};
  for (int y = 0; y < ny; y++) {
    for (int x = 0; x < nx; x++) {
      int i = obst[x][y]; // grain index at this node, or -1 / nbgrains
      if (i >= 0 && i < nbgrains) {
        // Solid node: copy the covering grain's state; zero fluid fields.
        grain_pressure[y][x] = g[i].p;
        grain_velocity[y][x][0] = g[i].v1;
        grain_velocity[y][x][1] = g[i].v2;
        grain_velocity[y][x][2] = 0.0;
        grain_acceleration[y][x][0] = g[i].a1;
        grain_acceleration[y][x][1] = g[i].a2;
        grain_acceleration[y][x][2] = 0.0;
        fluid_pressure[y][x] = 0.0;
        fluid_velocity[y][x][0] = 0.0;
        fluid_velocity[y][x][1] = 0.0;
        fluid_velocity[y][x][2] = 0.0;
      } else {
        // Fluid (or wall) node: grain fields are sentinel/zero, fluid
        // pressure and momentum are the zeroth/first moments of f.
        grain_pressure[y][x] = -1;
        grain_velocity[y][x][0] = 0.0;
        grain_velocity[y][x][1] = 0.0;
        grain_velocity[y][x][2] = 0.0;
        grain_acceleration[y][x][0] = 0.0;
        grain_acceleration[y][x][1] = 0.0;
        grain_acceleration[y][x][2] = 0.0;
        fluid_pressure[y][x] = 0.0;
        fluid_velocity[y][x][0] = 0.0;
        fluid_velocity[y][x][1] = 0.0;
        fluid_velocity[y][x][2] = 0.0;
        for (int j = 0; j < Q; ++j) {
          fluid_pressure[y][x] += f[x][y][j];
          fluid_velocity[y][x][0] += f[x][y][j] * ex[j];
          fluid_velocity[y][x][1] += f[x][y][j] * ey[j];
        }
        // p = cs^2 * rho_moy * (sum(f) - 1), cs^2 = 1/3 in lattice units.
        fluid_pressure[y][x] = (1. / 3.) * rho_moy * (fluid_pressure[y][x] - 1.);
      }
    }
  }
  // write_rectilinear_mesh(filename, 1, dims, xs, ys, zs, nvars, vardims, centering, varnames, vars);
  // One file per variable instead of a single multi-variable file.
  for (int i = 0; i < 5; ++i)
    write_rectilinear_mesh(filenames[i], 1, dims, xs, ys, zs, 1, vardims[i],
                           centering[i], varnames[i], vars[i]);
  free(xs);
  free(ys);
  free(zs);
  free(grain_pressure);
  free(grain_velocity);
  free(grain_acceleration);
  free(fluid_velocity);
  free(fluid_pressure);
}
// Write per-grain state to "DEM%06d.dat" and append aggregate statistics
// (front position, energies, coordination-number histogram, ...) to
// "stats.data".  Also resets and re-accumulates the global energy trackers
// (SE, WF, IFR, INCE, TSLIP, TRW, ...) as a side effect.
void write_DEM() {
  int i;
  char filename[25];
  real N0, N1, N2, N3, N4, N5; // Count of particles with 0..5 contacts
  real xgrainmax;
  FILE* outfile;
  // Output file
  // sprintf(filename,"DEM_Grains%.6i.dat",nFile);
  sprintf(filename, "DEM%.6i.dat", nFile);
  outfile = fopen(filename, "w");
  // Seed extrema from grain 0, then reset all per-dump accumulators.
  xfront = g[0].x1 + g[0].r;
  height = g[0].x2 + g[0].r;
  energie_cin = 0.;
  energie_x = 0.;
  energie_y = 0.;
  energie_teta = 0.;
  energy_p = 0.;
  energy_EPE = 0.;
  SE = 0.;
  ESE = 0.;
  WF = 0.;
  IFR = 0.;
  INCE = 0.;
  TSLIP = 0.;
  TRW = 0.;
  zmean = 0;
  xgrainmax = g[0].x1;
  N0 = 0;
  N1 = 0;
  N2 = 0;
  N3 = 0;
  N4 = 0;
  N5 = 0;
  for (i = 0; i < nbgrains; i++) {
    // Coordination-number statistics.
    zmean += g[i].z;
    if (g[i].z == 0) N0 += 1;
    if (g[i].z == 1) N1 += 1;
    if (g[i].z == 2) N2 += 1;
    if (g[i].z == 3) N3 += 1;
    if (g[i].z == 4) N4 += 1;
    if (g[i].z == 5) N5 += 1;
    // Kinetic (translational x/y, rotational) and potential energy.
    energie_x += 0.5 * g[i].m * g[i].v1 * g[i].v1;
    energie_y += 0.5 * g[i].m * g[i].v2 * g[i].v2;
    energie_teta += 0.5 * g[i].It * g[i].v3 * g[i].v3;
    energy_p += g[i].m * G * g[i].x2;
    /* if (nbsteps*dt>=dtt) */
    // Elastic strain energy stored in the normal (kg) and tangential (kt)
    // contact springs.
    SE += 0.5 * (((g[i].p * g[i].p) / kg) + ((g[i].s * g[i].s) / kt));
    WF += g[i].fr;
    // Internal friction proxy: |force . displacement| over this step.
    g[i].ifr =
        fabs(((g[i].m * G + g[i].f2) * (dt * g[i].v2 + dt2 * g[i].a2 / 2.)) +
             (g[i].f1 * (dt * g[i].v1 + dt2 * g[i].a1 / 2.)));
    // g[i].ifr=(g[i].f2*(dt*g[i].v2))+(g[i].f1*(dt*g[i].v1));
    IFR += g[i].ifr;
    TSLIP += g[i].slip;
    TRW += g[i].rw;
    INCE += g[i].ice;
    TBW += g[i].ifr;
    ESE = 0.5 * (((g[i].p * g[i].p) / kg) + ((g[i].s * g[i].s) / kt));
    TSE += ESE;
    // if(g[i].x2>(-1.632*g[i].x1+0.0408)){energy_EPE+=g[i].m*G*g[i].x2;}
    // Track sample extrema; xfront counts only grains with a non-wall
    // contact (zz > 0).
    if (g[i].x1 + g[i].r > xgrainmax) {
      xgrainmax = g[i].x1 + g[i].r;
    }
    if (g[i].x2 + g[i].r > height) {
      height = g[i].x2 + g[i].r;
    }
    if (g[i].zz > 0 && g[i].x1 + g[i].r >= xfront) {
      xfront = g[i].x1 + g[i].r;
    }
    // Friction mobility averaged over the grain's contacts.
    if (g[i].z == 0) {
      g[i].fm = 0;
    } else
      g[i].fm = g[i].ifm / g[i].z;
    fprintf(outfile,
            "%i\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%"
            "le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%"
            "le\t%i\n",
            i, g[i].r, g[i].x1, g[i].x2, g[i].x3, g[i].v1, g[i].v2, g[i].v3,
            g[i].a1, g[i].a2, g[i].a3, fhf1[i], fhf2[i], fhf3[i], g[i].p,
            g[i].s, ESE, g[i].fr, g[i].ifr, g[i].ice, g[i].slip, g[i].rw,
            g[i].fm, g[i].M11, g[i].M12, g[i].M21, g[i].M22, g[i].z);
  }
  energie_cin = energie_x + energie_y + energie_teta;
  zmean = zmean / nbgrains;
  // Append the aggregate line to the running statistics file.
  s_stats = fopen("stats.data", "a");
  fprintf(s_stats,
          "%le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le "
          "%le %le %le %le %le\n",
          nbsteps * dt - dtt, xfront, xgrainmax, height, zmean, energie_x,
          energie_y, energie_teta, energie_cin, N0 / nbgrains, N1 / nbgrains,
          N2 / nbgrains, N3 / nbgrains, N4 / nbgrains, N5 / nbgrains, energy_p,
          SE, WF, IFR, INCE, TSLIP, TRW);
  fclose(s_stats);
  fclose(outfile);
}
// Write the current grain configuration and contact network as an EPS file
// ("DEM%06d.ps").  Grains are drawn as filled circles shaded by friction
// mobility; every overlapping pair of grains is joined by a line segment.
void write_forces() {
  int i, j;
  real dn;
  char nomfile[25];
  FILE* outfile1;
  // Output file name, e.g. "DEM000042.ps".
  sprintf(nomfile, "DEM%.6i.ps", nFile);
  outfile1 = fopen(nomfile, "w");
  if (outfile1 == NULL) { // fail soft instead of crashing in fprintf
    perror(nomfile);
    return;
  }
  real margin = 10 * g[0].r, hrx1 = lx, hry2 = ly;
  // EPS header.  A literal "%%" (required by the DSC comments) needs
  // "%%%%" in the printf format; the previous "%%%B...", "%%%C...",
  // "%%%T..." forms contained bogus conversion specifiers (undefined
  // behaviour in fprintf).
  fprintf(outfile1, "%%!PS-Adobe-3.0 EPSF-3.0 \n");
  fprintf(outfile1, "%%%%BoundingBox: %f %f %f %f \n", -margin, -margin,
          hrx1 + margin, hry2 + margin);
  fprintf(outfile1, "%%%%Creator: Krishna Kumar \n");
  fprintf(outfile1, "%%%%Title: DEM Grains & Forces \n");
  fprintf(outfile1, "0.1 setlinewidth 0.0 setgray \n");
  // Grain discs (coordinates scaled to PostScript units).
  // NOTE: loops previously ran with i <= nbgrains / j <= nbgrains,
  // reading one element past the end of g[]; bounds fixed to strict <.
  for (i = 0; i < nbgrains; i++)
    fprintf(outfile1,
            "newpath %le %le %le 0.0 setlinewidth %.2f setgray 0 360 arc gsave "
            "fill grestore\n",
            g[i].x1 * 10000, g[i].x2 * 10000, g[i].r * 10000,
            (0.8 - g[i].fm / 2));
  // Contact lines: a segment between each overlapping pair (each pair is
  // visited twice, once per ordering, as in the original).
  for (i = 0; i < nbgrains; i++) {
    for (j = 0; j < nbgrains; j++) {
      dn = (sqrt((g[i].x1 - g[j].x1) * (g[i].x1 - g[j].x1) +
                 (g[i].x2 - g[j].x2) * (g[i].x2 - g[j].x2))) -
           g[i].r - g[j].r;
      if (dn < -1e-10 && i != j) {
        // printf("dn for i %i and j %i are: %le \n",i,j,dn);
        fprintf(outfile1, "%le setlinewidth \n 0.0 setgray \n", 1.); // c1[i][j].fn);
        fprintf(outfile1, "1 setlinecap \n newpath \n");
        fprintf(outfile1, "%le %le moveto \n %le %le lineto\n", g[i].x1 * 10000,
                g[i].x2 * 10000, g[j].x1 * 10000, g[j].x2 * 10000);
        fprintf(outfile1, "stroke \n");
      }
    }
  }
  fclose(outfile1);
}
// --------------------------
// --------------------------
// Write the LB fluid field as a legacy-ASCII VTK rectilinear grid
// ("densities%06d.vtk": pressure scalar + velocity vectors) and, for the
// row y == 2, a pressure profile along x ("pressure_base%06d.dat").
// Solid nodes (obst >= 0) are written as zeros.
void write_densities() {
  int x, y, i;
  real pasxyz;
  real P, u_x, u_y;
  char filename[25];
  char filename_press[25];
  FILE* outfile;
  FILE* s_press;
  sprintf(filename, "densities%.6i.vtk", nFile);
  sprintf(filename_press, "pressure_base%.6i.dat", nFile);
  pasxyz = 1. / lx; // grid spacing, domain normalised to unit width
  outfile = fopen(filename, "w");
  s_press = fopen(filename_press, "w");
  // VTK legacy header and rectilinear coordinates.
  fprintf(outfile, "# vtk DataFile Version 2.0\n");
  fprintf(outfile, "Outfile domain LB t: %e\n", t);
  fprintf(outfile, "ASCII\n");
  fprintf(outfile, "DATASET RECTILINEAR_GRID\n");
  fprintf(outfile, "DIMENSIONS %d %d 1\n", lx, ly);
  fprintf(outfile, "X_COORDINATES %d float\n", lx);
  for (i = 0; i <= lx - 1; i++) {
    fprintf(outfile, "%e ", (float)i * pasxyz);
  }
  fprintf(outfile, "\n");
  fprintf(outfile, "Y_COORDINATES %d float\n", ly);
  for (i = 0; i <= ly - 1; i++) {
    fprintf(outfile, "%e ", (float)i * pasxyz);
  }
  fprintf(outfile, "\n");
  fprintf(outfile, "Z_COORDINATES 1 float\n");
  fprintf(outfile, "0\n");
  // LB pressure field: p = cs^2 * rho_moy * (sum_i f_i - 1), cs^2 = 1/3.
  fprintf(outfile, "POINT_DATA %d\n", lx * ly);
  fprintf(outfile, "SCALARS Pressure float 1\n");
  fprintf(outfile, "LOOKUP_TABLE default\n");
  for (y = 0; y < ly; y++) {
    for (x = 0; x < lx; x++) {
      P = 0.;
      for (i = 0; i < Q; i++) {
        P += f[x][y][i];
      }
      P = (1. / 3.) * rho_moy * (P - 1.);
      if (obst[x][y] < 0) {
        fprintf(outfile, "%.4lf\n", P);
        if (y == 2) { // near-bottom row doubles as the base-pressure profile
          fprintf(s_press, "%le %le\n", x * pasxyz, P);
        }
      } else {
        fprintf(outfile, "%.4lf\n", 0.);
        if (y == 2) {
          fprintf(s_press, "%le %le\n", x * pasxyz, 0.0);
        }
      }
    }
  }
  // Velocity field: first moment of f (not divided by density).
  fprintf(outfile, "VECTORS VecVelocity float\n");
  for (y = 0; y < ly; y++) {
    for (x = 0; x < lx; x++) {
      // P=rho_moy;
      u_x = 0.;
      u_y = 0.;
      for (i = 0; i < Q; i++) {
        u_x += f[x][y][i] * ex[i];
        u_y += f[x][y][i] * ey[i];
      }
      // P = (P-rho_moy)*1./3.;
      // P = (1./3.)*rho_moy*(P-1.);
      if (obst[x][y] < 0) {
        fprintf(outfile, "%.4lf %.4lf 0.\n", u_x, u_y);
      } else {
        fprintf(outfile, "%.4lf %.4lf 0.\n", 0., 0.);
      }
    }
  }
  fclose(s_press);
  fclose(outfile);
}
// *******************************************************************
// * sample initial *
// *******************************************************************
// Generate a synthetic starting sample: identical grains of radius r laid
// out on a rectangular lattice (rows of 14, spacing 2r), all at rest.
// Fills the global grain array g[0..nbgrains-1].
void temp_sample() {
  long i, j, k; // j = column index, k = row index of the lattice
  j = 0;
  k = 0;
  for (i = 0; i < nbgrains; ++i) {
    g[i].r = r; //*(real)(i+1)/nbgrains;
    g[i].m = rhoS * pi * g[i].r * g[i].r; // disc mass (2-D)
#ifdef _FLUIDE_
    g[i].mw = rhoW * pi * g[i].r * g[i].r; // displaced-water mass
#else
    g[i].mw = 0;
#endif
    // g[i].m=(4./3.)*rhoS*pi*g[i].r*g[i].r*g[i].r;
    g[i].It = g[i].m * g[i].r * g[i].r / 2; // disc moment of inertia
    // g[i].It=(2./5.)*g[i].m*g[i].r*g[i].r;
    g[i].x1 = r * 1.5 + 2. * r * j; // slope variant: r*(1.5+(k/10.))
    g[i].x2 = r + 2. * r * k;
    g[i].x3 = 0.;
    g[i].v1 = 0.;
    g[i].v2 = 0.;
    g[i].v3 = 0.;
    g[i].a1 = 0.;
    g[i].a2 = 0.;
    g[i].a3 = 0.;
    // if(j<=4) {j++;} else {j=0;k++;};
    // Advance lattice position: wrap to a new row after 14 columns.
    // NOTE(review): the (j < 0 && k == 0) branch is never taken (j starts
    // at 0 and only increments) -- looks like a leftover experiment.
    if (j < 0 && k == 0) {
      j++;
    } else {
      if (j <= 13) {
        j++;
      } else {
        j = 0;
        k++;
      };
    };
  }
}
// Read a grain sample from 'filename_sample'.
// Format: one comment line, then the grain count, then per grain
// "r x1 x2;" in units of the reference radius r (rescaled here).
// Sets the global 'nbgrains' and returns a newly allocated grain array.
// Exits with a message on I/O or format errors (previously these were
// silently ignored, leading to crashes or garbage grains downstream).
struct grain* read_sample(char * filename_sample) {
  FILE *sample_file = fopen(filename_sample, "r");
  if (sample_file == NULL) {
    perror(filename_sample);
    exit(EXIT_FAILURE);
  }
  char com[256];
  if (fgets(com, 256, sample_file) == NULL) {
    fprintf(stderr, "read_sample: empty file %s\n", filename_sample);
    exit(EXIT_FAILURE);
  }
  printf("%s\n", com);
  if (fscanf(sample_file, "%d\n", &nbgrains) != 1 || nbgrains <= 0) {
    fprintf(stderr, "read_sample: bad grain count in %s\n", filename_sample);
    exit(EXIT_FAILURE);
  }
  // calloc, not malloc: struct grain carries many accumulator fields
  // (p, s, f1, f2, fr, z, zz, ...) that the contact routines increment
  // without ever being initialised here; zero-fill avoids reading
  // indeterminate memory.
  struct grain *g = calloc(nbgrains, sizeof(struct grain));
  if (g == NULL) {
    fprintf(stderr, "read_sample: out of memory for %d grains\n", nbgrains);
    exit(EXIT_FAILURE);
  }
  printf("Nb grains %d\n", nbgrains);
  for (int i = 0; i < nbgrains; ++i) {
    if (fscanf(sample_file, FLOAT_FORMAT" "FLOAT_FORMAT" "FLOAT_FORMAT";\n",
               &g[i].r, &g[i].x1, &g[i].x2) != 3) {
      fprintf(stderr, "read_sample: truncated sample at grain %d\n", i);
      exit(EXIT_FAILURE);
    }
    // printf("%le %le %le\n",g[i].r,g[i].x1,g[i].x2);
    // Scale from reduced units to physical units.
    g[i].r = g[i].r * r;
    g[i].m = rhoS * pi * g[i].r * g[i].r;   // disc mass (2-D)
    g[i].It = g[i].m * g[i].r * g[i].r / 2; // disc moment of inertia
    g[i].x1 = g[i].x1 * r;
    g[i].x2 = g[i].x2 * r;
    // NOTE(review): unlike temp_sample(), mw stays 0 even when _FLUIDE_
    // is defined -- confirm buoyant mass is set elsewhere for read samples.
    g[i].x3 = 0.;
    g[i].v1 = 0.;
    g[i].v2 = 0.;
    g[i].v3 = 0.;
    g[i].a1 = 0.;
    g[i].a2 = 0.;
    g[i].a3 = 0.;
  }
  fclose(sample_file);
  return g;
}
// Report the sample bounding box, total grain mass, and solid fraction Phi.
void check_sample(int nbgrains, struct grain g[nbgrains]) {
  real xHi = g[0].x1, xLo = g[0].x1;
  real yHi = g[0].x2, yLo = g[0].x2;
  real totalMass = 0.;
  for (int k = 0; k < nbgrains; ++k) {
    totalMass += g[k].m;
    xHi = fmax(xHi, g[k].x1 + g[k].r);
    xLo = fmin(xLo, g[k].x1 - g[k].r);
    yHi = fmax(yHi, g[k].x2 + g[k].r);
    yLo = fmin(yLo, g[k].x2 - g[k].r);
  }
  const real L0 = xHi - xLo;
  const real H0 = yHi - yLo;
  printf("L0=%le H0=%le Mass of Grains=%le Phi=%le\n", L0, H0, totalMass,
         totalMass / (rhoS * (L0 * H0)));
}
// *******************************************************************************************
// * Initialise obstacle array *
// *******************************************************************************************
void init_obst() {
int x, y, i, xi, yi, xf, yf;
// c.d.g. sphere
// real xc,yc;
real dist2, r2, R2, xc, yc, rbl0;
for (x = 1; x < lx - 1; x++) {
for (y = 1; y < ly - 1; y++) {
obst[x][y] = -1;
}
}
for (x = 0; x < lx; x++) {
obst[x][0] = obst[x][ly - 1] = nbgrains;
act[x][0] = act[x][ly - 1] = 0;
}
for (y = 1; y < ly - 1; y++) {
obst[0][y] = obst[lx - 1][y] = nbgrains;
act[0][y] = act[lx - 1][y] = 0;
}
for (i = 0; i < nbgrains; i++) {
xc = (g[i].x1 - Mgx) / dx;
yc = (g[i].x2 - Mby) / dx;
r2 = rLB[i] * rLB[i];
// Unreduced grain radius
rbl0 = g[i].r / dx;
R2 = rbl0 * rbl0;
xi = (int)(xc - rbl0);
xf = (int)(xc + rbl0);
if (xi < 1) xi = 1;
if (xf >= lx - 1) xf = lx - 2;
yi = (int)(yc - rbl0);
yf = (int)(yc + rbl0);
if (yi < 1) yi = 1;
if (yf >= ly - 1) yf = ly - 2;
for (x = xi; x <= xf; x++) {
for (y = yi; y <= yf; y++) {
dist2 = (x - xc) * (x - xc) + (y - yc) * (y - yc);
if (dist2 <= R2) {
if (dist2 <= r2) obst[x][y] = i;
}
}
}
}
}
// *******************************************************************************************
// * Initialise density distribution function with equilibrium to zero density
// *
// *******************************************************************************************
// Set every distribution function to its rest-state equilibrium, i.e. the
// lattice weights w[] (fluid at rest with unit reduced density).
void init_density(int nx, int ny, real f[nx][ny][Q]) {
  for (int ix = 0; ix < nx; ix++)
    for (int iy = 0; iy < ny; iy++)
      for (int q = 0; q < Q; q++)
        f[ix][iy][q] = w[q];
}
// *******************************************************************
// * Calculate the forces on grains *
// *******************************************************************
// Contact force exerted on grain i by grain j: linear spring-dashpot in the
// normal direction, viscous tangential force capped by Coulomb friction, and
// a rolling-resistance torque.  Also accumulates per-grain statistics
// (pressure, shear, slip, rotational work, coordination number, moment
// tensor) as side effects.  The caller applies the reaction to j.
// NOTE(review): pft/pff are single globals shared across ALL contacts, so
// the "previous force" used in the slip/rw terms is whichever contact ran
// last -- confirm this history model is intended.
struct force force_grains(long i, long j) {
  // distance normale
  real dn, xOiOj, yOiOj, OiOj;
  real xn, yn;
  real vn, vxOiOj, vyOiOj, vt;
  real ftest;
  struct force f;
  double fn, ft;
  // Relative position of the two centres and the normal gap dn
  // (negative when the grains overlap).
  xOiOj = g[i].x1 - g[j].x1;
  yOiOj = g[i].x2 - g[j].x2;
  OiOj = sqrt(xOiOj * xOiOj + yOiOj * yOiOj);
  dn = OiOj - g[i].r - g[j].r;
  // No contact: zero force.
  if (dn >= 0) {
    f.f1 = 0;
    f.f2 = 0;
    f.f3 = 0;
  } else {
    // relative normal velocity
    vxOiOj = g[i].v1 - g[j].v1;
    vyOiOj = g[i].v2 - g[j].v2;
    xn = xOiOj / OiOj; // unit normal from j to i
    yn = yOiOj / OiOj;
    // Compute velocities at contact
    vn = vxOiOj * xn + vyOiOj * yn;
    // Tangential velocity at the contact point (includes both spins)
    vt = -vxOiOj * yn + vyOiOj * xn - g[i].v3 * g[i].r - g[j].v3 * g[j].r;
    // Normal force: spring + dashpot, no attraction allowed.
    fn = -kg * dn - nug * vn;
    if (fn < 0) fn = 0.0;
    // Tangential force, clipped to the Coulomb cone |ft| <= mu*fn.
    ft = - kt * vt * dt;
    ftest = mu * fn;
    if (fabs(ft) > ftest) {
      if (ft < 0.0)
        ft = ftest;
      else
        ft = -ftest;
    }
    // Rotate (fn, ft) from the contact frame into global x/y; f3 is the
    // torque with rolling resistance applied through Maxt().
    f.f1 = fn * xn - ft * yn;
    f.f2 = fn * yn + ft * xn;
    f.f3 = -Maxt(ft * g[i].r, fn * murf * g[i].r * g[j].r);
    // Accumulate contact statistics on both grains.
    g[i].p += fn;
    g[j].p += fn;
    g[i].f1 += f.f1;
    g[i].f2 += f.f2;
    g[i].s += ft;
    g[j].s += ft;
    g[i].slip +=
        fabs(ft) * (fabs(vt * dt) + (fabs(ft - pft)) / kt);
    pft = ft;
    g[i].rw += fabs(f.f3) * (fabs(g[i].v3 * dt) + (fabs(f.f3 - pff)) / kt);
    pff = f.f3;
    g[i].z += 1;  // total contacts
    g[i].zz += 1; // contacts excluding walls
    g[i].ice += ic;
    // Friction mobility |ft| / (mu*fn); reset when the normal force vanishes.
    if (fn == 0)
      g[i].ifm = 0;
    else
      g[i].ifm += fabs(ft / (mu * fn));
    // Stress computations (per-grain moment tensor, force x branch vector).
    g[i].M11 += f.f1 * xOiOj;
    g[i].M12 += f.f1 * yOiOj;
    g[i].M21 += f.f2 * xOiOj;
    g[i].M22 += f.f2 * yOiOj;
  }
  return f;
}
// *******************************************************************
// * Calculation of forces between the grains and Walls *
// *******************************************************************
// Contact force from the bottom wall on grain i; dn is the (negative)
// overlap.  Spring-dashpot normal force, tangential force capped by the
// Coulomb limit mumb*fn, plus a rolling-resistance torque.
// NOTE(review): ft = ktm*vt has no minus sign and the "*dt" factor is
// commented out ("//Krishna"), unlike the grain-grain law -- confirm the
// intended tangential model.
struct force force_WallB(long i, real dn) {
  real vn, vt, ftest;
  struct force f;
  real fn, ft;
  vn = g[i].v2; // normal (vertical) velocity
  vt = g[i].v1; // tangential (horizontal) velocity
  fn = -km * dn - num * vn;
  if (fn < 0) fn = 0.; // wall cannot pull
  ft = ktm * vt; //*dt; //Krishna
  ftest = mumb * fn;
  // Clip to the Coulomb cone, opposing the tangential motion.
  if (fabs(ft) > ftest) {
    if (ft < 0.0)
      ft = ftest;
    else
      ft = -ftest;
  }
  f.f1 = ft;
  f.f2 = fn;
  f.f3 = -(ft * g[i].r * murf); // rolling-resistance torque
  g[i].p += fn;
  g[i].s += ft;
  g[i].f1 += f.f1;
  g[i].z += 1;
  // Stress computations
  // NOTE(review): these use dt as the branch-vector factor rather than a
  // contact vector as in force_grains -- confirm.
  g[i].M11 += 0;
  g[i].M12 += f.f1 * dt;
  g[i].M21 += 0;
  g[i].M22 += f.f2 * dt;
  g[i].rw += fabs(f.f3) * (fabs(g[i].v3 * dt) + (fabs(f.f3 - pff)) / kt);
  g[i].fr += fabs(ft) * (fabs(vt * dt) + (fabs(ft - pft)) / kt);
  pff = f.f3;
  pft = ft;
  return f;
}
// Contact force from the (possibly vibrating) top wall on grain i; dn is
// the overlap.  The wall moves tangentially as amp*sin(freq*t), hence the
// amp*freq*cos(freq*t) wall velocity in the relative tangential velocity.
// Normal force is clamped to be non-positive (wall pushes down only).
struct force force_WallT(long i, real dn) {
  real vn, vt, fn, ft, ftmax;
  struct force f;
  vn = g[i].v2;
  fn = km * dn - num * vn;
  ic += num * vn * vn * dt; // dashpot dissipation for this contact
  if (fn > 0.) fn = 0.;
  // relative tangential velocity (grain surface vs moving wall)
  vt = g[i].v1 + g[i].v3 * g[i].r -
       amp * freq * cos(freq * t);
  ft = fabs(ktm * vt);
  // if (nbsteps*dt<dtt){mumb=mug;nugt=0;}
  // Friction limit with a viscous (nugt) correction, sign chosen so the
  // limit shrinks with |vt|.
  if (vt >= 0) {
    ftmax = mumb * fn - nugt * vt;
  } // ic+=nugt*vt*vt*dt;}
  else {
    ftmax = mumb * fn + nugt * vt;
  } // ic+=nug*vt*vt*dt;}
  // ftmax=mum*fn-num*vt;
  if (ft > ftmax) ft = ftmax;
  if (vt > 0) ft = -ft; // oppose relative sliding
  f.f1 = ft;
  f.f2 = fn;
  // f.f3=ft*g[i].r-fabs(murf*g[i].r*g[i].v3*fn);
  f.f3 = ft * g[i].r * murf; // rolling-resistance torque
  // f.f3=(ft-fabs(vt*nuf))*g[i].r;
  // Stress computations
  g[i].M11 += 0;
  g[i].M12 += f.f1 * fabs(dt);
  g[i].M21 += 0;
  g[i].M22 += f.f2 * fabs(dt);
  g[i].p += fn;
  g[i].s += ft;
  g[i].z += 1;
  // g[i].rw+=fabs(f.f3)*(fabs(g[i].v3*dt)+(fabs(f.f3-pff))/kt);
  // g[i].fr+=fabs(ft)*(fabs(vt*dt)+(fabs(ft-pft))/kt);
  // pff=f.f3;pft=ft;
  return f;
}
// Contact force from the left wall on grain i (dn = gap, negative when in
// contact).  Spring-dashpot normal force plus a fully-mobilised Coulomb
// tangential force and a rolling-resistance torque.  Also accumulates
// per-grain contact statistics.
struct force force_WallL(long i, real dn) {
  real vn, fn, vt, ft;
  struct force f;
  vn = g[i].v1;
  fn = -km * dn + num * vn;  // normal: spring + damping
  ic += num * vn * vn * dt;  // dashpot dissipation for this contact
  if (fn < 0.) fn = 0.;      // wall cannot pull
  vt = g[i].v2;              // tangential (vertical) velocity
  // Fully-mobilised sliding friction at the Coulomb limit.  (The original
  // had an if/else whose two branches were identical -- dead conditional
  // collapsed into a single assignment; behaviour unchanged.)
  ft = mum * fn;
  if (vt > 0) ft = -ft;      // oppose the tangential motion
  f.f1 = fn;
  f.f2 = ft;
  f.f3 = ft * g[i].r * murf; // rolling-resistance torque
  // Stress computations
  // NOTE(review): uses fabs(dt) as the branch-vector factor, unlike
  // force_grains -- confirm.
  g[i].M11 += f.f1 * fabs(dt);
  g[i].M12 += 0;
  g[i].M21 += f.f2 * fabs(dt);
  g[i].M22 += 0;
  g[i].p += fn;
  g[i].s += ft;
  g[i].f1 += f.f1;
  g[i].z += 1;
  g[i].ice += ic;
  g[i].rw += fabs(f.f3) * fabs(g[i].v3 * dt);
  g[i].fr += fabs(ft) * (fabs(vt * dt) + (fabs(ft - pft)) / kt);
  pft = ft;
  return f;
}
// Contact force from the right wall on grain i; dn is the overlap.
// NOTE(review): fn is clamped to be non-positive only AFTER ft (= mum*fn)
// has been computed from the unclamped value, unlike force_WallT where the
// clamp comes first -- confirm this ordering is intended.
struct force force_WallR(long i, real dn) {
  real vn, fn, vt, ft;
  struct force f;
  vn = g[i].v1;
  fn = km * dn - num * vn; // normal: spring + damping
  // ic+=num*vn*vn*dt;
  vt = g[i].v2; // tangential velocity
  ft = mum * fn; // fully-mobilised friction // ic+=nugt*vt*vt*dt;
  // ftmax=mum*fn-num*vt;
  if (vt > 0) ft = -ft; // oppose the tangential motion
  if (fn > 0.) fn = 0.; // wall pushes leftward only
  f.f1 = fn;
  f.f2 = -ft;
  f.f3 = ft * g[i].r * murf; // rolling-resistance torque
  g[i].p += fn;
  g[i].f1 += f.f1;
  // g[i].ice +=ic;
  // g[i].fr+=fabs(ft)*(fabs(vt*dt)+(fabs(f.f3-pft))/kt);
  pft = ft;
  // Stress computations
  g[i].M11 += f.f1 * fabs(dt);
  g[i].M12 += 0;
  g[i].M21 += f.f2 * fabs(dt);
  g[i].M22 += 0;
  // g[i].s += ft;
  g[i].z += 1;
  return f;
}
// *******************************************************************
// * *
// * *
// * *
// * Calculate the hydrodynamic forces *
// * *
// * *
// * *
// * *
// *******************************************************************
// *******************************************************************************************
// * Reinitialise density distributions for nodes that change state from solid
// to fluid *
// *******************************************************************************************
// Re-initialise the distributions at every solid node to the equilibrium
// for the covering grain's local rigid-body velocity (translation plus
// rotation about the grain centre).  Used so that nodes uncovered by a
// moving grain restart from a physically consistent state.
void reinit_obst_density() {
#pragma omp parallel for
  for (int x = 1; x < lx - 1; x++) {
    for (int y = 1; y < ly - 1; y++) {
      int i = obst[x][y];
      if (i != -1) {
        // u_squ: squared speed (normalised by c) of the solid node, taking
        // the grain's rotation into account, for the equilibrium expansion.
        real u_squ = ((g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3) * (g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3)
                  +(g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3) * (g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3)) / (c * c);
        for (int iLB = 0; iLB < Q; iLB++) {
          // eu : e.u term of the standard second-order equilibrium feq
          real eu = (ex[iLB] * (g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3) +
                     ey[iLB] * (g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3)) / c;
          f[x][y][iLB] = w[iLB] * (1. + 3 * eu + 4.5 * eu * eu - 1.5 * u_squ); //*rho_moy;
        }
      }
    }
  }
}
// *******************************************************************************************
// * Obstacle array construction && nodes activity *
// *******************************************************************************************
void obst_construction() {
int x, y, xp, next_x, next_y, i, iLB, xi, yi, xf, yf;
// c.d.g. sphere
// real xc,yc;
real dist2, aa, bb, cc, r2, xc, yc, R2, rbl0;
#pragma omp parallel for
for (x = 1; x < lx - 1; x++) {
for (y = 1; y < ly - 1; y++) {
obst[x][y] = -1;
act[x][y] = 1;
for (iLB = 1; iLB < Q; iLB++) {
delta[x][y][iLB] = 0;
}
}
}
#pragma omp parallel for
for (i = 0; i < nbgrains; i++) {
xc = (g[i].x1 - Mgx) / dx;
yc = (g[i].x2 - Mby) / dx;
r2 = rLB[i] * rLB[i];
rbl0 = g[i].r / dx; // JYD2
R2 = rbl0 * rbl0;
// xi=xc-rLB[i]; xf=xc+rLB[i]; if(xi<1) xi=1; if(xf>=lx-1) xf=lx-2;
// yi=yc-rLB[i]; yf=yc+rLB[i]; if(yi<1) yi=1; if(yf>=ly-1) yf=ly-2;
xi = (int)(xc - rbl0);
xf = (int)(xc + rbl0);
if (xi < 1) xi = 1;
if (xf >= lx - 1) xf = lx - 2;
yi = (int)(yc - rbl0);
yf = (int)(yc + rbl0);
if (yi < 1) yi = 1;
if (yf >= ly - 1) yf = ly - 2;
for (y = yi; y <= yf; y++) {
for (x = xi; x <= xf; x++) {
dist2 = (x - xc) * (x - xc) + (y - yc) * (y - yc);
if (dist2 <= R2) {
if (dist2 <= r2) obst[x][y] = i;
}
// if(dist2<=r2) obst[x][y]=i;
}
}
// * Obstacle in inteaction with fluid (active obstacles)
for (y = yi; y <= yf; y++) {
for (x = xi; x <= xf; x++) {
if (obst[x][y] == i) {
act[x][y] = 0;
// Search fluid node neighbourss
for (iLB = 1; iLB < Q; iLB++) {
next_x = x + ex[iLB]; // if (next_x<0) next_x=0; if (next_x>=lx)
// next_x=lx-1;
next_y = y + ey[iLB]; // if (next_y<0) next_y=0; if (next_y>=ly)
// next_y=ly-1;
if (obst[next_x][next_y] == -1) {
// Calculating the distance between the node fluid and the wall of the particle
// (Klaus-Nils-Ulrich)
act[x][y] = 1;
xp = x;
aa = fabs(ex[iLB]) + fabs(ey[iLB]);
bb = (xp + ex[iLB] - xc) * ex[iLB] + (y + ey[iLB] - yc) * ey[iLB];
cc = (xp + ex[iLB] - xc) * (xp + ex[iLB] - xc) +
(y + ey[iLB] - yc) * (y + ey[iLB] - yc) - r2;
delta[x][y][iLB] = (bb - sqrt(fabs(bb * bb - aa * cc))) / aa;
}
}
}
}
}
}
}
// ************************************************************************************************
// * Principal LB: Collision - (Streaming + Boundary Conditions) *
// ************************************************************************************************
// One full LB step on the D2Q9 lattice:
//   1. MRT (multiple-relaxation-time) collision in moment space at every
//      fluid node (moments per Lallemand-Luo: rho, e, eps, j_x, q_x, j_y,
//      q_y, p_xx, p_xy; relaxation rates s2..s9).
//   2. Half-way bounce-back at the four domain walls and corners.
//   3. Interpolated bounce-back at grain surfaces using delta[][][] and the
//      grain's local rigid-body velocity (momentum exchange with moving
//      walls, cf. Yu-Mei-Luo-Shyy / JYD-Mouloud 2.3.3).
//   4. Streaming, implemented in-place by the two swap passes at the end.
void collision_streaming() {
  const int half = (Q - 1) / 2; // opposite direction of iLB is iLB +/- half
  const real a = 1. / 36;
  // Post-collision part computation
  // (Yu-Mei-Luo-Shyy)
#pragma omp parallel for
  for (int x = 1; x < lx - 1; x++) {
    for (int y = 1; y < ly - 1; y++) {
      if (obst[x][y] == -1) {
        // Raw moments of the distributions at this node.
        real rho = f[x][y][0] + f[x][y][1] + f[x][y][2] + f[x][y][3] + f[x][y][4]
                 + f[x][y][5] + f[x][y][6] + f[x][y][7] + f[x][y][8];
        real e = -4 * f[x][y][0] + 2 * f[x][y][1] - f[x][y][2] + 2 * f[x][y][3] -
                 f[x][y][4] + 2 * f[x][y][5] - f[x][y][6] + 2 * f[x][y][7] - f[x][y][8];
        real eps = 4 * f[x][y][0] + f[x][y][1] - 2 * f[x][y][2] + f[x][y][3] -
                 2 * f[x][y][4] + f[x][y][5] - 2 * f[x][y][6] + f[x][y][7] - 2 * f[x][y][8];
        real j_x = f[x][y][5] + f[x][y][6] + f[x][y][7] - f[x][y][1] - f[x][y][2] - f[x][y][3];
        real q_x = -f[x][y][1] + 2 * f[x][y][2] - f[x][y][3] + f[x][y][5] - 2 * f[x][y][6] + f[x][y][7];
        real j_y = f[x][y][1] + f[x][y][8] + f[x][y][7] - f[x][y][3] - f[x][y][4] - f[x][y][5];
        real q_y = f[x][y][1] - f[x][y][3] + 2 * f[x][y][4] - f[x][y][5] + f[x][y][7] - 2 * f[x][y][8];
        real p_xx = f[x][y][2] - f[x][y][4] + f[x][y][6] - f[x][y][8];
        real p_xy = -f[x][y][1] + f[x][y][3] - f[x][y][5] + f[x][y][7];
        real j_x2 = j_x * j_x;
        real j_y2 = j_y * j_y;
        // Relax each non-conserved moment toward its equilibrium
        // (rho, j_x, j_y are conserved and not relaxed).
        real eO = e - s2 * (e + 2 * rho - 3 * (j_x2 + j_y2) / rho);
        real epsO = eps - s3 * (eps - rho + 3 * (j_x2 + j_y2) / rho);
        real q_xO = q_x - s5 * (q_x + j_x);
        real q_yO = q_y - s7 * (q_y + j_y);
        real p_xxO = p_xx - s8 * (p_xx - (j_x2 - j_y2) / rho);
        real p_xyO = p_xy - s9 * (p_xy - j_x * j_y / rho);
        // Map the post-collision moments back to distributions (M^-1).
        f[x][y][0] = a * (4*rho - 4 * eO + 4 * epsO);
        f[x][y][2] = a * (4*rho - eO - 2*epsO - 6*j_x + 6*q_xO + 9*p_xxO);
        f[x][y][4] = a * (4*rho - eO - 2*epsO - 6*j_y + 6*q_yO - 9*p_xxO);
        f[x][y][6] = a * (4*rho - eO - 2*epsO + 6*j_x - 6*q_xO + 9*p_xxO);
        f[x][y][8] = a * (4*rho - eO - 2*epsO + 6*j_y - 6*q_yO - 9*p_xxO);
        f[x][y][1] = a * (4*rho + 2*eO + epsO - 6*j_x - 3*q_xO + 6*j_y + 3*q_yO - 9*p_xyO);
        f[x][y][3] = a * (4*rho + 2*eO + epsO - 6*j_x - 3*q_xO - 6*j_y - 3*q_yO + 9*p_xyO);
        f[x][y][5] = a * (4*rho + 2*eO + epsO + 6*j_x + 3*q_xO - 6*j_y - 3*q_yO - 9*p_xyO);
        f[x][y][7] = a * (4*rho + 2*eO + epsO + 6*j_x + 3*q_xO + 6*j_y + 3*q_yO + 9*p_xyO);
      }
    }
  }
  // To calculate the edges; see the book Lattice Boltzmann Modeling
  // Bounce back for y=0 & y=ly-1
  for (int x = 1; x < lx - 1; x++) {
    f[x][0][8] = f[x][1][4];
    f[x][0][7] = f[x + 1][1][3]; //;+uw_b/6;
    f[x][0][1] = f[x - 1][1][5]; //-uw_b/6;
    // Top plate
    f[x][ly - 1][4] = f[x][ly - 2][8];
    f[x][ly - 1][3] = f[x - 1][ly - 2][7]; //-uw_h/6;
    f[x][ly - 1][5] = f[x + 1][ly - 2][1]; //+uw_h/6;
  }
  // Bounce back for the left (x=0) and right (x=lx-1) walls.
  for (int y = 1; y < ly - 1; y++) {
    f[0][y][6] = f[1][y][2];
    f[0][y][7] = f[1][y + 1][3]; //;+uw_b/6;
    f[0][y][5] = f[1][y - 1][1]; //-uw_b/6;
    f[lx - 1][y][2] = f[lx - 2][y][6];
    f[lx - 1][y][3] = f[lx - 2][y - 1][7]; //-uw_h/6;
    f[lx - 1][y][1] = f[lx - 2][y + 1][5]; //+uw_h/6;
  }
  // corner nodes
  f[0][0][7] = f[1][1][3];
  f[lx - 1][0][1] = f[lx - 2][1][5]; //+uw_b/6
  f[0][ly - 1][5] = f[1][ly - 2][1]; //-uw_b/6
  f[lx - 1][ly - 1][3] = f[lx - 2][ly - 2][7];
  // bounce back in obstacles
  /////////////////////////////////////////////////////////
  // To calculate force f[][][]) //
  // 1: articlel of JYD-Mouloud //
  // 2: article of Klaus-Nils-Ulrich //
  // 3: article of Yu-Mei-Luo-Shyy //
  /////////////////////////////////////////////////////////
  // Interpolated bounce-back at grain boundaries: for each active solid
  // node and each link pointing into fluid, reconstruct the incoming
  // distribution using the wall distance delta and the moving-wall
  // (rigid-body) velocity term.  First half of the directions:
  for (int x = 1; x < lx - 1; x++) {
    for (int y = 1; y < ly - 1; y++) {
      int i = obst[x][y];
      if (i != -1 && act[x][y] == 1) {
        for (int iLB = 1; iLB <= half; iLB++) {
          int next_x = x + ex[iLB];
          int next_y = y + ey[iLB];
          if (obst[next_x][next_y] != -1)
            f[x][y][iLB] = w[iLB]; // neighbour also solid: reset to weight
          else //(obst[next_x][next_y]==-1)
          {
            // Calculation is based on JYD-Mouloud (2.3.3)
            if (delta[x][y][iLB] >= 0.5) {
              f[x][y][iLB] =
                  f[next_x][next_y][iLB + half] / (2 * delta[x][y][iLB]) +
                  (2 * delta[x][y][iLB] - 1) * f[next_x][next_y][iLB] /
                      (2 * delta[x][y][iLB]) +
                  3 * (w[iLB] / c) *
                      (ex[iLB] * (g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3) +
                       ey[iLB] * (g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3)) /
                      delta[x][y][iLB];
            }
            if (delta[x][y][iLB] > 0. && delta[x][y][iLB] < 0.5) {
              int next_xx = next_x + ex[iLB];
              int next_yy = next_y + ey[iLB];
              f[x][y][iLB] =
                  2 * delta[x][y][iLB] * f[next_x][next_y][iLB + half] +
                  (1 - 2 * delta[x][y][iLB]) * f[next_xx][next_yy][iLB + half] +
                  6 * (w[iLB] / c) *
                      (ex[iLB] * (g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3) +
                       ey[iLB] * (g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3));
            }
          }
        }
        // Second half of the directions (opposites use iLB - half).
        for (int iLB = 1 + half; iLB < Q; iLB++) {
          int next_x = x + ex[iLB];
          int next_y = y + ey[iLB];
          if (obst[next_x][next_y] != -1)
            f[x][y][iLB] = w[iLB];
          else //(obst[next_x][next_y]==-1)
          {
            // Calculation is based on JYD-Mouloud (2.3.3)
            if (delta[x][y][iLB] >= 0.5) {
              f[x][y][iLB] =
                  f[next_x][next_y][iLB - half] / (2 * delta[x][y][iLB]) +
                  (2 * delta[x][y][iLB] - 1) * f[next_x][next_y][iLB] /
                      (2 * delta[x][y][iLB]) +
                  3 * (w[iLB] / c) *
                      (ex[iLB] * (g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3) +
                       ey[iLB] * (g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3)) /
                      delta[x][y][iLB];
            }
            if (delta[x][y][iLB] > 0. && delta[x][y][iLB] < 0.5) {
              int next_xx = next_x + ex[iLB];
              int next_yy = next_y + ey[iLB];
              f[x][y][iLB] =
                  2 * delta[x][y][iLB] * f[next_x][next_y][iLB - half] +
                  (1 - 2 * delta[x][y][iLB]) * f[next_xx][next_yy][iLB - half] +
                  6 * (w[iLB] / c) *
                      (ex[iLB] * (g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3) +
                       ey[iLB] * (g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3));
            }
          }
        }
      }
    }
  }
  // In-place streaming via the classic two-pass swap trick: first swap
  // each direction with its opposite at the same node ...
  for (int x = 0; x < lx; x++) {
    for (int y = 0; y < ly; y++) {
      for (int iLB = 1; iLB <= half; iLB++) {
        swap(&f[x][y][iLB], &f[x][y][iLB + half]);
      }
    }
  }
  // ... then swap with the downstream neighbour along each direction.
  for (int x = 0; x < lx; x++) {
    for (int y = 0; y < ly; y++) {
      for (int iLB = 1; iLB <= half; iLB++) {
        int next_x = x + ex[iLB]; // if(next_x<0) next_x=lx-1;
        int next_y = y + ey[iLB];
        if (next_x >= 0 && next_y >= 0 && next_x < lx && next_y < ly) {
          swap(&f[x][y][iLB + half], &f[next_x][next_y][iLB]);
        }
      }
    }
  }
}
// ************************************************************************************************
// * Compute total density to verify of the system that no divergence occurs
// when iterating *
// ************************************************************************************************
void check_density() {
int x, y, iLB;
real sum = 0;
for (x = 0; x < lx; x++) {
for (y = 0; y < ly; y++) {
for (iLB = 0; iLB < Q; iLB++) {
sum = sum + f[x][y][iLB];
}
}
}
printf("Iteration Number %ld, Total density in the system %f\n", nbsteps,
sum);
}
void final_density() {
int x, y, iLB;
real sum = 0;
for (x = 0; x < lx; x++) {
for (y = 0; y < ly; y++) {
for (iLB = 0; iLB < Q; iLB++) {
sum = sum + f[x][y][iLB];
}
}
}
fprintf(stderr, "final_density: %f\n", sum);
}
// Smaller of two ints.
int min(int x, int y) {
  if (x < y) return x;
  return y;
}
// Larger of two ints.
int max(int x, int y) {
  if (x > y) return x;
  return y;
}
// ****************************************************************************
// * Compute hydrodynamic forces *
// ****************************************************************************
// Hydrodynamic force and torque on every grain by the momentum-exchange
// method: for each boundary link between a node of grain i and a non-i
// node, accumulate the exchanged momentum (f_in + f_out) along the link,
// then scale to physical units.  Results go to the globals fhf1/fhf2
// (force x/y) and fhf3 (torque about the grain centre).
void forces_fluid(int nx, int ny, real f[nx][ny][Q], int nbgrains, struct grain g[nbgrains]) {
  const int half = (Q - 1) / 2; // opposite of direction iLB is iLB +/- half
  // Reset the per-grain accumulators.
  for (int i = 0; i < nbgrains; ++i) {
    fhf1[i] = 0;
    fhf2[i] = 0;
    fhf3[i] = 0;
  }
#pragma omp parallel for
  for (int i = 0; i < nbgrains; ++i) {
    const real xc = (g[i].x1 - Mgx) / dx; // grain centre, lattice units
    const real yc = (g[i].x2 - Mby) / dx;
    const real rbl0 = g[i].r / dx;
    // Bounding box clamped to the domain interior (real->int truncation
    // matches the rasterisation in obst_construction).
    const int xi = max(xc - rbl0, 1);
    const int xf = min(xc + rbl0, nx-2);
    const int yi = max(yc - rbl0, 1);
    const int yf = min(yc + rbl0, ny-2);
    for (int x = xi; x <= xf; ++x) {
      for (int y = yi; y <= yf; ++y) {
        if (i != obst[x][y]) continue; // only nodes belonging to grain i
        for (int iLB = 1; iLB < Q; ++iLB) {
          const int next_x = x + ex[iLB];
          const int next_y = y + ey[iLB];
          if (obst[next_x][next_y] != i) {
            // Boundary link: halfq maps iLB to its opposite direction.
            const int halfq = (iLB <= half) ? half : -half;
            const real fnx = (f[x][y][iLB + halfq] + f[next_x][next_y][iLB]) * ex[iLB + halfq];
            const real fny = (f[x][y][iLB + halfq] + f[next_x][next_y][iLB]) * ey[iLB + halfq];
            fhf1[i] = fhf1[i] + fnx;
            fhf2[i] = fhf2[i] + fny;
            // Torque: lever arm from the grain centre (lattice units).
            fhf3[i] = fhf3[i] - fnx * (y - (g[i].x2 - Mby) / dx) + fny * (x - (g[i].x1 - Mgx) / dx);
          }
        }
      }
    }
  }
  // Convert from lattice to physical units (factor from nu, tau, dx).
#pragma omp parallel for
  for (int i = 0; i < nbgrains; ++i) {
    fhf1[i] *= rho_moy * 9 * nu * nu / (dx * (tau - 0.5) * (tau - 0.5));
    fhf2[i] *= rho_moy * 9 * nu * nu / (dx * (tau - 0.5) * (tau - 0.5));
    fhf3[i] *= dx * rho_moy * 9 * nu * nu / (dx * (tau - 0.5) * (tau - 0.5));
  }
}
//**********************************************************
//**********************************************************
// Sums hydrodynamic (fhf1..fhf3), grain-grain contact and wall forces for
// every grain and converts them into accelerations (a1, a2 = translation,
// a3 = rotation).  On MGPost output steps the contact force law is inlined
// so that per-grain diagnostics (pressure p, shear s, slip, rotational work
// rw, coordination number z, stress moments M11..M22) can be accumulated;
// otherwise the plain force_grains() helper is called.
void acceleration_grains() {
  long i, j;
  int jdep;
  real dn, ftest;
  real fn, ft;
  struct force fji;
  if (nbsteps % stepFilm == 0 && start == 1) { // Outfile MGPost
    // distance normale
    real xOiOj, yOiOj, OiOj;
    real xn, yn;
    real vn, vxOiOj, vyOiOj;
    real vt;
    // start from the hydrodynamic (fluid) forces
    for (i = 0; i <= nbgrains - 1; i++) {
      g[i].a1 = fhf1[i];
      g[i].a2 = fhf2[i];
      g[i].a3 = fhf3[i];
    }
    // Summation of forces on the grains.  Verlet list layout: partners of
    // grain i occupy neighbours[cumul[i-1] .. cumul[i]-1].
    for (i = 0; i <= nbgrains - 1; i++) {
      if (i == 0)
        jdep = 0;
      else
        jdep = cumul[i - 1];
      for (j = jdep; j < cumul[i]; j++) {
        // inlined force_grains(i, neighbours[j])
        xOiOj = g[i].x1 - g[neighbours[j]].x1;
        yOiOj = g[i].x2 - g[neighbours[j]].x2;
        OiOj = sqrt(xOiOj * xOiOj + yOiOj * yOiOj);
        dn = OiOj - g[i].r - g[neighbours[j]].r;
        if (dn >= 0) {
          // no overlap: no contact force
          fji.f1 = 0;
          fji.f2 = 0;
          fji.f3 = 0;
        } else {
          // relative normal velocity
          vxOiOj = g[i].v1 - g[neighbours[j]].v1;
          vyOiOj = g[i].v2 - g[neighbours[j]].v2;
          xn = xOiOj / OiOj;
          yn = yOiOj / OiOj;
          vn = vxOiOj * xn + vyOiOj * yn;
          vt = -vxOiOj * yn + vyOiOj * xn - g[i].v3 * g[i].r -
               g[neighbours[j]].v3 * g[neighbours[j]].r;
          // linear spring-dashpot normal force, repulsive only
          fn = -kg * dn - nug * vn;
          if (fn < 0) fn = 0.0;
          // tangential force with a Coulomb-style cap
          ft = kt * vt * dt;
          // NOTE(review): the Coulomb cap is usually mu*fn, not mu*ft —
          // confirm against force_grains() before changing.
          ftest = mu * ft;
          if (fabs(ft) > ftest) {
            if (ft > 0.0)
              ft = ftest;
            else
              ft = -ftest;
          }
          // calculate the normal force
          fji.f1 = fn * xn - ft * yn;
          fji.f2 = fn * yn + ft * xn;
          fji.f3 = -ft * g[i].r * murf;
          g[i].p += fn;
          g[neighbours[j]].p += fn;
          g[i].s += ft;
          g[neighbours[j]].s += ft;
          g[i].slip += fabs(ft) *
                       (fabs(vt * dt) + (fabs(ft - pft)) / kt);
          g[neighbours[j]].slip += fabs(ft) *
                                   (fabs(vt * dt) + (fabs(ft - pft)) / kt);
          g[i].rw +=
              fabs(fji.f3) * (fabs(g[i].v3 * dt) + (fabs(fji.f3 - pff)) / kt);
          // NOTE(review): the neighbour's rotational work also uses g[i].v3;
          // g[neighbours[j]].v3 may have been intended — confirm.
          g[neighbours[j]].rw +=
              fabs(fji.f3) * (fabs(g[i].v3 * dt) + (fabs(fji.f3 - pff)) / kt);
          g[i].z += 1;
          pff = fji.f3; // previous contact torque (for the work terms)
          pft = ft;     // previous tangential force
          // Stress computations
          g[i].M11 += fji.f1 * xOiOj;
          g[i].M12 += fji.f1 * yOiOj;
          g[i].M21 += fji.f2 * xOiOj;
          g[i].M22 += fji.f2 * yOiOj;
        }
        // end force_grains: apply action/reaction on the pair
        g[i].a1 = g[i].a1 + fji.f1;
        g[i].a2 = g[i].a2 + fji.f2;
        g[i].a3 = g[i].a3 + fji.f3;
        g[neighbours[j]].a1 = g[neighbours[j]].a1 - fji.f1;
        g[neighbours[j]].a2 = g[neighbours[j]].a2 - fji.f2;
        g[neighbours[j]].a3 = g[neighbours[j]].a3 + fji.f3;
      }
    }
  } else { // Calculate normal
    for (i = 0; i <= nbgrains - 1; i++) {
      g[i].a1 = fhf1[i];
      g[i].a2 = fhf2[i];
      g[i].a3 = fhf3[i];
    }
    // summation of forces between the grains
    for (i = 0; i <= nbgrains - 1; i++) {
      if (i == 0)
        jdep = 0;
      else
        jdep = cumul[i - 1];
      for (j = jdep; j < cumul[i]; j++) {
        fji = force_grains(i, neighbours[j]);
        g[i].a1 = g[i].a1 + fji.f1;
        g[i].a2 = g[i].a2 + fji.f2;
        g[i].a3 = g[i].a3 + fji.f3;
        g[neighbours[j]].a1 = g[neighbours[j]].a1 - fji.f1;
        g[neighbours[j]].a2 = g[neighbours[j]].a2 - fji.f2;
        g[neighbours[j]].a3 = g[neighbours[j]].a3 + fji.f3;
      }
    }
  }
  // Forces on the bottom wall
  for (i = 0; i < nNeighWallb; i++) {
    dn = g[neighbourWallB[i]].x2 - g[neighbourWallB[i]].r - Mby;
    if (dn < 0) {
      fji = force_WallB(neighbourWallB[i], dn);
      g[neighbourWallB[i]].a1 = g[neighbourWallB[i]].a1 + fji.f1;
      g[neighbourWallB[i]].a2 = g[neighbourWallB[i]].a2 + fji.f2;
      g[neighbourWallB[i]].a3 = g[neighbourWallB[i]].a3 + fji.f3;
      // BUG FIX: the friction-work term read g[i].v1 / g[i].a1, but i
      // indexes the wall neighbour list, not the grain array; use the
      // actual grain id neighbourWallB[i].
      g[neighbourWallB[i]].fr +=
          fabs(fji.f1) * (fabs(dt * g[neighbourWallB[i]].v1) +
                          fabs(dt2 * g[neighbourWallB[i]].a1) +
                          (fabs(fji.f1 - pf)) /
                              kt); // Friction work at the wall dt2*a1/2
      pf = fji.f1; // Previous force fji.f1
    }
  }
  // Forces on the Top Wall
  for (i = 0; i < nNeighWallt; i++) {
    dn = -g[neighbourWallT[i]].x2 - g[neighbourWallT[i]].r + Mhy;
    if (dn < 0) {
      fji = force_WallT(neighbourWallT[i], dn);
      g[neighbourWallT[i]].a1 = g[neighbourWallT[i]].a1 + fji.f1;
      g[neighbourWallT[i]].a2 = g[neighbourWallT[i]].a2 + fji.f2;
      g[neighbourWallT[i]].a3 = g[neighbourWallT[i]].a3 + fji.f3;
    }
  }
  // Forces on the Left Wall
  for (i = 0; i < nNeighWallL; i++) {
    dn = g[neighbourWallL[i]].x1 - g[neighbourWallL[i]].r - Mgx;
    if (dn < 0) {
      fji = force_WallL(neighbourWallL[i], dn);
      g[neighbourWallL[i]].a1 = g[neighbourWallL[i]].a1 + fji.f1;
      g[neighbourWallL[i]].a2 = g[neighbourWallL[i]].a2 + fji.f2;
      g[neighbourWallL[i]].a3 = g[neighbourWallL[i]].a3 + fji.f3;
      // BUG FIX: same list-index / grain-id confusion as the bottom wall.
      // NOTE(review): the sliding terms use v1/a1 although the friction
      // force on a vertical wall is fji.f2 — v2/a2 may have been intended.
      g[neighbourWallL[i]].fr +=
          fabs(fji.f2) *
          (fabs(dt * g[neighbourWallL[i]].v1) +
           fabs(dt2 * g[neighbourWallL[i]].a1) +
           (fabs(fji.f2 - pf)) / kt); // Friction work at the wall
      pf = fji.f2; // Previous force fji.f2
    }
  }
  // Forces on the right Wall
  for (i = 0; i < nNeighWallR; i++) {
    dn = -g[neighbourWallR[i]].x1 - g[neighbourWallR[i]].r + Mdx;
    if (dn < 0) {
      fji = force_WallR(neighbourWallR[i], dn);
      g[neighbourWallR[i]].a1 = g[neighbourWallR[i]].a1 + fji.f1;
      g[neighbourWallR[i]].a2 = g[neighbourWallR[i]].a2 + fji.f2;
      g[neighbourWallR[i]].a3 = g[neighbourWallR[i]].a3 + fji.f3;
    }
  }
  // calculate acceleration: divide by mass/inertia and add buoyant gravity
  for (i = 0; i <= nbgrains - 1; i++) {
    g[i].a1 = g[i].a1 / g[i].m + ((g[i].m - g[i].mw) / g[i].m) * xG;
    g[i].a2 = (g[i].a2 / g[i].m) + ((g[i].m - g[i].mw) / g[i].m) * yG;
    g[i].a3 = g[i].a3 / g[i].It;
  }
}
//**********************************************************************
void initVerlet() {
  // Rebuilds the grain-grain Verlet neighbour list.  For each grain i, all
  // partners j > i within distVerlet of contact are appended to
  // neighbours[], and cumul[i] records the running total so that grain i's
  // partners occupy neighbours[cumul[i-1] .. cumul[i]-1].
  int i, j;
  int jneighbours;
  real distx, disty;
  // real distVerlet=.1e-7;
  jneighbours = 0;
  for (i = 0; i < nbgrains; i++) {
    for (j = i + 1; j < nbgrains; j++) {
      distx = g[i].x1 - g[j].x1;
      disty = g[i].x2 - g[j].x2;
      // cheap axis-aligned rejection before the exact distance test
      if (((fabs(distx) - g[i].r - g[j].r) <= distVerlet) &&
          ((fabs(disty) - g[i].r - g[j].r) <= distVerlet)) {
        if ((sqrt(distx * distx + disty * disty) - g[i].r - g[j].r) <=
            distVerlet) {
          if (jneighbours >= nbgrains * 6) {
            // BUG FIX: the old code only warned when the array was already
            // nearly full and kept writing; now the overflow is caught
            // before the out-of-bounds store.
            printf("error! size of vector verlet neighbors is outdated");
          } else {
            neighbours[jneighbours] = j;
            jneighbours++;
          }
        }
      }
    }
    // BUG FIX: this assignment used to sit inside the j loop, so
    // cumul[nbgrains-1] (whose inner loop is empty) was never refreshed and
    // kept a stale count from the previous rebuild; assign once per grain.
    cumul[i] = jneighbours;
  }
}
void VerletWall() {
int i;
nNeighWallb = 0;
nNeighWallL = 0;
nNeighWallt = 0;
nNeighWallR = 0;
real dn;
// real distVerlet=.1e-7;
// Verlet WallB
if (nbsteps * dt < dtt) {
Mdx = 1.e-3 * lx / 10;
Mhy = (1.e-3 * ly / 10);
} else {
Mdx = 1.e-3 * lx;
Mhy = 1.e-3 * ly;
}
for (i = 0; i < nbgrains; ++i) {
dn = g[i].x2 - g[i].r - Mby;
if (dn < distVerlet) {
neighbourWallB[nNeighWallb] = i;
++nNeighWallb;
}
}
// Verlet WallT
for (i = 0; i < nbgrains; ++i) {
dn = -g[i].x2 - g[i].r + Mhy;
if (dn < distVerlet) {
neighbourWallT[nNeighWallt] = i;
++nNeighWallt;
}
}
// Verlet WallL
for (i = 0; i < nbgrains; ++i) {
dn = g[i].x1 - g[i].r - Mgx;
if (dn < distVerlet) {
neighbourWallL[nNeighWallL] = i;
++nNeighWallL;
}
}
// Verlet WallR
for (i = 0; i < nbgrains; ++i) {
dn = -g[i].x1 - g[i].r + Mdx;
if (dn < distVerlet) {
neighbourWallR[nNeighWallR] = i;
++nNeighWallR;
}
}
}
// *******************************************************************************************
// * writing obstacle arrays
// *
// *******************************************************************************************
void obst_writing() {
  // Dumps the obstacle map (obst_LB.dat), the active-node map
  // (active_nodes.dat) and the non-zero boundary-link coefficients
  // (links.dat) for the current LBM grid.  Each fopen() is now checked so
  // that a failure is reported instead of dereferencing a NULL FILE*.
  int x, y, i;
  char filename1[] = "obst_LB.dat";
  FILE* outfile1 = fopen(filename1, "w");
  if (outfile1 == NULL) {
    perror(filename1);
    return;
  }
  for (y = 0; y < ly; y++) {
    for (x = 0; x < lx; x++) {
      fprintf(outfile1, "%d ", obst[x][y]);
    }
    fprintf(outfile1, "\n");
  }
  fclose(outfile1);
  char filename2[] = "active_nodes.dat";
  FILE* outfile2 = fopen(filename2, "w");
  if (outfile2 == NULL) {
    perror(filename2);
    return;
  }
  for (y = 0; y < ly; y++) {
    for (x = 0; x < lx; x++) {
      fprintf(outfile2, "%d ", act[x][y]);
    }
    fprintf(outfile2, "\n");
  }
  fclose(outfile2);
  char filename3[] = "links.dat";
  FILE* outfile3 = fopen(filename3, "w");
  if (outfile3 == NULL) {
    perror(filename3);
    return;
  }
  for (y = 0; y < ly; y++) {
    for (x = 0; x < lx; x++) {
      // only the links with a non-zero wall-distance coefficient are written
      for (i = 1; i < Q; i++) {
        if (delta[x][y][i] != 0) {
          fprintf(outfile3, "%d %d %d %f\n", x, y, i, delta[x][y][i]);
        }
      }
    }
  }
  fclose(outfile3);
}
// ****************************************************************************
// * Output of results to velocity files *
// * Distribution of verlocity x - y *
// ****************************************************************************
void velocity_profile() {
  // Writes the vertical fluid velocity (in lattice units) along the
  // horizontal lattice row passing through grain 0 to "yvel_vs_x.dat".
  // Solid nodes report the covering grain's velocity; fluid nodes use the
  // first moment of the distribution functions divided by local density.
  int x, y, i;
  real u_y, d_loc;
  real u_y1;
  char filename1[] = "yvel_vs_x.dat";
  FILE* outfile1 = fopen(filename1, "w");
  if (outfile1 == NULL) { // report failure instead of crashing on NULL
    perror(filename1);
    return;
  }
  fprintf(outfile1, "# vitesse u_x ordonnée \n");
  // lattice row of the centre of grain 0
  // NOTE(review): y is not clamped to [0, ly) — assumes grain 0 stays
  // inside the domain; confirm.
  y = (int)((g[0].x2 - Mby) / dx);
  for (x = 0; x < lx; x++) {
    if (obst[x][y] != -1 && obst[x][y] != nbgrains)
      u_y1 = g[obst[x][y]].v2 / c; // solid node: grain velocity / c
    else {
      u_y = 0;
      d_loc = 0.;
      for (i = 0; i < Q; i++) {
        d_loc = d_loc + f[x][y][i];
      }
      for (i = 0; i < Q; i++) {
        u_y = u_y + f[x][y][i] * ey[i];
      }
      u_y1 = u_y / d_loc; // y momentum / density
    }
    fprintf(outfile1, "%d %.10lf\n", x, u_y1);
  }
  fclose(outfile1);
}
//************************************************************
// Calculate pressure *
//************************************************************
void pressures() {
int x, y;
for (x = 0; x < lx; x++) {
for (y = 0; y < ly; y++) {
if (obst[x][y] == -1) {
press[x][y] =
(f[x][y][0] + f[x][y][1] + f[x][y][2] + f[x][y][3] + f[x][y][4] +
f[x][y][5] + f[x][y][6] + f[x][y][7] + f[x][y][8] - rho_moy) *
c_squ;
} else
press[x][y] = 0.;
}
}
}
//----------------------------------------------------------
// One simulation step: (optionally) vibrate the side walls, couple the LBM
// fluid to the grains every npDEM sub-steps, periodically refresh the Verlet
// lists, advance the grains with velocity-Verlet integration, and emit the
// periodic film/strobe outputs.
void renderScene(void) {
long i;
if (start == 1) {
if (vib == 1) {
// sinusoidal vibration of the left/right walls
t = t + dt;
// Mby=Mby+0.1*amp*sin(freq*t);
Mgx = Mgx + amp * sin(freq * t);
Mdx = Mdx + amp * sin(freq * t);
}
// FORCES FLUIDES !!!
#ifdef _FLUIDE_
if (nbsteps % npDEM == 0) {
// one LBM update per npDEM DEM sub-steps
reinit_obst_density();
obst_construction();
collision_streaming();
if (nbsteps % stepConsole == 0) check_density();
forces_fluid(lx, ly, f, nbgrains, g);
}
#endif
if (nbsteps % UpdateVerlet == 0) {
// periodic rebuild of grain-grain and grain-wall neighbour lists
initVerlet();
VerletWall();
}
/*
for(i=0; i<=nbgrains_bas-1; i++) {
g[i].a1=0.;
g[i].a2=0.;
g[i].a3=0.;
}
*/
// first velocity-Verlet half step: reset per-step diagnostics, advance
// positions, and advance velocities half a step with the old accelerations
for (i = 0; i <= nbgrains - 1; i++) {
g[i].p = 0; // reset pressure
g[i].s = 0.;
g[i].ifm = 0;
g[i].f1 = 0.;
g[i].f2 = 0.;
g[i].ice = 0;
g[i].fr = 0.;
g[i].slip = 0;
g[i].rw = 0.;
ic = 0.; // NOTE(review): resets a global once per grain — g[i].ic may have been intended; confirm
g[i].M11 = g[i].M12 = g[i].M21 = g[i].M22 = 0.; // Moments
g[i].z = 0; // reset coordination numbers
g[i].zz = 0;
g[i].x1 = g[i].x1 + dt * g[i].v1 + dt2 * g[i].a1 / 2.;
g[i].x2 = g[i].x2 + dt * g[i].v2 + dt2 * g[i].a2 / 2.;
g[i].x3 = g[i].x3 + dt * g[i].v3 + dt2 * g[i].a3 / 2.;
g[i].v1 = g[i].v1 + dt * g[i].a1 / 2.;
g[i].v2 = g[i].v2 + dt * g[i].a2 / 2.;
g[i].v3 = g[i].v3 + dt * g[i].a3 / 2.;
}
// recompute accelerations from contacts, walls and fluid forces
acceleration_grains();
// second velocity-Verlet half step with the new accelerations
for (i = 0; i <= nbgrains - 1; i++) {
// g[i].p=g[i].p/(2.*M_PI*g[i].r); // pressure on grains
g[i].v1 = g[i].v1 + dt * g[i].a1 / 2.;
g[i].v2 = g[i].v2 + dt * g[i].a2 / 2.;
g[i].v3 = g[i].v3 + dt * g[i].a3 / 2.;
}
nbsteps++;
}
// periodic outputs: VTK film frames and DEM/force strobe snapshots
if (nbsteps % stepFilm == 0 && start == 1) {
#ifdef _FLUIDE_
write_vtk(lx, ly, f, nbgrains, g);
#endif
nFile++;
}
if (nbsteps % stepStrob == 0 && start == 1) {
write_DEM();
write_forces();
}
}
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////// MAIN /////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// Entry point: reads the grain sample named on the command line, allocates
// the LBM/DEM arrays, derives the coupled time steps, then runs renderScene()
// until the simulated time exceeds `duration`.
int main(int argc, char** argv) {
  time_t time_raw_format;
  struct tm* ptr_time;
  printf("2D LBM-DEM code\n");
  int i;
  real dtmax;
  if (argc != 2) {
    // BUG FIX: message previously read "usage: usage %s <filename>"
    printf("usage: %s <filename>\n", argv[0]);
    exit(EXIT_FAILURE);
  }
  printf("Opening file : %s\n", argv[1]);
  c_squ = 1. / 3.;
  g = read_sample(argv[1]);
  check_sample(nbgrains, g);
  // allocate the LBM fields and the DEM work arrays
  // NOTE(review): allocation sizes assume the pointer types declared
  // elsewhere index these buffers as 2D/3D arrays — confirm declarations.
  f = malloc(sizeof(real)*lx*ly*Q); assert(f);
  obst = malloc(sizeof(int)*lx*ly); assert(obst);
  act = malloc(sizeof(int)*lx*ly); assert(act);
  delta = malloc(sizeof(real)*lx*ly*Q); assert(delta);
  rLB = malloc(sizeof(real)*nbgrains); assert(rLB);
  cumul = malloc(sizeof(int)*nbgrains); assert(cumul);
  neighbours = malloc(sizeof(int)*nbgrains*6); assert(neighbours);
  neighbourWallB = malloc(sizeof(int)*nbgrains); assert(neighbourWallB);
  neighbourWallR = malloc(sizeof(int)*nbgrains); assert(neighbourWallR);
  neighbourWallL = malloc(sizeof(int)*nbgrains); assert(neighbourWallL);
  neighbourWallT = malloc(sizeof(int)*nbgrains); assert(neighbourWallT);
  memset(cumul, 0, sizeof(int)*nbgrains);
  memset(neighbours, 0, sizeof(int)*nbgrains*6);
  memset(neighbourWallB, 0, sizeof(int)*nbgrains);
  memset(neighbourWallR, 0, sizeof(int)*nbgrains);
  memset(neighbourWallL, 0, sizeof(int)*nbgrains);
  memset(neighbourWallT, 0, sizeof(int)*nbgrains);
  // c1 = malloc(sizeof(struct contact)*nbgrainsMax*nbgrainsMax); assert(c1);
  // c2 = malloc(sizeof(struct contact)*nbgrains); assert(c2);
  // memset(c1, 0, sizeof(struct contact)*nbgrains*nbgrains);
  // memset(c2, 0, sizeof(struct contact)*nbgrains);
  fhf = malloc(sizeof(struct force)*nbgrains); assert(fhf);
  fhf1 = malloc(sizeof(real)*nbgrains); assert(fhf1);
  fhf2 = malloc(sizeof(real)*nbgrains); assert(fhf2);
  fhf3 = malloc(sizeof(real)*nbgrains); assert(fhf3);
  init_density(lx, ly, f);
  // initial wall positions (the side/top walls start close in; VerletWall
  // moves them out once nbsteps*dt >= dtt)
  Mgx = 0.;
  Mdx = 1.e-3 * lx / 10;
  Mhy = 1.e-3 * ly / 10;
  Mby = 0.;
  // gravity components in the tilted frame
  xG = -G * sin(angleG);
  yG = -G * cos(angleG);
  // lattice spacing
  dx = (1./scale) * (Mdx - Mgx) / (lx - 1);
  printf("no space %le\n", dx);
  // Compute the time step for DEM: bounded by the contact stiffness, then
  // sub-cycled npDEM times per LBM step
  real rMin = minimum_grain_radius(nbgrains, g);
  dtmax = (1 / iterDEM) * pi * rMin * sqrt(pi * rhoS / kg);
  dtLB = dx * dx * (tau - 0.5) / (3 * nu);
  npDEM = (dtLB / dtmax + 1);
  c = dx / dtLB; // lattice speed
  dt = dtLB / npDEM;
  dt2 = dt * dt;
  printf("dtLB=%le, dtmax=%le, dt=%le, npDEM=%d, c=%lf\n", dtLB, dtmax,
         dt, npDEM, c);
  // grain radii in lattice units
  for (i = 0; i <= nbgrains - 1; i++) {
    rLB[i] = reductionR * g[i].r / dx;
  }
  init_obst();
  // VerletWall();
  time(&time_raw_format);
  ptr_time = localtime(&time_raw_format);
  printf("Current local time and date: %s", asctime(ptr_time));
  char filename_stats[] = "stats.data";
  s_stats = fopen(filename_stats, "w");
  if (s_stats == NULL) { // fail early instead of crashing in fprintf
    perror(filename_stats);
    exit(EXIT_FAILURE);
  }
  fprintf(s_stats,
          "#1_t 2_xfront 3_xgrainmax 4_height 5_zmean 6_energie_x 7_energie_y "
          "8_energie_teta 9_energie_cin 10_N0 11_N1 12_N2 13_N3 14_N4 15_N5 "
          "16_energy_Potential 17_Strain_Energy 18_Frictional_Work "
          "19_Internal_Friction 20_Inelastic_Collision 21_Slip "
          "22_Rotational_Work\n");
  fclose(s_stats);
  start = 1;
  // main time loop: one renderScene() call per DEM sub-step
  do {
    renderScene();
    time(&time_raw_format);
    ptr_time = localtime(&time_raw_format);
    if (nbsteps % UpdateVerlet == 0)
      printf(
          "steps %li steps %le KE %le PE %le SE %le WF %le INCE %le SLIP %le "
          "RW %le Time %s \n",
          nbsteps, nbsteps * dt, energie_cin, energy_p, SE, WF, INCE, TSLIP,
          TRW, asctime(ptr_time)); // IFR TSE TBW
  } while (nbsteps * dt <= duration);
  final_density();
  time(&time_raw_format);
  ptr_time = localtime(&time_raw_format);
  printf("End local time and date: %s", asctime(ptr_time));
  return 0;
}
|
clang-262701.c | #include <stdio.h>
#include <string.h>
#include <omp.h>
#define THREADS 2
#define TEAMS 2
// OpenMP offload regression test: launch TEAMS teams with at most THREADS
// threads each; every thread in a team adds 1 to a team-local array slot,
// thread 0 reduces the slots, and the host checks each team counted exactly
// the number of threads the runtime actually chose.
int main(){
int gpu_results[TEAMS];
int correct_results[TEAMS];
int actual_num_threads = -1;
// the runtime is allowed to use <=THREADS in the parallel regions and
// it actually chooses 1 (new runtime) or 2 (old runtime)
#pragma omp target teams thread_limit(THREADS) num_teams(TEAMS) \
map(from:gpu_results, actual_num_threads)
{
int dist[THREADS]; // team-local: one slot per possible thread
// Uncomment line below to trigger generic kernel before fix was in place
//dist[0] = 0;
#pragma omp parallel
{
int thread = omp_get_thread_num();
int team = omp_get_team_num();
dist[thread] = 0;
#pragma omp barrier
dist[thread] += 1; // each running thread contributes exactly 1
#pragma omp barrier
if(thread == 0) {
if (team == 0)
actual_num_threads = omp_get_num_threads();
// reduce per-thread contributions; unused slots stay 0 from above
for(int i = 1; i < THREADS; i++)
dist[0] += dist[i];
gpu_results[team] = dist[0];
}
}
}
// every team must have counted exactly actual_num_threads increments
for(int i = 0; i < TEAMS; i++)
correct_results[i] = actual_num_threads;
int status = memcmp(correct_results, gpu_results, TEAMS * sizeof(int));
if (status != 0 || actual_num_threads > 2) {
printf("FAIL\n");
return 1;
}
printf("PASS\n");
return 0;
}
|
GB_unaryop__identity_fp32_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp32_uint8
// op(A') function: GB_tran__identity_fp32_uint8
// C type: float
// A type: uint8_t
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (float) Ax [p] for p = 0..anz-1: the IDENTITY unary operator with
// a uint8_t -> float typecast, parallelized with a static OpenMP schedule.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_unop__identity_fp32_uint8
(
float *restrict Cx,
const uint8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (float) A': transpose, typecast, and apply the identity operator.
// The actual loop lives in the shared template GB_unaryop_transpose.c
// (phase 2 of 2 fills in the numerical values).
GrB_Info GB_tran__identity_fp32_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__second_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__second_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__second_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__second_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_uint32)
// A*D function (colscale): GB (_AxD__second_uint32)
// D*A function (rowscale): GB (_DxB__second_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__second_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__second_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_uint32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = bij
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_UINT32 || GxB_NO_SECOND_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; with the SECOND operator the
// shared ewise3 template effectively copies B into C.
GrB_Info GB (_Cdense_ewise3_noaccum__second_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, using the shared
// dense subassign-23 template over the pre-sliced entries of B.
GrB_Info GB (_Cdense_accumB__second_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix via the shared dense
// subassign-22 template.
GrB_Info GB (_Cdense_accumb__second_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the code generator emits a second return after the block above
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.  Only Cx is
// typed here; the loop lives in the shared colscale template.
GrB_Info GB (_AxD__second_uint32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, via the shared
// rowscale template.
GrB_Info GB (_DxB__second_uint32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B.  Workspace for the mask/input
// slicings is declared here and released (GB_FREE_WORK) before returning;
// the numeric loop is the shared GB_add_template.
GrB_Info GB (_AaddB__second_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse or hypersparse; the loop lives in the shared 08 meta template.
GrB_Info GB (_AemultB_08__second_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for SECOND (flipping is handled by the
// generator choosing FIRST instead), so only the unflipped path compiles.
GrB_Info GB (_AemultB_02__second_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full; loop in the shared 04 template.
GrB_Info GB (_AemultB_04__second_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap; loop in
// the shared bitmap emult template.
GrB_Info GB (_AemultB_bitmap__second_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
reports.h | /*
* Copyright 2013-2020, Derrick Wood <dwood@cs.jhu.edu>
*
* This file is part of the Kraken 2 taxonomic sequence classification system.
*/
#ifndef KRAKEN2_REPORTS_H_
#define KRAKEN2_REPORTS_H_
#include <unordered_map>
#include <iomanip>
#include "kraken2_headers.h"
#include "taxonomy.h"
#include "kraken2_data.h"
#include "readcounts.h"
namespace kraken2 {
// Still TODO: Create an MPA-style reporter that can take a std::vector of
// call_counts and a std::vector of sample IDs
void ReportMpaStyle(std::string filename, bool report_zeros, Taxonomy &tax, taxon_counts_t &call_counts);
//void ReportKrakenStyle(std::string filename, Taxonomy &tax, taxon_counts_t &call_counts, uint64_t total_unclassified);
taxon_counts_t GetCladeCounts(Taxonomy &tax, taxon_counts_t &call_counts);
void PrintMpaStyleReportLine(std::ofstream &ofs, uint64_t clade_count, const std::string &taxonomy_line);
void MpaReportDFS(taxid_t taxid, std::ofstream &ofs, bool report_zeros, Taxonomy &taxonomy,
taxon_counts_t &clade_counts, std::vector<std::string> &taxonomy_names);
void PrintKrakenStyleReportLine(std::ofstream &ofs, uint64_t total_seqs, uint64_t clade_count, uint64_t call_count,
const std::string &rank_str, uint32_t taxid, const std::string &sci_name, int depth);
void KrakenReportDFS(uint32_t taxid, std::ofstream &ofs, bool report_zeros,
Taxonomy &taxonomy, taxon_counts_t &clade_counts,
uint64_t total_seqs, char rank_code, int rank_depth, int depth);
void ReportKrakenStyle(std::string filename, bool report_zeros, Taxonomy &taxonomy,
taxon_counts_t &call_counts,
uint64_t total_seqs, uint64_t total_unclassified);
template<typename READCOUNTS>
class TaxReport {
private:
const Taxonomy * taxonomy;
const unordered_map<taxid_t, READCOUNTS>& _taxCounts; // set in constructor, from classification
const bool _show_zeros;
unordered_map<taxid_t, vector<const READCOUNTS*> > _children;
unordered_map<taxid_t, READCOUNTS> _cladeCounts; // consider accessing by TaxEntry*
public:
// Constructor: for every taxon with direct read counts, registers a pointer
// to its READCOUNTS with that taxon and each of its ancestors (walking
// parent_id links until taxid 0), then sums the registrations per taxon into
// the clade totals _cladeCounts.
TaxReport(const Taxonomy & taxonomy,
const unordered_map<taxid_t, READCOUNTS>& readCounts,
bool show_zeros)
: taxonomy(&taxonomy), _taxCounts(readCounts), _show_zeros(show_zeros) {
cerr << "Setting values in the taxonomy tree ...";
//unordered_map<const TaxonomyEntry<TAXID>*, unordered_set<const TaxonomyEntry<TAXID>*> > _children1;
for (auto it = _taxCounts.begin(); it != _taxCounts.end(); ++it) {
TaxonomyNode node;
taxid_t tax = it->first;
// climb toward the root; taxid 0 terminates the walk
while (tax != 0) {
node = taxonomy.nodes()[tax];
_children[tax].push_back(&(it->second));
tax = node.parent_id;
}
}
// NOTE(review): advance(cit, i) re-walks the unordered_map from begin() on
// every iteration, so this loop is O(n^2) overall despite the OpenMP pragma.
#pragma omp parallel for schedule(dynamic, 50)
for (size_t i = 0; i < _children.size(); ++i) {
auto cit = _children.begin();
advance(cit, i);
// sum all READCOUNTS registered for this taxon
READCOUNTS rc = *(cit->second.front());
for (size_t j = 1; j < cit->second.size(); ++j){
auto counter = cit->second[j];
rc += *counter;
}
// _cladeCounts is shared; serialize the insert
#pragma omp critical(update_clade_counts)
{
_cladeCounts.insert( std::make_pair( cit->first, std::move(rc) ) );
}
}
cerr << " done" << endl;
}
void ReportKrakenStyle(std::string filename, uint64_t total_sequences, uint64_t total_unclassified){
ofstream ofs(filename);
// // Special handling of the unclassified sequences
if (total_unclassified != 0 || _show_zeros) {
// printLine(ofstream& ofs, const taxid_t taxid,
// const READCOUNTS & rc,
// unsigned depth, const string &rank_str, uint32_t external_taxid,
// const string &sci_name, uint64_t total_sequences)
READCOUNTS rc(total_unclassified, 0);
printLine(ofs, 0, rc, 0, "U", 0, "unclassified", total_sequences);
}
printReport(ofs, 1, 0, 'R', -1, total_sequences);
}
void printReport(ofstream & ofs, const taxid_t taxid, unsigned depth, char rank_code, int rank_depth, uint64_t total_sequences) {
const auto taxit_ptr = _cladeCounts.find(taxid);
if (taxit_ptr == _cladeCounts.end())
return;
const auto & cladecounts = taxit_ptr->second;
if (!_show_zeros && cladecounts.readCount() == 0)
return;
TaxonomyNode node = taxonomy->nodes()[taxid];
string rank = taxonomy->rank_data() + node.rank_offset;
if (rank == "superkingdom") { rank_code = 'D'; rank_depth = 0; }
else if (rank == "kingdom") { rank_code = 'K'; rank_depth = 0; }
else if (rank == "phylum") { rank_code = 'P'; rank_depth = 0; }
else if (rank == "class") { rank_code = 'C'; rank_depth = 0; }
else if (rank == "order") { rank_code = 'O'; rank_depth = 0; }
else if (rank == "family") { rank_code = 'F'; rank_depth = 0; }
else if (rank == "genus") { rank_code = 'G'; rank_depth = 0; }
else if (rank == "species") { rank_code = 'S'; rank_depth = 0; }
else { rank_depth++; }
string rank_str(&rank_code, 0, 1);
if (rank_depth != 0)
rank_str += std::to_string(rank_depth);
string name = taxonomy->name_data() + node.name_offset;
printLine(ofs, taxid, cladecounts, depth, rank_str, node.external_id, name, total_sequences);
// Sort children
vector<size_t> pos;
unordered_map<size_t, READCOUNTS *> rc;
auto child_count = node.child_count;
if (child_count != 0) {
vector<uint64_t> children(child_count);
for (auto i = 0u; i < child_count; i++) {
children[i] = node.first_child + i;
}
for (size_t i = 0; i < children.size(); ++i) {
auto it = _cladeCounts.find(children[i]);
if (it != _cladeCounts.end()) {
pos.push_back(i);
rc[i] = &(it->second);
}
}
std::sort(pos.begin(), pos.end(), [&](size_t a, size_t b) { return *(rc.at(b)) < *(rc.at(a)); });
for (size_t i = 0; i < rc.size(); ++i) {
auto child_taxid = children[pos[i]];
printReport(ofs, child_taxid, depth + 1, rank_code, rank_depth, total_sequences);
}
}
}
void printLine(ofstream& ofs, const taxid_t taxid,
const READCOUNTS & rc,
unsigned depth, const string &rank_str, uint32_t external_taxid,
const string &sci_name, uint64_t total_sequences)
{
const auto r_it = _taxCounts.find(taxid);
const bool has_tax_data = r_it != _taxCounts.end();
ofs << setprecision(4) << 100.0*rc.readCount()/total_sequences << "\t"
<< rc.readCount() << "\t"
<< (has_tax_data? r_it->second.readCount() : 0) << "\t"
<< rc.kmerCount() << "\t"
<< rc.uniqueKmerCount() << "\t"
<< rank_str << "\t"
<< external_taxid << "\t" ;
for (auto i = 0; i < depth; i++)
ofs << " ";
ofs << sci_name << std::endl;
}
};
}
#endif
/* ==== file: psd.c ==== */
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declarations.
*/
/* Upper bound on the number of channels accepted per layer record. */
#define MaxPSDChannels 56
/* Round a byte count up to the next even value (PSD pads to 2 bytes). */
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declarations.
*/
/*
  On-disk values of the PSD "Compression" field.
*/
typedef enum
{
  Raw = 0,
  RLE = 1,
  ZipWithoutPrediction = 2,
  ZipWithPrediction = 3
} PSDCompressionType;
/*
  Color-mode values from the PSD file header (see ModeToString below).
*/
typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;
/*
Typedef declarations.
*/
/*
  One channel record of a layer: the PSD channel type tag (negative values
  mark special channels, e.g. -1 is transparency) and the byte size of the
  channel's data.
*/
typedef struct _ChannelInfo
{
  short
    type;

  size_t
    size;
} ChannelInfo;
/*
  Layer-mask state: the decoded mask image, its placement rectangle, the
  default background value and the mask flag byte.
*/
typedef struct _MaskInfo
{
  Image
    *image;

  RectangleInfo
    page;

  unsigned char
    background,
    flags;
} MaskInfo;
/*
  Everything parsed from a single layer record of the layer-info section.
*/
typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];  /* per-channel type tag and data size */

  char
    blendkey[4];                   /* 4-byte PSD blend-mode key */

  Image
    *image;                        /* decoded layer pixels */

  MaskInfo
    mask;                          /* optional opacity mask */

  Quantum
    opacity;                       /* layer-wide opacity */

  RectangleInfo
    page;                          /* layer placement on the canvas */

  size_t
    offset_x,
    offset_y;

  unsigned char
    clipping,
    flags,
    name[257],                     /* layer name (up to 256 bytes) */
    visible;

  unsigned short
    channels;                      /* number of valid channel_info entries */

  StringInfo
    *info;                         /* extra layer data, if any */
} LayerInfo;
/*
Forward declarations.
*/
/* Forward declaration: the PSD writer, defined later in this file. */
static MagickBooleanType
  WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /*
    A PSD/PSB file begins with the 4-byte signature "8BPS".
  */
  if (length < 4)
    return(MagickFalse);
  return(LocaleNCompare((const char *) magick,"8BPS",4) == 0 ? MagickTrue :
    MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Map an ImageMagick composite operator onto the 4-byte PSD blend-mode key.
  Each key is emitted byte-reversed when the image is little-endian, to
  match how the rest of this coder handles these signatures.  Operators
  without a PSD equivalent fall back to "norm" (normal).
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  switch (image->compose)
  {
    case ColorBurnCompositeOp:
      return(image->endian == LSBEndian ? "vidi" : "idiv");
    case ColorDodgeCompositeOp:
      return(image->endian == LSBEndian ? " vid" : "div ");
    case ColorizeCompositeOp:
      return(image->endian == LSBEndian ? "rloc" : "colr");
    case DarkenCompositeOp:
      return(image->endian == LSBEndian ? "krad" : "dark");
    case DifferenceCompositeOp:
      return(image->endian == LSBEndian ? "ffid" : "diff");
    case DissolveCompositeOp:
      return(image->endian == LSBEndian ? "ssid" : "diss");
    case ExclusionCompositeOp:
      return(image->endian == LSBEndian ? "dums" : "smud");
    case HardLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLh" : "hLit");
    case HardMixCompositeOp:
      return(image->endian == LSBEndian ? "xiMh" : "hMix");
    case HueCompositeOp:
      return(image->endian == LSBEndian ? " euh" : "hue ");
    case LightenCompositeOp:
      return(image->endian == LSBEndian ? "etil" : "lite");
    case LinearBurnCompositeOp:
      return(image->endian == LSBEndian ? "nrbl" : "lbrn");
    case LinearDodgeCompositeOp:
      return(image->endian == LSBEndian ? "gddl" : "lddg");
    case LinearLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLl" : "lLit");
    case LuminizeCompositeOp:
      return(image->endian == LSBEndian ? " mul" : "lum ");
    case MultiplyCompositeOp:
      return(image->endian == LSBEndian ? " lum" : "mul ");
    case OverlayCompositeOp:
      return(image->endian == LSBEndian ? "revo" : "over");
    case PinLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLp" : "pLit");
    case SaturateCompositeOp:
      return(image->endian == LSBEndian ? " tas" : "sat ");
    case ScreenCompositeOp:
      return(image->endian == LSBEndian ? "nrcs" : "scrn");
    case SoftLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLs" : "sLit");
    case VividLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLv" : "vLit");
    case OverCompositeOp:
    default:
      return(image->endian == LSBEndian ? "mron" : "norm");
  }
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image,ExceptionInfo* exception)
{
  const char
    *option;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Undo Photoshop's blend of semi-transparent pixels with white (see the
    comment above).  Only applies to sRGB images that actually carry
    blended alpha; can be disabled with -define psd:alpha-unblend=off.
  */
  if (image->alpha_trait != BlendPixelTrait || image->colorspace != sRGBColorspace)
    return(MagickTrue);
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringFalse(option) != MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      register ssize_t
        i;

      gamma=QuantumScale*GetPixelAlpha(image, q);
      /*
        Fully opaque and fully transparent pixels need no correction; for
        everything else invert c' = c*gamma + (1-gamma)*white on each
        non-alpha channel.
      */
      if (gamma != 0.0 && gamma != 1.0)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            if (channel != AlphaPixelChannel)
              q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  /*
    Map the PSD on-disk compression tag onto ImageMagick's enumeration;
    both ZIP variants collapse to ZipCompression, anything unknown to
    NoCompression.
  */
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Scale every pixel's alpha by the layer-wide opacity (revert ==
    MagickFalse), or divide the opacity back out (revert != MagickFalse)
    so the original alpha can be restored before writing.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  if (opacity == OpaqueAlpha)
    return(MagickTrue);
  /* Ensure an alpha channel exists before scaling it. */
  if (image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (revert == MagickFalse)
        SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
          opacity),q);
      else if (opacity > 0)
        SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
          (MagickRealType) opacity)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;

  MagickBooleanType
    status;

  PixelInfo
    color;

  ssize_t
    y;

  /*
    Multiply the image's alpha by the layer's opacity mask, or divide it
    back out when revert is set.  The mask is first composited onto a
    full-size canvas filled with the mask background so it covers the whole
    layer even when the stored mask rectangle is smaller.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  complete_mask=CloneImage(image,0,0,MagickTrue,exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->alpha_trait=BlendPixelTrait;
  GetPixelInfo(complete_mask,&color);
  color.red=(MagickRealType) background;
  (void) SetImageColor(complete_mask,&color,exception);
  status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
    mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
  image->alpha_trait=BlendPixelTrait;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register Quantum
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;

      alpha=(MagickRealType) GetPixelAlpha(image,q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
      else if (intensity > 0)
        SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
      q+=GetPixelChannels(image);
      p+=GetPixelChannels(complete_mask);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
/*
  Stash the layer's opacity mask in the global image registry under a
  random key and record that key as the "psd:opacity-mask" artifact so the
  writer can later recover the mask and its background value.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  /*
    Request enough random bytes for the 10-byte key assembled below: the
    previous 2+1 request left key[3..7] uninitialized and wrote key[8] and
    key[9] beyond the requested key length.
  */
  key_info=GetRandomKey(random_info,9+1);
  key=(char *) GetStringInfoDatum(key_info);
  /* Last payload byte encodes the mask background; then NUL-terminate. */
  key[8]=(char ) layer_info->mask.background;
  key[9]='\0';
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
/*
  Number of output samples produced from one source byte at the given bit
  depth (1-bit data expands to eight samples per byte, and so on).
*/
static inline ssize_t PSDSamplesPerByte(const ssize_t depth)
{
  switch (depth)
  {
    case 1:
      return(8);
    case 2:
      return(4);
    case 4:
      return(2);
    default:
      return(1);
  }
}

/*
  Expand a single source byte into its samples.  1-bit samples are
  inverted (set bit -> 0, clear bit -> 255).  Returns the number of samples
  written; callers must ensure the output has room beforehand.
*/
static inline ssize_t PSDExpandByte(const ssize_t depth,
  const unsigned int value,unsigned char *out)
{
  ssize_t
    bit;

  switch (depth)
  {
    case 1:
    {
      for (bit=7; bit >= 0; bit--)
        *out++=((value >> bit) & 0x01) ? 0U : 255U;
      return(8);
    }
    case 2:
    {
      *out++=(unsigned char) ((value >> 6) & 0x03);
      *out++=(unsigned char) ((value >> 4) & 0x03);
      *out++=(unsigned char) ((value >> 2) & 0x03);
      *out=(unsigned char) (value & 0x03);
      return(4);
    }
    case 4:
    {
      *out++=(unsigned char) ((value >> 4) & 0xff);
      *out=(unsigned char) (value & 0x0f);
      return(2);
    }
    default:
    {
      *out=(unsigned char) value;
      return(1);
    }
  }
}

/*
  Decode PackBits-style RLE-compressed PSD pixel data.

    number_compact_pixels: bytes available in compact_pixels.
    compact_pixels: the compressed stream.
    depth: bits per sample; any unrecognized value is treated as one byte
      per sample (ReadPSDChannelRLE deliberately passes 123456 for 1-bit
      data so expansion happens later in ReadPSDChannelPixels).
    number_pixels: capacity of the output buffer, in samples.
    pixels: destination buffer.

  Returns the number of samples produced; decoding stops early (without
  error) as soon as either buffer is exhausted.
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
  size_t
    run;

  ssize_t
    available,
    group,
    samples,
    written;

  samples=PSDSamplesPerByte(depth);
  available=(ssize_t) number_compact_pixels;
  written=0;
  while ((available > 1) && (written < (ssize_t) number_pixels))
  {
    available--;
    run=(size_t) (*compact_pixels++);
    if (run == 128)
      continue;  /* 128 is a no-op filler byte */
    if (run > 128)
      {
        unsigned int
          value;

        /*
          Repeat packet: the next byte occurs (257-run) times.
        */
        run=256-run+1;
        if (available == 0)
          return(written);
        available--;
        value=(unsigned int) (*compact_pixels++);
        for (group=0; group < (ssize_t) run; group++)
        {
          if ((written+samples) > (ssize_t) number_pixels)
            return(written);
          written+=PSDExpandByte(depth,value,pixels);
          pixels+=samples;
        }
        continue;
      }
    /*
      Literal packet: copy the next (run+1) source bytes.
    */
    run++;
    for (group=0; group < (ssize_t) run; group++)
    {
      if (available == 0)
        return(written);
      available--;
      if ((written+samples) > (ssize_t) number_pixels)
        return(written);
      written+=PSDExpandByte(depth,(unsigned int) (*compact_pixels++),pixels);
      pixels+=samples;
    }
  }
  return(written);
}
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    layer;

  /*
    Release every per-layer image, mask image and metadata block, then the
    layer array itself; returns NULL for convenient assignment.
  */
  for (layer=0; layer < number_layers; layer++)
  {
    LayerInfo
      *info;

    info=layer_info+layer;
    if (info->image != (Image *) NULL)
      info->image=DestroyImage(info->image);
    if (info->mask.image != (Image *) NULL)
      info->mask.image=DestroyImage(info->mask.image);
    if (info->info != (StringInfo *) NULL)
      info->info=DestroyStringInfo(info->info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline size_t GetPSDPacketSize(const Image *image)
{
  /*
    Bytes per sample: palette images with more than 256 entries need
    16-bit indexes; otherwise the size follows the image depth.
  */
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  return(image->depth > 8 ? 2 : 1);
}
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  /*
    Length fields are 32-bit in PSD (version 1) and 64-bit in PSB.
  */
  if (psd_info->version != 1)
    return((MagickSizeType) ReadBlobLongLong(image));
  return((MagickSizeType) ReadBlobLong(image));
}
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    samples;

  /*
    Bytes per scanline: 1-bit data packs eight samples to the byte.
  */
  samples=(image->depth == 1) ? (image->columns+7)/8 : image->columns;
  return(samples*GetPSDPacketSize(image));
}
static const char *ModeToString(PSDImageType type)
{
  /*
    Human-readable name of a PSD color mode, for logging.
  */
  if (type == BitmapMode)
    return "Bitmap";
  if (type == GrayscaleMode)
    return "Grayscale";
  if (type == IndexedMode)
    return "Indexed";
  if (type == RGBMode)
    return "RGB";
  if (type == CMYKMode)
    return "CMYK";
  if (type == MultichannelMode)
    return "Multichannel";
  if (type == DuotoneMode)
    return "Duotone";
  if (type == LabMode)
    return "L*A*B";
  return "unknown";
}
/*
  Negate all channels except alpha -- presumably because PSD stores CMYK
  data inverted; confirm against the CMYK read/write paths.  The active
  channel mask is saved and restored around NegateImage().
*/
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  MagickBooleanType
    status;

  channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
    AlphaChannel));
  status=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,channel_mask);
  return(status);
}
/*
  Scan the 8BIM image-resource blocks.  The raw data is returned as an
  "8bim" StringInfo profile; along the way the resolution resource (0x03ed)
  is applied to the image, and resource 0x0421 may clear *has_merged_image
  when the file marks the merged image as absent.
*/
static StringInfo *ParseImageResourceBlocks(Image *image,
  const unsigned char *blocks,size_t length,
  MagickBooleanType *has_merged_image,ExceptionInfo *exception)
{
  const unsigned char
    *p;

  ssize_t
    offset;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;  /* discarded 16-bit fields of the resolution resource */

  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  /*
    Each block: "8BIM" signature, 16-bit id, Pascal-style name padded to an
    even size, 32-bit data length, then the data (padded to an even
    offset).  All multi-byte fields are big-endian.
  */
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushCharPixel(p,&name_length);
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    /* Reject blocks whose declared length would run past the buffer. */
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        char
          value[MagickPathExtent];

        unsigned short
          resolution;

        /*
          Resolution info.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatLocaleString(value,MagickPathExtent,"%g",
          image->resolution.x);
        (void) SetImageProperty(image,"tiff:XResolution",value,exception);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatLocaleString(value,MagickPathExtent,"%g",
          image->resolution.y);
        (void) SetImageProperty(image,"tiff:YResolution",value,exception);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /*
          Version info: a zero at byte 4 marks the merged image as absent.
        */
        if ((offset > 4) && (*(p+4) == 0))
          *has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /* Data is padded to an even length. */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
  char
    *head,
    *tail,
    swap;

  /*
    Byte-reverse an in-memory PSD signature; a no-op on big-endian images,
    whose byte order already matches the file.
  */
  if (image->endian == MSBEndian)
    return;
  head=p;
  tail=p+length-1;
  while (head < tail)
  {
    swap=*head;
    *head++=*tail;
    *tail--=swap;
  }
}
/*
  Store one decoded sample in the pixel at q.  `type` is the PSD channel
  type tag: 0..4 are the color channels in storage order; negative values
  are special channels (-1 transparency; -2/-3/-4 are mask channel tags
  routed to red/green/blue here -- NOTE(review): these presumably target a
  separate mask image, confirm against the layer reader).
*/
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  if (image->storage_class == PseudoClass)
    {
      PixelInfo
        *color;

      /*
        Colormapped image: channel 0 carries the palette index; any other
        channel updates the alpha of the pixel's colormap entry.
      */
      if (type == 0)
        {
          if (packet_size == 1)
            SetPixelIndex(image,ScaleQuantumToChar(pixel),q);
          else
            SetPixelIndex(image,ScaleQuantumToShort(pixel),q);
        }
      color=image->colormap+(ssize_t) ConstrainColormapIndex(image,
        (ssize_t) GetPixelIndex(image,q),exception);
      if ((type == 0) && (channels > 1))
        return;
      else
        color->alpha=(MagickRealType) pixel;
      SetPixelViaPixelInfo(image,color,q);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(image,pixel,q);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(image,pixel,q);
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(image,pixel,q);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(image,pixel,q);
      break;
    }
    case 3:
    {
      /*
        Fourth channel: black for CMYK, otherwise alpha (RGBA).
      */
      if (image->colorspace == CMYKColorspace)
        SetPixelBlack(image,pixel,q);
      else
        if (image->alpha_trait != UndefinedPixelTrait)
          SetPixelAlpha(image,pixel,q);
      break;
    }
    case 4:
    {
      /*
        Fifth channel: alpha for CMYKA; ignored for sRGB-compatible images
        that already used channel 3 for alpha.
      */
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->alpha_trait != UndefinedPixelTrait)
        SetPixelAlpha(image,pixel,q);
      break;
    }
  }
}
/*
  Convert one decoded scanline into the authentic pixels of row `row`.
  Samples are 1, 2 or 4 bytes wide (see GetPSDPacketSize); 4-byte samples
  are big-endian floats scaled to the quantum range.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;

  register const unsigned char
    *p;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    packet_size;

  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else if (packet_size == 2)
      {
        unsigned short
          nibble;

        p=PushShortPixel(MSBEndian,p,&nibble);
        pixel=ScaleShortToQuantum(nibble);
      }
    else
      {
        MagickFloatType
          nibble;

        p=PushFloatPixel(MSBEndian,p,&nibble);
        pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble);
      }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        /*
          1-bit data: each source byte holds up to eight pixels, a set bit
          meaning 0 (black).  x is advanced past the expanded pixels; the
          final x-- compensates for the loop increment unless the row
          ended exactly on this byte.
        */
        ssize_t
          bit,
          number_bits;

        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < (ssize_t) number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    bytes_read,
    y;

  unsigned char
    *row_pixels;

  /*
    Uncompressed channel data: read one raw scanline at a time and convert
    it in place.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  row_pixels=(unsigned char *) AcquireQuantumMemory(row_size,
    sizeof(*row_pixels));
  if (row_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    bytes_read=ReadBlob(image,row_size,row_pixels);
    if (bytes_read != (ssize_t) row_size)
      {
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,y,type,row_pixels,exception);
    if (status == MagickFalse)
      break;
  }
  row_pixels=(unsigned char *) RelinquishMagickMemory(row_pixels);
  return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    i;

  /*
    Read the per-row compressed byte counts that precede RLE scanline
    data: 16-bit in PSD (version 1), 32-bit in PSB.  Returns NULL when the
    table cannot be allocated.
  */
  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return(sizes);
  for (i=0; i < (ssize_t) size; i++)
    sizes[i]=(MagickOffsetType) ((psd_info->version == 1) ?
      ReadBlobShort(image) : ReadBlobLong(image));
  return(sizes);
}
/*
  Decode an RLE-compressed channel one scanline at a time; sizes[] holds
  the compressed byte count of each row (see ReadPSDRLESizes).
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Size the scratch buffer to the largest compressed row and reject
    absurd compressed lengths up front.
  */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048)) /* arbitrary number */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /*
      Depth 123456 is a deliberate sentinel: 1-bit rows are decoded here
      as raw bytes and expanded to pixels by ReadPSDChannelPixels.
    */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  ReadPSDChannelZip() decodes one ZIP-compressed channel of a PSD layer:
  the compact_size bytes that follow in the blob are inflated into a full
  plane of image->rows*row_size bytes, the optional horizontal delta
  prediction is undone, and each decoded row is handed to
  ReadPSDChannelPixels().

  Returns MagickTrue on success; on failure all temporary buffers are
  released and MagickFalse is returned (possibly via ThrowBinaryException).
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register unsigned char
    *p;

  size_t
    count,
    length,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  /*
    A channel payload cannot be larger than the file that contains it.
  */
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Size of the fully decoded plane: rows x columns x bytes-per-sample.
  */
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  /*
    Inflate the channel in one pass; the output buffer is exactly the size
    of the decoded plane, so a full buffer or Z_STREAM_END ends the loop.
  */
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            /* corrupt deflate stream: release buffers and fail */
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      /*
        Undo the horizontal delta prediction: each row stores differences,
        so integrate left-to-right (columns-1 additions per row).  For
        2-byte samples the addition carries across the two big-endian bytes.
      */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          /*
          else if (packet_size == 4)
            {
              TODO: Figure out what to do there.
            }
          */
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  /*
    Push the decoded rows into the image.
  */
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
/*
  ReadPSDChannel() reads one channel of a layer.  Regular channels decode
  into 'image'; a user-supplied layer mask channel (type < -1 with a
  non-empty mask rectangle) decodes into a freshly cloned grayscale image
  that is attached to layer_info->mask.image.  In every case the blob
  position is advanced to the end of the channel data before returning.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
          (IsStringTrue(option) == MagickFalse)))
        {
          /* skip the payload; the 2-byte compression code of this channel
             was already consumed by the caller, hence size-2 */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) SetImageType(mask,GrayscaleType,exception);
          /* decode the mask channel into the clone, not the layer image */
          channel_image=mask;
        }
    }
  /*
    Remember where the channel data starts so we can seek to its end
    afterwards, no matter how much the decoder actually consumed.
  */
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      /* per-row byte counts precede the RLE payload */
      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* position at the end of this channel's data, even on partial decode */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      /* replace any previously attached mask image */
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
/*
  ReadPSDLayer() decodes all channels of a single layer into
  layer_info->image, then applies the layer opacity, CMYK negation and the
  layer mask (when present).  Returns MagickFalse as soon as any channel
  fails to decode.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  /* map the PSD blend key onto a composite operator; hidden layers get
     NoCompositeOp so they do not contribute when flattened */
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* each channel carries its own 2-byte compression code */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    /* TODO: Remove this when we figure out how to support this */
    if ((compression == ZipWithPrediction) && (image->depth == 32))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)");
        return(MagickFalse);
      }
    layer_info->image->compression=ConvertPSDCompression(compression);
    /* channel type -1 is the layer's transparency (alpha) channel */
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->alpha_trait=BlendPixelTrait;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      /* optionally keep the mask as its own image for the caller */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
/*
  CheckPSDChannels() verifies that a layer record declares every color
  channel the color mode requires.  A bitmask of still-missing channels is
  built from min_channels and cleared as matching channel types are found;
  the layer is acceptable when nothing is missing, or when only an alpha
  channel was found in addition to a sufficient channel count.
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    missing;

  register ssize_t
    i;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  /* start with every required channel marked as missing */
  missing=RedChannel;
  if (psd_info->min_channels >= 3)
    missing|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    missing|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    switch (layer_info->channel_info[i].type)
    {
      case -1:
        missing|=AlphaChannel;  /* remember we saw a transparency channel */
        break;
      case 0:
        missing&=~RedChannel;
        break;
      case 1:
        missing&=~GreenChannel;
        break;
      case 2:
        missing&=~BlueChannel;
        break;
      case 3:
        missing&=~BlackChannel;
        break;
      default:
        break;  /* mask channels (< -1) and others are ignored */
    }
  }
  if (missing == 0)
    return(MagickTrue);
  /* only alpha left over: fine if there is room for the extra channel */
  if ((missing == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  AttachPSDLayers() links every layer that produced an image into the image
  list right after the base image, copies each layer's page geometry onto
  its image, and releases the layer_info array.
*/
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  register ssize_t
    i;

  ssize_t
    valid;

  /*
    Compact the array so only layers with an image remain, preserving their
    original order.
  */
  valid=0;
  for (i=0; i < number_layers; i++)
    if (layer_info[i].image != (Image *) NULL)
      layer_info[valid++]=layer_info[i];
  number_layers=valid;
  if (number_layers == 0)
    {
      layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
      return;
    }
  /*
    Wire the surviving layers into a doubly linked list and copy the page
    geometry onto each image.
  */
  for (i=0; i < number_layers; i++)
  {
    if (i > 0)
      layer_info[i].image->previous=layer_info[i-1].image;
    if (i < (number_layers-1))
      layer_info[i].image->next=layer_info[i+1].image;
    layer_info[i].image->page=layer_info[i].page;
  }
  /*
    Splice the list in after the base image and release the array.
  */
  image->next=layer_info[0].image;
  layer_info[0].image->previous=image;
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
/*
  PSDSkipImage() reports whether the scene at 'index' falls outside the
  range requested via -scene/number_scenes.  With no scene restriction in
  effect, nothing is ever skipped.
*/
static inline MagickBooleanType PSDSkipImage(const ImageInfo *image_info,
  const size_t index)
{
  if (image_info->number_scenes == 0)
    return(MagickFalse);
  return(((index < image_info->scene) ||
    (index > image_info->scene+image_info->number_scenes-1)) ?
    MagickTrue : MagickFalse);
}
/*
  ReadPSDLayersInternal() parses the "layer and mask information" section:
  the layer count, every per-layer record (bounding box, channels, blend
  key, mask geometry and name) and finally the channel data of each layer.
  When skip_layers is set only the layer count is consumed (enough to learn
  whether the merged image carries alpha).  On success the decoded layers
  are linked into the image list via AttachPSDLayers().
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    count,
    j,
    number_layers;

  size=GetPSDSize(psd_info,image);
  if (size == 0)
    {
      /*
        Skip layers & masks.
      */
      (void) ReadBlobLong(image);
      count=ReadBlob(image,4,(unsigned char *) type);
      if (count == 4)
        ReversePSDString(image,type,(size_t) count);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(MagickTrue);
      else
        {
          /* an "Lr16"/"Lr32" block carries the 16/32-bit layer data */
          count=ReadBlob(image,4,(unsigned char *) type);
          if (count == 4)
            ReversePSDString(image,type,4);
          if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
              (LocaleNCompare(type,"Lr32",4) == 0)))
            size=GetPSDSize(psd_info,image);
          else
            return(MagickTrue);
        }
    }
  if (size == 0)
    return(MagickTrue);
  layer_info=(LayerInfo *) NULL;
  number_layers=(ssize_t) ReadBlobSignedShort(image);
  if (number_layers < 0)
    {
      /*
        The first alpha channel in the merged result contains the
        transparency data for the merged result.
      */
      number_layers=MagickAbsoluteValue(number_layers);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " negative layer count corrected for");
      image->alpha_trait=BlendPixelTrait;
    }
  /*
    We only need to know if the image has an alpha channel
  */
  if (skip_layers != MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " image contains %.20g layers",(double) number_layers);
  if (number_layers == 0)
    ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
      image->filename);
  layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
    sizeof(*layer_info));
  if (layer_info == (LayerInfo *) NULL)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " allocation of LayerInfo failed");
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
  /*
    Pass 1: read every layer record (header data only, no pixels yet).
  */
  for (i=0; i < number_layers; i++)
  {
    ssize_t
      x,
      y;

    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading layer #%.20g",(double) i+1);
    /* bounding rectangle is stored as top, left, bottom, right */
    layer_info[i].page.y=(ssize_t) ReadBlobSignedLong(image);
    layer_info[i].page.x=(ssize_t) ReadBlobSignedLong(image);
    y=(ssize_t) ReadBlobSignedLong(image);
    x=(ssize_t) ReadBlobSignedLong(image);
    layer_info[i].page.width=(size_t) (x-layer_info[i].page.x);
    layer_info[i].page.height=(size_t) (y-layer_info[i].page.y);
    layer_info[i].channels=ReadBlobShort(image);
    if (layer_info[i].channels > MaxPSDChannels)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
          image->filename);
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      /* valid channel types are -4..4; anything else is corrupt */
      layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
      if ((layer_info[i].channel_info[j].type < -4) ||
          (layer_info[i].channel_info[j].type > 4))
        {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
            image->filename);
        }
      layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
        image);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
          (double) layer_info[i].channel_info[j].type,
          (double) layer_info[i].channel_info[j].size);
    }
    if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadBlob(image,4,(unsigned char *) type);
    if (count == 4)
      ReversePSDString(image,type,4);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer type was %.4s instead of 8BIM", type);
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
    if (count != 4)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    ReversePSDString(image,layer_info[i].blendkey,4);
    layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
      ReadBlobByte(image));
    layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
    layer_info[i].flags=(unsigned char) ReadBlobByte(image);
    /* flag bit 0x02 set means the layer is hidden */
    layer_info[i].visible=!(layer_info[i].flags & 0x02);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
        layer_info[i].blendkey,(double) layer_info[i].opacity,
        layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
        layer_info[i].visible ? "true" : "false");
    (void) ReadBlobByte(image); /* filler */
    size=ReadBlobLong(image);
    if (size != 0)
      {
        MagickSizeType
          combined_length,
          length;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer contains additional info");
        /* combined_length tracks how much of 'size' has been consumed */
        length=ReadBlobLong(image);
        combined_length=length+4;
        if (length != 0)
          {
            /*
              Layer mask info.
            */
            layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.height=(size_t)
              (ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
            layer_info[i].mask.page.width=(size_t) (
              ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
            layer_info[i].mask.background=(unsigned char) ReadBlobByte(
              image);
            layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
            /* unless flag 0x01 (position relative) is set, rebase the mask
               offset onto the layer's own origin */
            if (!(layer_info[i].mask.flags & 0x01))
              {
                layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                  layer_info[i].page.y;
                layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                  layer_info[i].page.x;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                (double) layer_info[i].mask.page.x,(double)
                layer_info[i].mask.page.y,(double)
                layer_info[i].mask.page.width,(double)
                layer_info[i].mask.page.height,(double) ((MagickOffsetType)
                length)-18);
            /*
              Skip over the rest of the layer mask information.
              NOTE(review): 18 bytes were consumed above; if length < 18
              the unsigned subtraction wraps and DiscardBlobBytes() fails,
              which is then reported as a corrupt file - confirm this is
              the intended handling.
            */
            if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        length=ReadBlobLong(image);
        combined_length+=length+4;
        if (length != 0)
          {
            /*
              Layer blending ranges info.
            */
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer blending ranges: length=%.20g",(double)
                ((MagickOffsetType) length));
            /* blending ranges are not interpreted here; skip them */
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Layer name (Pascal string: one length byte plus characters).
        */
        length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
        combined_length+=length+1;
        if (length > 0)
          (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
        layer_info[i].name[length]='\0';
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer name: %s",layer_info[i].name);
        if ((length % 4) != 0)
          {
            length=4-(length % 4);
            combined_length+=length;
            /* Skip over the padding of the layer name */
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /* whatever remains of the extra-data block is kept verbatim as
           "additional layer information" */
        length=(MagickSizeType) size-combined_length;
        if (length > 0)
          {
            unsigned char
              *info;

            if (length > GetBlobSize(image))
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "InsufficientImageDataInFile",image->filename);
              }
            layer_info[i].info=AcquireStringInfo((const size_t) length);
            info=GetStringInfoDatum(layer_info[i].info);
            (void) ReadBlob(image,(const size_t) length,info);
          }
      }
  }
  /*
    Pass 2: allocate an image per non-empty layer and attach the additional
    layer information as a profile.
  */
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer data is empty");
        if (layer_info[i].info != (StringInfo *) NULL)
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        continue;
      }
    /*
      Allocate layered image.
    */
    layer_info[i].image=CloneImage(image,layer_info[i].page.width,
      layer_info[i].page.height,MagickFalse,exception);
    if (layer_info[i].image == (Image *) NULL)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " allocation of image for layer %.20g failed",(double) i);
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      }
    if (layer_info[i].info != (StringInfo *) NULL)
      {
        (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
          layer_info[i].info,exception);
        layer_info[i].info=DestroyStringInfo(layer_info[i].info);
      }
  }
  /* when pinging we can stop before reading any pixel data */
  if (image_info->ping != MagickFalse)
    {
      AttachPSDLayers(image,layer_info,number_layers);
      return(MagickTrue);
    }
  /*
    Pass 3: read the channel data of every surviving layer, skipping the
    payload of layers that were dropped or excluded by scene selection.
  */
  status=MagickTrue;
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].image == (Image *) NULL) ||
        (PSDSkipImage(image_info,i) != MagickFalse))
      {
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          if (DiscardBlobBytes(image,(MagickSizeType)
              layer_info[i].channel_info[j].size) == MagickFalse)
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,
                "UnexpectedEndOfFile",image->filename);
            }
        }
        continue;
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for layer %.20g",(double) i);
    status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
      exception);
    if (status == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
      (MagickSizeType) number_layers);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    AttachPSDLayers(image,layer_info,number_layers);
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);
  return(status);
}
/*
  ReadPSDLayers() is the public entry point for reading the layer section.
  It honors the coder security policy before delegating to
  ReadPSDLayersInternal() (with skip_layers disabled).
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /* when the policy forbids reading PSD, silently do nothing */
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}
/*
  ReadPSDMergedImage() reads the precombined (flattened) image that follows
  the layer section, channel by channel.  Only Raw and RLE compression are
  supported here.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  register ssize_t
    i;

  /* nothing to do when the caller only wants later scenes */
  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* RLE stores the per-row byte counts for all channels up front */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    /* a 2-channel image is gray+alpha: the second channel is alpha (-1) */
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  /* sizes is still NULL here on the Raw path */
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
/*
  ReadPSDImage() reads the PSD/PSB header, the raster colormap, the image
  resource blocks, the layer section and finally the precombined (merged)
  image, returning the resulting image list (merged image first, layers
  after it) or NULL on failure.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    has_merged_image,
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    imageListLength;

  ssize_t
    count;

  StringInfo
    *profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.  PSD data is big-endian; version 1 is classic PSD,
    version 2 is PSB (large document format).
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels < 1)
    ThrowReaderException(CorruptImageError,"MissingImageChannel");
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* classic PSD caps dimensions at 30000; only PSB may exceed that */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
      (psd_info.depth != 16) && (psd_info.depth != 32))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  /* choose colorspace and the minimum channel count it implies */
  psd_info.min_channels=3;
  if (psd_info.mode == LabMode)
    (void) SetImageColorspace(image,LabColorspace,exception);
  if (psd_info.mode == CMYKMode)
    {
      psd_info.min_channels=4;
      (void) SetImageColorspace(image,CMYKColorspace,exception);
    }
  else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
           (psd_info.mode == DuotoneMode))
    {
      if (psd_info.depth != 32)
        {
          status=AcquireImageColormap(image,(size_t) (psd_info.depth < 16 ?
            256 : 65536),exception);
          if (status == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " Image colormap allocated");
        }
      psd_info.min_channels=1;
      (void) SetImageColorspace(image,GRAYColorspace,exception);
    }
  if (psd_info.channels < psd_info.min_channels)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if ((psd_info.mode == IndexedMode) && (length < 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading colormap");
      if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
        {
          /*
            Duotone image data; the format of this data is undocumented.
            32 bits per pixel; the colormap is ignored.
          */
          (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap.  The table is stored planar: all red
            bytes, then all green, then all blue.
          */
          number_colors=(size_t) length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          image->alpha_trait=UndefinedPixelTrait;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  has_merged_image=MagickTrue;
  profile=(StringInfo *) NULL;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      if (length > GetBlobSize(image))
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      /* ParseImageResourceBlocks may clear has_merged_image and can return
         a profile that is attached to the output images further below */
      profile=ParseImageResourceBlocks(image,blocks,(size_t) length,
        &has_merged_image,exception);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      /* NOTE(review): a section length of exactly 8 is unwrapped into two
         nested 32-bit lengths, keeping the second - confirm against the
         PSD/PSB specification */
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (has_merged_image != MagickFalse))
    {
      /* only the composite was requested; do not decode the layers */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image has no layers");
    }
  else
    {
      if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
          exception) != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      (void) SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (EOFBlob(image) != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
    }
  if (image_info->ping != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  imageListLength=GetImageListLength(image);
  if ((has_merged_image != MagickFalse) || (imageListLength == 1))
    has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
      &psd_info,exception);
  if ((has_merged_image == MagickFalse) && (imageListLength == 1) &&
      (length != 0))
    {
      /* no usable merged image: rewind and read the layers after all */
      (void) SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
        exception);
      if (status != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
    }
  if (has_merged_image == MagickFalse)
    {
      Image
        *merged;

      if (imageListLength == 1)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
        }
      /* synthesize the composite by flattening the decoded layers */
      image->background_color.alpha=(MagickRealType) TransparentAlpha;
      image->background_color.alpha_trait=BlendPixelTrait;
      (void) SetImageBackgroundColor(image,exception);
      merged=MergeImageLayers(image,FlattenLayer,exception);
      ReplaceImageInList(&image,merged);
    }
  if (profile != (StringInfo *) NULL)
    {
      Image
        *next;

      /* attach the resource-block profile to every image being returned */
      i=0;
      next=image;
      while (next != (Image *) NULL)
      {
        if (PSDSkipImage(image_info,i++) == MagickFalse)
          (void) SetImageProfile(next,GetStringInfoName(profile),profile,
            exception);
        next=next->next;
      }
      profile=DestroyStringInfo(profile);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
/*
  UnregisterPSDImage() removes the registrations made by RegisterPSDImage().
*/
ModuleExport void UnregisterPSDImage(void)
{
  static const char
    *const formats[] = { "PSB", "PSD" };

  size_t
    i;

  for (i=0; i < (sizeof(formats)/sizeof(formats[0])); i++)
    (void) UnregisterMagickInfo(formats[i]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  /*
    Write a row byte-count entry: 16-bit big-endian for PSD (version 1),
    32-bit big-endian for PSB.  Returns the number of bytes written.
  */
  return(psd_info->version == 1 ?
    WriteBlobMSBShort(image,(unsigned short) offset) :
    WriteBlobMSBLong(image,(unsigned int) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    written;

  /*
    Patch an offset entry previously reserved with SetPSDOffset(): seek back
    to `offset`, overwrite the placeholder, then restore the original blob
    position.  Returns the number of bytes written.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  written=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(written);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  /*
    Write a section length field: 32-bit for PSD (version 1), 64-bit for
    PSB.  Returns the number of bytes written.
  */
  return(psd_info->version == 1 ?
    WriteBlobLong(image,(unsigned int) size) :
    WriteBlobLongLong(image,size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    written;

  /*
    Patch a length field previously reserved with SetPSDSize(): seek to
    `offset`, write `size` in the version-appropriate width, and restore the
    current blob position.  Returns the number of bytes written.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  written=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(written);
}
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.  `length` bytes from `pixels`
    are RLE-encoded into `compact_pixels` (which the caller sized for the
    worst case); returns the number of bytes emitted, including the trailing
    0x80 end-of-data marker.  Run records use the byte (257-count), i.e. the
    two's-complement negative repeat count; literal records use (count-1).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* scratch buffer for one literal record: 1 header byte + up to 127 data
     bytes (indices 0..127 are the maximum touched below) */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  /* i counts the bytes still to encode; the switch dispatches on how many
     remain so the 1/2/3-byte tails never read past the input */
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* single remaining byte: one-byte literal record */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* two remaining bytes: two-byte literal record */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* three identical bytes: run of 3, header (256-3)+1 == 254 */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        /* otherwise a three-byte literal record */
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            /* extend the run while bytes repeat, capped at 127 repeats */
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        /* accumulate literals until three identical bytes start a run;
           stop 3 bytes short of the end so the tail cases above finish */
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    length;

  ssize_t
    i,
    y;

  /*
    Write the 2-byte compression marker that precedes channel data.  For RLE
    an additional placeholder row byte-count table is emitted (one entry per
    row per channel); those entries are patched later via WritePSDOffset().
    Returns the number of bytes written.
  */
  if (compression == RLECompression)
    {
      length=(size_t) WriteBlobShort(image,RLE);
      for (i=0; i < channels; i++)
        for (y=0; y < (ssize_t) next_image->rows; y++)
          length+=SetPSDOffset(psd_info,image,0);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  else if (compression == ZipCompression)
    length=(size_t) WriteBlobShort(image,ZipWithoutPrediction);
#endif
  else
    /* anything other than RLE/Zip is stored uncompressed */
    length=(size_t) WriteBlobShort(image,Raw);
  return(length);
}
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const Quantum
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE

#define CHUNK 16384

  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  /*
    Write one channel of `next_image` to the blob.  For RLE, `compact_pixels`
    is the caller-supplied scratch buffer and `size_offset` locates the
    row byte-count table patched per row via WritePSDOffset().  Returns the
    number of bytes written to the blob, or 0 on allocation failure.
  */
  count=0;
  if (separate != MagickFalse)
    {
      /* per-channel (layer) data carries its own compression marker; the
         RLE offset table starts 2 bytes past the current position */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK,
        sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* quality 1-9 maps directly onto the zlib compression level */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    if (monochrome != MagickFalse)
      /* invert bit polarity for bitmap mode output — TODO confirm PSD
         expects 0 == white here */
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* patch this row's byte count and advance to the next table entry */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        /* last row: tell deflate to flush all pending output */
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        do {
          stream.avail_out=(uInt) CHUNK;
          stream.next_out=(Bytef *) compressed_pixels;
          if (deflate(&stream,flush) == Z_STREAM_ERROR)
            break;
          length=(size_t) CHUNK-stream.avail_out;
          if (length > 0)
            count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    bytes_per_sample;

  unsigned char
    *buffer;

  /*
    Allocate a worst-case Packbits scratch buffer for one row: samples
    occupy one byte at depth <= 8 and two bytes otherwise.  Raises a
    resource-limit exception and returns NULL when allocation fails.
  */
  bytes_per_sample=(image->depth > 8UL) ? 2UL : 1UL;
  buffer=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    bytes_per_sample*sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  return(buffer);
}
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType
    compression;

  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  /*
    Write every channel of `next_image`.  With separate != MagickFalse this
    is a layer: each channel gets its own compression marker and its length
    is patched into the channel records at `size_offset`.  Otherwise this is
    the merged composite: one compression marker is shared and all channels
    share a single RLE row table starting at `rows_offset`.  Returns the
    total bytes written, or 0 on allocation failure.
  */
  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* composite: derive channel count from colorspace/alpha */
      if (next_image->storage_class != PseudoClass)
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* bytes occupied by one channel's slice of the RLE row table */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if (next_image->storage_class == PseudoClass)
    {
      /* palette image: a single index channel */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* CMYK samples are stored inverted in PSD; NegateCMYK is undone
             again after writing (below) */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,compression,
                exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* restore the CMYK samples negated above */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* a layer may carry an extra opacity-mask channel, stashed in the
         image registry under the psd:opacity-mask artifact */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
                exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  register ssize_t
    pad;

  size_t
    bytes_written,
    text_length;

  /*
    Write a Pascal string: a length byte followed by at most 255 characters
    of `value`, then zero bytes so the total (including the length byte) is
    a multiple of `padding`.  Returns the number of bytes written.
  */
  text_length=strlen(value);
  if (text_length > 255UL)
    text_length=255UL;
  bytes_written=0;
  if (text_length == 0)
    bytes_written+=WriteBlobByte(image,0);
  else
    {
      bytes_written+=WriteBlobByte(image,(unsigned char) text_length);
      bytes_written+=WriteBlob(image,text_length,
        (const unsigned char *) value);
    }
  text_length++;  /* account for the length byte itself */
  if ((text_length % padding) != 0)
    for (pad=(ssize_t) (padding-(text_length % padding)); pad > 0; pad--)
      bytes_written+=WriteBlobByte(image,0);
  return(bytes_written);
}
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  /*
    Write the 8BIM resolution resource (id 0x03ED).  Resolutions are stored
    as 16.16 fixed-point pixels-per-inch; the +0.5 rounds to the nearest
    fixed-point unit.  Fix: the previous code added 0.5 a second time at the
    cast, rounding twice and biasing the stored value by one 1/65536 unit.
  */
  if (image->units == PixelsPerCentimeterResolution)
    {
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;  /* resolution expressed per centimeter */
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;  /* resolution expressed per inch */
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);  /* resolution resource id */
  (void) WriteBlobMSBShort(image,0);  /* empty, padded Pascal name */
  (void) WriteBlobMSBLong(image,16);  /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) x_resolution);
  (void) WriteBlobMSBShort(image,units);  /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units);  /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) y_resolution);
  (void) WriteBlobMSBShort(image,units);  /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units);  /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    bytes_written;

  /*
    Emit one layer channel record: the channel id followed by a zero-length
    placeholder that is patched later via WritePSDSize().  Returns the
    number of bytes written.
  */
  bytes_written=(size_t) WriteBlobShort(image,(const unsigned short) channel);
  bytes_written+=SetPSDSize(psd_info,image,0);
  return(bytes_written);
}
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  /*
    Excise the embedded ICC profile resource (id 0x040F) from the 8BIM
    profile so it is not written twice; the writer emits the "icc" profile
    as its own resource.  The caller is expected to pass a mutable copy of
    the profile.  Entries are compacted in place with memmove.
  */
  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  /* each resource needs at least a 16-byte header ahead of it */
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    /* consume: 4-byte "8BIM" signature, 2-byte id, 2-byte (empty) name,
       4-byte data length */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* even-padded data size plus the 12-byte header just parsed */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    /* skip this resource's data, padded to an even byte count */
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  /*
    Excise the resolution resource (id 0x03ED) from the 8BIM profile so it
    does not conflict with the block emitted by
    WriteResolutionResourceBlock().  The caller passes a mutable copy;
    entries are compacted in place with memmove.
  */
  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  /* each resource needs at least a 16-byte header ahead of it */
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    /* consume: 4-byte "8BIM" signature, 2-byte id, 2-byte (empty) name,
       4-byte data length */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        /* shift the remaining resources down over this one (data + the
           12-byte header) and shrink the profile */
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    /* skip this resource's data, padded to an even byte count */
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  register size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  /*
    Return the "psd:additional-info" profile filtered according to the
    psd:additional-info option: "all" keeps the profile untouched; anything
    other than "selective" drops it entirely; "selective" keeps only records
    whose 4-character key appears in the whitelist above, compacting the
    buffer in place.  Returns NULL when nothing should be written.
  */
  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;
  /* each record: 4-byte signature, 4-byte key, 4-byte size, then data */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    /* big-endian 32-bit data size */
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    /* declared size past the end of the buffer: treat as corrupt */
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* drop this record: slide the rest of the buffer over it; p lands
           back at the start of the moved data (the old header position) */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    /* keep this record: 12-byte header plus its data */
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  /* truncate to the surviving records; re-attach a clone to the image */
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}
static MagickBooleanType WritePSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
  ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  register ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    rounded_size,
    size;

  /*
    Write the layer info section in two passes: first one layer record per
    image in the list (bounding box, channel table with zero-length
    placeholders, blend data, name, additional info), then every layer's
    channel data, patching each placeholder via the offsets remembered in
    layer_size_offsets.  The section's total size is patched last at
    size_offset.  If layers_size is non-NULL it receives the unpadded size.
  */
  status=MagickTrue;
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  size_offset=TellBlob(image);
  /* reserve the layer-info length field; patched at the end */
  (void) SetPSDSize(psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* a negative layer count flags that the first alpha channel holds the
     merged-result transparency */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    /* optional opacity mask stored in the registry by the reader */
    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
      }
    /* layer bounding box: top, left, bottom, right */
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
      next_image->columns));
    channels=1;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
        3);
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobShort(image,total_channels);
    /* remember where this layer's channel-length placeholders start so the
       second pass can patch them */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(psd_info,image,(signed short) i);
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(psd_info,image,-1);  /* alpha channel id */
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(psd_info,image,-2);  /* user mask channel id */
    /* blend-data signature, byte-swapped on little-endian blobs */
    size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
    size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);  /* fully opaque by default */
    size+=WriteBlobByte(image,0);  /* clipping: base */
    size+=WriteBlobByte(image,(const unsigned char)
      (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);  /* filler */
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* unlabeled layers are named L1, L2, ... */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* extra-data length: padded Pascal name + mask block + additional info
       + the two 4-byte sub-section lengths */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobLong(image,0);  /* no layer mask data */
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobLong(image,20);  /* mask block size */
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
        size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
          mask->page.y));
        size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
          mask->page.x));
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,(const unsigned char)
          (mask->compose == NoCompositeOp ? 2 : 0));
        size+=WriteBlobMSBShort(image,0);  /* padding */
      }
    size+=WriteBlobLong(image,0);  /* layer blending ranges length */
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size
  */
  if (layers_size != (size_t*) NULL)
    *layers_size=size;
  /* pad the section size to an even byte count */
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  return(status);
}
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Public entry point for writing the layer section: honor the coder write
    policy (silently succeed when writing PSD is not authorized), then
    delegate to WritePSDLayersInternal without requesting the total size.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL,
    exception));
}
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const StringInfo
    *icc_profile;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    length,
    num_channels,
    packet_size;

  StringInfo
    *bim_profile;

  /*
    Open image file.  Sections are written in PSD order: file header,
    color-mode data (colormap), image resources, layer and mask info, and
    finally the merged composite image data.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  packet_size=(size_t) (image->depth > 8 ? 6 : 3);
  if (image->alpha_trait != UndefinedPixelTrait)
    packet_size+=image->depth > 8 ? 2 : 1;
  /* version 2 == PSB (large document); also forced above 30000 pixels */
  psd_info.version=1;
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version); /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
  /* When the image has a color profile it won't be converted to gray scale */
  if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
      (SetImageGray(image,exception) != MagickFalse))
    num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) && (image_info->type !=
         TrueColorAlphaType) && (image->storage_class == PseudoClass))
      num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass,exception);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
        else
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsImageGray(image) != MagickFalse)
    {
      MagickBooleanType
        monochrome;

      /*
        Write depth & mode.
      */
      monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
        MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
        PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          /* anything not explicitly CMYK is written as sRGB (or indexed) */
          (void) TransformImageColorspace(image,sRGBColorspace,exception);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace,exception);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  if ((IsImageGray(image) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0);  /* no color-mode data */
  else
    {
      /*
        Write PSD raster colormap.  Always 768 bytes: 256 entries per
        channel, zero-padded past image->colors.
      */
      (void) WriteBlobMSBLong(image,768);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].red)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].green)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].blue)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  length=28; /* 0x03EB */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /* work on a clone: strip resources we write ourselves (ICC when a
         separate icc profile exists, and the resolution block) */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      /* ICC profile as its own 8BIM resource (id 0x040F), even-padded */
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);
      (void) WriteBlobMSBShort(image,0);
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);
    }
  if (status != MagickFalse)
    {
      MagickOffsetType
        size_offset;

      size_t
        size;

      /* reserve the layer/mask section length, write the layers, then
         patch it (plus the sub-section length fields) in place */
      size_offset=TellBlob(image);
      (void) SetPSDSize(&psd_info,image,0);
      status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
        exception);
      size_offset+=WritePSDSize(&psd_info,image,size+
        (psd_info.version == 1 ? 8 : 12),size_offset);
    }
  (void) WriteBlobMSBLong(image,0); /* user mask data */
  /*
    Write composite image.
  */
  if (status != MagickFalse)
    {
      CompressionType
        compression;

      compression=image->compression;
      /* the composite is never Zip-compressed; fall back to RLE */
      if (image->compression == ZipCompression)
        image->compression=RLECompression;
      if (image_info->compression != UndefinedCompression)
        image->compression=image_info->compression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
          exception) == 0)
        status=MagickFalse;
      image->compression=compression;
    }
  (void) CloseBlob(image);
  return(status);
}
|
pi-v1.c | /*
* Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
* between 0 and 1.
*
* parallel version using OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h> /* OpenMP */
#if _DEBUG_
#define _DEBUG_ 1
#else
#define _DEBUG_ 0
#endif
/*
 * Approximate pi as the midpoint-rule integral of 4/(1+x*x) over [0,1].
 * This is "v1" of a teaching example and is deliberately broken (see the
 * WARNING below): the timing path and structure are correct, but the
 * parallel region is not.
 */
int main(int argc, char *argv[]) {
  double x, sum=0.0, pi=0.0;
#if !_DEBUG_
  double start,end;
#endif
  int i;
  const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n";

  if (argc < 2) {
    fprintf(stderr, Usage);
    exit(1);
  }
  int num_steps = atoi(argv[1]);
  double step = 1.0/(double) num_steps;
#if !_DEBUG_
  start= omp_get_wtime();
#endif
  /* do computation -- using all available threads */
  // WARNING : incorrect code
  /*
   * Two distinct bugs, left in on purpose for the exercise:
   *  1. Data races: i, x, and sum are declared outside the region, so all
   *     threads share them; the unsynchronized updates to sum (and x, i)
   *     produce nondeterministic results.
   *  2. No worksharing: there is no `omp for`, so every thread executes
   *     the ENTIRE loop, over-counting the sum by up to the thread count.
   * The fix is: #pragma omp parallel for private(x) reduction(+:sum)
   */
#pragma omp parallel
  {
#if _DEBUG_
    int id = omp_get_thread_num();
#endif
    for (i=0; i < num_steps; ++i) {
      x = (i+0.5)*step;           /* midpoint of sub-interval i */
      sum += 4.0/(1.0+x*x);       /* racy accumulation (bug #1) */
#if _DEBUG_
      printf("thread id:%d it:%d\n",id,i);
#endif
    }
  }
  pi = step * sum;
#if !_DEBUG_
  end = omp_get_wtime();
  printf("Wall clock execution time = %.9f seconds\n", end-start);
#endif
  /* print results */
  printf("Value of pi = %12.10f\n", pi);
  return EXIT_SUCCESS;
}
|
GB_binop__max_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__max_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__max_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__max_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_fp32)
// A*D function (colscale): GB (_AxD__max_fp32)
// D*A function (rowscale): GB (_DxB__max_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__max_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__max_fp32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_fp32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_fp32)
// C=scalar+B GB (_bind1st__max_fp32)
// C=scalar+B' GB (_bind1st_tran__max_fp32)
// C=A+scalar GB (_bind2nd__max_fp32)
// C=A'+scalar GB (_bind2nd_tran__max_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = fmaxf (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = fmaxf (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_FP32 || GxB_NO_MAX_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C += A+B with all three matrices dense; the loop body comes from the
// shared template, specialized via the GB_* macros above (cij = fmaxf).
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C = A+B with all three matrices dense (no accumulation into C).
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
// GrB_NO_VALUE tells the caller this kernel was compiled out (GB_DISABLE)
// and that it must fall back to the generic worker instead.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// C(i,j) = fmaxf (C(i,j), B(i,j)) for every entry present in sparse B.
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__max_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns. Harmless
// artifact of the code generator; do not hand-edit this generated file.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = A*D: scale each column j of A by the diagonal entry D(j,j).
// The template reads A and D and writes the typed value array Cx.
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = D*B: scale each row i of B by the diagonal entry D(i,i).
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__max_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace for slicing M, A, and B by entry; released by GB_FREE_WORKSPACE
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are only read for eWiseUnion; plain eWiseAdd ignores them
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__max_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult method 08: C is sparse/hypersparse; the meta template selects
// the mask variant (none, M, or !M) at run time.
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// eWiseMult method 02: A is sparse/hyper, B is bitmap/full. flipxy matters
// only when the op has no flipped variant (GB_BINOP_FLIP); MAX is
// commutative, so the simpler branch below is compiled.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult method 04: M is sparse/hyper, A and B are bitmap/full; the
// work is sliced over the entries of M.
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__max_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult where the result C is held in bitmap form.
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__max_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
// Cx [p] = fmaxf (x, Bx [p]) for all bnz entries, with the scalar x bound
// as the first operand. Bb is the bitmap of B (NULL when B is full).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries absent from the bitmap; GBB is always true for full matrices
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = fmaxf (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__max_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
// Cx [p] = fmaxf (Ax [p], y) for all anz entries, with the scalar y bound
// as the second operand. Ab is the bitmap of A (NULL when A is full).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap; GBB is always true for full matrices
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = fmaxf (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmaxf (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__max_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = fmaxf (x, A'): transpose A and apply the operator with the scalar x
// bound first; the per-entry work is GB_CAST_OP, defined just above.
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE (generator artifact: the redefinition is identical here)
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmaxf (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = fmaxf (A', y): transpose A and apply the operator with the scalar y
// bound second; the per-entry work is GB_CAST_OP, defined just above.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
layerramdistancetransform.h | /*********************************************************************************
*
* Inviwo - Interactive Visualization Workshop
*
* Copyright (c) 2017-2020 Inviwo Foundation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*********************************************************************************/
#pragma once
#include <modules/base/basemoduledefine.h>
#include <inviwo/core/common/inviwo.h>
#include <inviwo/core/util/indexmapper.h>
#include <inviwo/core/datastructures/image/layer.h>
#include <inviwo/core/datastructures/image/layerram.h>
#include <inviwo/core/datastructures/image/layerramprecision.h>
#ifdef IVW_USE_OPENMP
#include <omp.h>
#endif
namespace inviwo {
namespace util {
/**
* Implementation of Euclidean Distance Transform according to Saito's algorithm:
* T. Saito and J.I. Toriwaki. New algorithms for Euclidean distance transformations
* of an n-dimensional digitized picture with applications. Pattern Recognition, 27(11).
* pp. 1551-1565, 1994.
* http://www.cs.jhu.edu/~misha/ReadingSeminar/Papers/Saito94.pdf
*
* Calculates the distance in base mat space
* * Predicate is a function of type (const T &value) -> bool to deside if a value in the input
* is a "feature".
* * ValueTransform is a function of type (const U& squaredDist) -> U that is appiled to all
* squared distance values at the end of the calculation.
* * ProcessCallback is a function of type (double progress) -> void that is called with a value
* from 0 to 1 to indicate the progress of the calculation.
*/
template <typename T, typename U, typename Predicate, typename ValueTransform,
typename ProgressCallback>
void layerRAMDistanceTransform(const LayerRAMPrecision<T>* inLayer,
LayerRAMPrecision<U>* outDistanceField, const Matrix<2, U> basis,
const size2_t upsample, Predicate predicate,
ValueTransform valueTransform, ProgressCallback callback);
template <typename T, typename U>
void layerRAMDistanceTransform(const LayerRAMPrecision<T>* inVolume,
LayerRAMPrecision<U>* outDistanceField, const Matrix<2, U> basis,
const size2_t upsample);
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void layerDistanceTransform(const Layer* inLayer, LayerRAMPrecision<U>* outDistanceField,
const size2_t upsample, Predicate predicate,
ValueTransform valueTransform, ProgressCallback callback);
template <typename U, typename ProgressCallback>
void layerDistanceTransform(const Layer* inLayer, LayerRAMPrecision<U>* outDistanceField,
const size2_t upsample, double threshold, bool normalize, bool flip,
bool square, double scale, ProgressCallback callback);
template <typename U>
void layerDistanceTransform(const Layer* inLayer, LayerRAMPrecision<U>* outDistanceField,
const size2_t upsample, double threshold, bool normalize, bool flip,
bool square, double scale);
} // namespace util
template <typename T, typename U, typename Predicate, typename ValueTransform,
typename ProgressCallback>
void util::layerRAMDistanceTransform(const LayerRAMPrecision<T>* inLayer,
LayerRAMPrecision<U>* outDistanceField,
const Matrix<2, U> basis, const size2_t upsample,
Predicate predicate, ValueTransform valueTransform,
ProgressCallback callback) {
// Separable two-pass (Saito) distance transform: pass 1 scans each row,
// pass 2 refines along columns. Distances stay SQUARED (in basis units)
// until valueTransform is applied at the very end.
#ifdef IVW_USE_OPENMP
omp_set_num_threads(std::thread::hardware_concurrency());
#endif
using int64 = glm::int64;
auto square = [](auto a) { return a * a; };
callback(0.0);
const T* src = inLayer->getDataTyped();
U* dst = outDistanceField->getDataTyped();
const i64vec2 srcDim{inLayer->getDimensions()};
const i64vec2 dstDim{outDistanceField->getDimensions()};
const i64vec2 sm{upsample};
// Squared per-texel step sizes derived from the basis diagonal.
const auto squareBasis = glm::transpose(basis) * basis;
const Vector<2, U> squareBasisDiag{squareBasis[0][0], squareBasis[1][1]};
const Vector<2, U> squareVoxelSize{squareBasisDiag / Vector<2, U>{dstDim * dstDim}};
const Vector<2, U> invSquareVoxelSize{Vector<2, U>{1.0f} / squareVoxelSize};
{
// The separable scans assume an orthogonal basis; warn when the
// off-diagonal terms of basis'*basis are non-negligible.
const auto maxdist = glm::compMax(squareBasisDiag);
bool orthogonal = true;
for (size_t i = 0; i < squareBasis.length(); i++) {
for (size_t j = 0; j < squareBasis.length(); j++) {
if (i != j) {
if (std::abs(squareBasis[i][j]) > 10.0e-8 * maxdist) {
orthogonal = false;
break;
}
}
}
}
if (!orthogonal) {
LogWarnCustom(
"layerRAMDistanceTransform",
"Calculating the distance transform on a non-orthogonal layer will not give "
"correct values");
}
}
if (srcDim * sm != dstDim) {
throw Exception(
"DistanceTransformRAM: Dimensions does not match src = " + toString(srcDim) +
" dst = " + toString(dstDim) + " scaling = " + toString(sm),
IVW_CONTEXT_CUSTOM("layerRAMDistanceTransform"));
}
util::IndexMapper<2, int64> srcInd(srcDim);
util::IndexMapper<2, int64> dstInd(dstDim);
// A destination texel is a "feature" if its down-sampled source texel
// satisfies the predicate.
auto is_feature = [&](const int64 x, const int64 y) {
return predicate(src[srcInd(x / sm.x, y / sm.y)]);
};
// first pass, forward and backward scan along x
// result: min distance in x direction
#ifdef IVW_USE_OPENMP
#pragma omp parallel for
#endif
for (int64 y = 0; y < dstDim.y; ++y) {
// forward
U dist = static_cast<U>(dstDim.x);
for (int64 x = 0; x < dstDim.x; ++x) {
if (!is_feature(x, y)) {
++dist;
} else {
dist = U(0);
}
dst[dstInd(x, y)] = squareVoxelSize.x * square(dist);
}
// backward
dist = static_cast<U>(dstDim.x);
for (int64 x = dstDim.x - 1; x >= 0; --x) {
if (!is_feature(x, y)) {
++dist;
} else {
dist = U(0);
}
dst[dstInd(x, y)] = std::min<U>(dst[dstInd(x, y)], squareVoxelSize.x * square(dist));
}
}
// second pass, scan y direction
// for each voxel v(x,y,z) find min_i(data(x,i,z) + (y - i)^2), 0 <= i < dimY
// result: min distance in x and y direction
callback(0.45);
#ifdef IVW_USE_OPENMP
#pragma omp parallel
#endif
{
std::vector<U> buff;
buff.resize(dstDim.y);
#ifdef IVW_USE_OPENMP
#pragma omp for
#endif
for (int64 x = 0; x < dstDim.x; ++x) {
// cache column data into temporary buffer
for (int64 y = 0; y < dstDim.y; ++y) {
buff[y] = dst[dstInd(x, y)];
}
for (int64 y = 0; y < dstDim.y; ++y) {
auto d = buff[y];
if (d != U(0)) {
// Only rows within rMax can still lower d; anything farther is
// already >= d in squared units.
const auto rMax = static_cast<int64>(std::sqrt(d * invSquareVoxelSize.y)) + 1;
// NOTE(review): rStart = min(rMax, y - 1) means buff[0] is never
// examined for any y >= 1 -- looks like an off-by-one (min(rMax, y)
// seems intended); confirm against the 3D volume implementation.
const auto rStart = std::min(rMax, y - 1);
const auto rEnd = std::min(rMax, dstDim.y - y);
for (int64 n = -rStart; n < rEnd; ++n) {
const auto w = buff[y + n] + squareVoxelSize.y * square(n);
if (w < d) d = w;
}
}
dst[dstInd(x, y)] = d;
}
}
}
// scale data
callback(0.9);
const int64 layerSize = dstDim.x * dstDim.y;
#ifdef IVW_USE_OPENMP
#pragma omp parallel for
#endif
for (int64 i = 0; i < layerSize; ++i) {
dst[i] = valueTransform(dst[i]);
}
callback(1.0);
}
template <typename T, typename U>
void util::layerRAMDistanceTransform(const LayerRAMPrecision<T>* inLayer,
                                     LayerRAMPrecision<U>* outDistanceField,
                                     const Matrix<2, U> basis, const size2_t upsample) {
    // Convenience overload: features are texels whose normalized value exceeds
    // 0.5, squared distances are converted to euclidean distances, and progress
    // reports are discarded. The progress lambda's parameter is now unnamed to
    // avoid an unused-parameter warning (matching the other overload below).
    util::layerRAMDistanceTransform(
        inLayer, outDistanceField, basis, upsample,
        [](const T& val) { return util::glm_convert_normalized<double>(val) > 0.5; },
        [](const U& squareDist) {
            return static_cast<U>(std::sqrt(static_cast<double>(squareDist)));
        },
        [](double) {});
}
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void util::layerDistanceTransform(const Layer* inLayer, LayerRAMPrecision<U>* outDistanceField,
const size2_t upsample, Predicate predicate,
ValueTransform valueTransform, ProgressCallback callback) {
// Resolve the layer's runtime data format to a concrete LayerRAMPrecision<T>
// (scalar formats only) and forward to the typed implementation above.
const auto inputLayerRep = inLayer->getRepresentation<LayerRAM>();
inputLayerRep->dispatch<void, dispatching::filter::Scalars>([&](const auto lrprecision) {
layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(), upsample,
predicate, valueTransform, callback);
});
}
template <typename U, typename ProgressCallback>
void util::layerDistanceTransform(const Layer* inLayer, LayerRAMPrecision<U>* outDistanceField,
                                  const size2_t upsample, double threshold, bool normalize,
                                  bool flip, bool square, double scale, ProgressCallback progress) {
    // Resolve the layer's runtime scalar type, then select a feature predicate
    // from (normalize, flip) and a value transform from (square): identity keeps
    // squared distances, otherwise the square root is taken. Both choices are
    // scaled by `scale`.
    const auto inputLayerRep = inLayer->getRepresentation<LayerRAM>();
    inputLayerRep->dispatch<void, dispatching::filter::Scalars>([&](const auto lrprecision) {
        using ValueType = util::PrecisionValueType<decltype(lrprecision)>;

        // Run the transform with the given predicate; `square` picks the
        // value transform applied to the squared distances.
        auto run = [&](auto predicate) {
            if (square) {
                util::layerRAMDistanceTransform(
                    lrprecision, outDistanceField, inLayer->getBasis(), upsample, predicate,
                    [scale](const float& squareDist) {
                        return static_cast<float>(scale * squareDist);
                    },
                    progress);
            } else {
                util::layerRAMDistanceTransform(
                    lrprecision, outDistanceField, inLayer->getBasis(), upsample, predicate,
                    [scale](const float& squareDist) {
                        return static_cast<float>(scale * std::sqrt(squareDist));
                    },
                    progress);
            }
        };

        if (normalize) {
            if (flip) {
                run([threshold](const ValueType& val) {
                    return util::glm_convert_normalized<double>(val) < threshold;
                });
            } else {
                run([threshold](const ValueType& val) {
                    return util::glm_convert_normalized<double>(val) > threshold;
                });
            }
        } else if (flip) {
            run([threshold](const ValueType& val) { return val < threshold; });
        } else {
            run([threshold](const ValueType& val) { return val > threshold; });
        }
    });
}
template <typename U>
void util::layerDistanceTransform(const Layer* inLayer, LayerRAMPrecision<U>* outDistanceField,
const size2_t upsample, double threshold, bool normalize,
bool flip, bool square, double scale) {
// Convenience overload without progress reporting: forwards a no-op callback.
util::layerDistanceTransform(inLayer, outDistanceField, upsample, threshold, normalize, flip,
square, scale, [](double) {});
}
} // namespace inviwo
|
omp-hello.c | /*
============================================================================
Name : omp-hello.c
Author :
Version :
Copyright : Your copyright notice
Description : Hello OpenMP World in C
============================================================================
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/**
* Hello OpenMP World prints the number of threads and the current thread id
*/
/**
 * Hello OpenMP World: every thread in the team reports its id, and the
 * master thread (id 0) additionally reports the team size.
 */
int main (int argc, char *argv[]) {
    int threadCount, threadId;

    /* Spawn a team; each thread works on private copies of both variables. */
#pragma omp parallel private(threadCount, threadId)
    {
        threadId = omp_get_thread_num();
        printf("Hello World from thread number %d\n", threadId);

        /* Only the master thread (id 0) queries and prints the team size. */
        if (threadId == 0) {
            threadCount = omp_get_num_threads();
            printf("Number of threads is %d\n", threadCount);
        }
    }
    return 0;
}
|
firstlastprivate.c | #include <stdio.h>
#include <omp.h>
#include <assert.h>
/*
 * Demonstrates firstprivate/lastprivate: every thread's private copy starts
 * at 77 (firstprivate); after the loop the variable holds the value from the
 * thread that ran the sequentially-last iteration (lastprivate), i.e. 77 plus
 * the sum of ONLY that thread's iterations -- not 77 + (1 + ... + 100).
 */
int main()
{
    int total = 77;
    int num_steps = 100;

    omp_set_num_threads(4);

#pragma omp parallel for firstprivate (total) lastprivate (total)
    for (int i = 1; i <= num_steps; i++) {
        total = total + i;
    }

    printf("sum=%d\n", total);
    return 0;
}
|
deconvolution_packnto1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Deconvolution (transposed convolution) where the input is packed with
// `packn` channels per element and the output is plain (pack1). For every
// output pixel, taps whose stride/dilation math maps back inside the input
// contribute a packn-wide fused multiply-accumulate, which is then reduced
// to a scalar, biased, and passed through the activation.
static void deconvolution_packnto1_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packnto1, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
// packn = number of fp32 lanes in one RVV vector register (vlenb bytes / 4)
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
const int maxk = kernel_w * kernel_h;
const float* bias_data_ptr = bias_data;
// num_output
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
float sum = 0.f;
if (bias_data_ptr)
{
sum = bias_data_ptr[p];
}
// lane-wise accumulator, reduced to a scalar after the channel loop
vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);
const float* kptr = (const float*)weight_data_packnto1 + maxk * channels * p * packn;
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
for (int y = 0; y < kernel_h; y++)
{
// map this tap back to an input row; skip taps that fall between
// strides or outside the input
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * packn;
int k = y * kernel_w + x;
// packn-wide fused multiply-accumulate: _sum += input * weight
vfloat32m1_t _val = vle32_v_f32m1(sptr, vl);
vfloat32m1_t _w = vle32_v_f32m1(kptr + k * packn, vl);
_sum = vfmacc_vv_f32m1(_sum, _val, _w, vl);
}
}
kptr += maxk * packn;
}
// horizontal sum of _sum seeded with the scalar bias; the value-initialized
// vfloat32m1_t() destination matches the pre-1.0 rvv intrinsic API -- verify
// against the toolchain's intrinsic version in use
sum = vfmv_f_s_f32m1_f32(vfredusum_vs_f32m1_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl));
sum = activation_ss(sum, activation_type, activation_params);
outptr[j] = sum;
}
outptr += outw;
}
}
}
|
matmul_float.c | /*
* Square matrix multiplication
* A[N][N] * B[N][N] = C[N][N]
*
*/
#include <malloc.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/timeb.h>
#include <time.h>
#define N 512
//#define N 16
// read timer in second
// Wall-clock time in seconds, assembled from ftime()'s second and
// millisecond parts (millisecond resolution).
double read_timer() {
    struct timeb now;
    ftime(&now);
    return now.time + now.millitm / 1000.0;
}
// Fill an N x N matrix with uniform random floats in [0, 10).
void init(float **A) {
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            A[row][col] = (float)rand()/(float)(RAND_MAX/10.0);
        }
    }
}
// SIMD-vectorized matrix multiply. Both operands are indexed row-wise, so
// the caller must pass the second operand already TRANSPOSED (BT); the inner
// loop then reads contiguous memory and reduces into a scalar via omp simd.
void matmul_simd(float **A, float **B, float **C) {
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            float dot = 0;
#pragma omp simd reduction(+:dot)
            for (int k = 0; k < N; k++) {
                dot += A[i][k] * B[j][k];
            }
            C[i][j] = dot;
        }
    }
}
// Debug functions
// Debug aid: print the top-left 8x8 corner of a matrix, one bracketed
// row per line, followed by a blank line.
void print_matrix(float **matrix) {
    for (int row = 0; row < 8; row++) {
        printf("[");
        for (int col = 0; col < 8; col++) {
            printf("%.2f ", matrix[row][col]);
        }
        puts("]");
    }
    puts("");
}
// Reference serial matrix multiply; like matmul_simd it expects the second
// operand pre-transposed, so C[i][j] = dot(row i of A, row j of B).
void matmul_serial(float **A, float **B, float **C) {
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            float acc = 0;
            for (int k = 0; k < N; k++) {
                acc += A[i][k] * B[j][k];
            }
            C[i][j] = acc;
        }
    }
}
// Sum of absolute element-wise differences between two N x N matrices.
// Using fabsf() keeps positive and negative errors from cancelling each
// other out, which the previous signed sum allowed (it could report ~0
// for badly mismatched results).
float check(float **A, float **B){
    float difference = 0;
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            difference += fabsf(A[i][j] - B[i][j]);
        }
    }
    return difference;
}
// Main
// Driver: builds random N x N matrices, runs the SIMD and serial multiply
// kernels num_runs times each (both consume B pre-transposed), reports
// runtime/GFLOPS plus a correctness check, and frees every allocation
// before exiting (the previous version leaked all of it).
int main(int argc, char *argv[]) {
    // Matrices as arrays of row pointers.
    float **A = malloc(sizeof(float*)*N);
    float **B = malloc(sizeof(float*)*N);
    float **C_simd = malloc(sizeof(float*)*N);
    float **C_serial = malloc(sizeof(float*)*N);
    float **BT = malloc(sizeof(float*)*N);
    for (int i = 0; i<N; i++) {
        A[i] = malloc(sizeof(float)*N);
        B[i] = malloc(sizeof(float)*N);
        C_simd[i] = malloc(sizeof(float)*N);
        C_serial[i] = malloc(sizeof(float)*N);
        BT[i] = malloc(sizeof(float)*N);
    }

    srand(time(NULL));
    init(A);
    init(B);

    // Transpose B once so both kernels get row-wise (cache-friendly) access.
    for (int line = 0; line<N; line++) {
        for (int col = 0; col<N; col++) {
            BT[line][col] = B[col][line];
        }
    }

    int i;
    int num_runs = 10;

    double elapsed = read_timer();
    for (i=0; i<num_runs; i++)
        matmul_simd(A, BT, C_simd);
    elapsed = (read_timer() - elapsed);

    double elapsed_serial = read_timer();
    for (i=0; i<num_runs; i++)
        matmul_serial(A, BT, C_serial);
    elapsed_serial = (read_timer() - elapsed_serial);

    print_matrix(A);
    print_matrix(BT);
    puts("=\n");
    print_matrix(C_simd);
    puts("---------------------------------");
    print_matrix(C_serial);

    // 2*N^3 floating-point operations per multiplication.
    double gflops_omp = ((((2.0 * N) * N) * N * num_runs) / (1.0e9 * elapsed));
    double gflops_serial = ((((2.0 * N) * N) * N * num_runs) / (1.0e9 * elapsed_serial));
    printf("======================================================================================================\n");
    printf("\tMatrix Multiplication: A[N][N] * B[N][N] = C[N][N], N=%d\n", N);
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("Performance:\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("matmul_omp:\t\t%4f\t%4f\n", elapsed, gflops_omp);
    printf("matmul_serial:\t\t%4f\t%4f\n", elapsed_serial, gflops_serial);
    printf("Correctness check: %f\n", check(C_simd,C_serial));

    // Release every row, then the row-pointer arrays (fixes the leak).
    for (int r = 0; r < N; r++) {
        free(A[r]);
        free(B[r]);
        free(C_simd[r]);
        free(C_serial[r]);
        free(BT[r]);
    }
    free(A);
    free(B);
    free(C_simd);
    free(C_serial);
    free(BT);
    return 0;
}
|
omp_bug1fix.c | /******************************************************************************
* FILE: omp_bug1fix.c
* DESCRIPTION:
* This is a corrected version of the omp_bug1.c example. Corrections
* include removing all statements between the parallel for construct and
* the actual for loop, and introducing logic to preserve the ability to
* query a thread's id and print it from inside the for loop.
* AUTHOR: Blaise Barney 5/99
* LAST REVISED: 04/06/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 50
#define CHUNKSIZE 5
/* Vector addition c = a + b across a statically-chunked parallel loop.
 * Each thread latches its id exactly once: first_time is firstprivate, so
 * every thread starts with 'y' and flips it to 'n' on its first iteration. */
int main (int argc, char *argv[])
{
    int i, tid;
    int chunk = CHUNKSIZE;
    float a[N], b[N], c[N];
    char first_time = 'y';

    /* Initialize the input vectors. */
    for (i = 0; i < N; i++)
        a[i] = b[i] = i * 1.0;

#pragma omp parallel for \
    shared(a,b,c,chunk) \
    private(i,tid) \
    schedule(static,chunk) \
    firstprivate(first_time)
    for (i = 0; i < N; i++) {
        if (first_time == 'y') {
            tid = omp_get_thread_num();
            first_time = 'n';
        }
        c[i] = a[i] + b[i];
        printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]);
    }
    return 0;
}
|
check.h | #include <omp.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
/* Report whether the user explicitly disabled target offloading by
 * setting OMP_TARGET_OFFLOAD=DISABLED in the environment.
 * Returns 1 when disabled, 0 otherwise (including when unset). */
int offloading_disabled()
{
  const char *env = getenv("OMP_TARGET_OFFLOAD");
  return env != NULL && strcmp(env, "DISABLED") == 0;
}
/* Probe whether OpenMP target offloading is usable.
 * Returns 0 (and prints a confirmation) when the target region ran on a
 * device, or when offloading was explicitly disabled via
 * OMP_TARGET_OFFLOAD=DISABLED; returns 1 (and prints a warning) when the
 * target region still executed on the host. */
int check_offloading(){
  /* A[0] holds the probe result: 0 means "offloading OK or disabled",
   * nonzero means the target region ran on the initial (host) device.
   * Starts at -1 so a skipped target region also counts as failure. */
  int A[1] = {-1};
  if (offloading_disabled())
    A[0] = 0;
  else
    {
      /* Run on the default device; omp_is_initial_device() is 0 there. */
#pragma omp target
      {
        A[0] = omp_is_initial_device();
      }
    }
  if (!A[0]){
    printf("Able to use offloading!\n");
    return 0;
  }
  else
    printf("### Unable to use offloading! 8^( ###\n");
  return 1;
}
|
GB_unaryop__lnot_uint8_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint8_int16
// op(A') function: GB_tran__lnot_uint8_int16
// C type: uint8_t
// A type: int16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = !(aij != 0)
// type of the input matrix A entries
#define GB_ATYPE \
    int16_t

// type of the output matrix C entries
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]: load one entry of A into a typed local
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// lvalue of the output entry Cx [p]
#define GB_CX(p) Cx [p]

// unary operator: logical NOT, z = !(x != 0)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting: convert x to the output type uint8_t
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij)): load, cast, apply the operator, and store
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = !(Ax [p] != 0) for p in 0..anz-1, casting int16_t -> uint8_t.
// Cx and Ax must not overlap (both are restrict-qualified).  Returns
// GrB_NO_VALUE when this operator is disabled at compile time via
// GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB_unop__lnot_uint8_int16
(
    uint8_t *restrict Cx,           // output array, anz entries written
    const int16_t *restrict Ax,     // input array, anz entries read
    int64_t anz,                    // number of entries
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // each iteration is independent, so a static schedule suffices
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int16_t -> uint8_t, and apply
// the logical-NOT operator.  The actual work is performed by the included
// template GB_unaryop_transpose.c, specialized through the GB_* macros
// defined above.  Returns GrB_NO_VALUE when disabled at compile time,
// GrB_SUCCESS otherwise.
GrB_Info GB_tran__lnot_uint8_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
par_mgr.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Two-grid system solver
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#include "par_mgr.h"
#ifdef HYPRE_USING_DSUPERLU
#include "dsuperlu.h"
#endif
/* Create */
/* Allocate a new MGR (multigrid reduction) solver object and initialize
 * every field to its default value.  Returned as an opaque void* to be
 * passed to the other hypre_MGR* routines; freed with hypre_MGRDestroy. */
void *
hypre_MGRCreate()
{
   hypre_ParMGRData  *mgr_data;

   mgr_data = hypre_CTAlloc(hypre_ParMGRData, 1, HYPRE_MEMORY_HOST);

   /* block data */
   (mgr_data -> block_size) = 1;
   (mgr_data -> block_num_coarse_indexes) = NULL;
   (mgr_data -> point_marker_array) = NULL;
   (mgr_data -> block_cf_marker) = NULL;

   /* general data */
   (mgr_data -> max_num_coarse_levels) = 10;
   (mgr_data -> A_array) = NULL;
   (mgr_data -> P_array) = NULL;
   (mgr_data -> RT_array) = NULL;
   (mgr_data -> RAP) = NULL;
   (mgr_data -> CF_marker_array) = NULL;
   (mgr_data -> coarse_indices_lvls) = NULL;

   /* F-relaxation (fine-grid) solver data */
   (mgr_data -> A_ff_array) = NULL;
   (mgr_data -> F_fine_array) = NULL;
   (mgr_data -> U_fine_array) = NULL;
   (mgr_data -> aff_solver) = NULL;
   (mgr_data -> fine_grid_solver_setup) = NULL;
   (mgr_data -> fine_grid_solver_solve) = NULL;

   /* solve-phase vectors */
   (mgr_data -> F_array) = NULL;
   (mgr_data -> U_array) = NULL;
   (mgr_data -> residual) = NULL;
   (mgr_data -> rel_res_norms) = NULL;
   (mgr_data -> Vtemp) = NULL;
   (mgr_data -> Ztemp) = NULL;
   (mgr_data -> Utemp) = NULL;
   (mgr_data -> Ftemp) = NULL;

   /* iteration / interpolation defaults */
   (mgr_data -> num_iterations) = 0;
   (mgr_data -> num_interp_sweeps) = 1;
   (mgr_data -> num_restrict_sweeps) = 1;
   (mgr_data -> trunc_factor) = 0.0;
   (mgr_data -> max_row_sum) = 0.9;
   (mgr_data -> strong_threshold) = 0.25;
   (mgr_data -> P_max_elmts) = 0;

   /* coarse-grid solver and smoothers */
   (mgr_data -> coarse_grid_solver) = NULL;
   (mgr_data -> coarse_grid_solver_setup) = NULL;
   (mgr_data -> coarse_grid_solver_solve) = NULL;
   (mgr_data -> global_smoother) = NULL;
   (mgr_data -> use_default_cgrid_solver) = 1;
   (mgr_data -> use_default_fsolver) = -1; // set to -1 to avoid printing when not used

   /* relaxation / convergence defaults */
   (mgr_data -> omega) = 1.;
   (mgr_data -> max_iter) = 20;
   (mgr_data -> tol) = 1.0e-6;
   (mgr_data -> relax_type) = 0;
   (mgr_data -> relax_order) = 1; // not fully utilized. Only used to compute L1-norms.
   (mgr_data -> interp_type) = NULL;
   (mgr_data -> restrict_type) = NULL;
   (mgr_data -> num_relax_sweeps) = 1;
   (mgr_data -> relax_weight) = 1.0;

   /* logging and print levels */
   (mgr_data -> logging) = 0;
   (mgr_data -> print_level) = 0;
   (mgr_data -> frelax_print_level) = 0;
   (mgr_data -> cg_print_level) = 0;

   (mgr_data -> l1_norms) = NULL;

   /* reserved coarse nodes */
   (mgr_data -> reserved_coarse_size) = 0;
   (mgr_data -> reserved_coarse_indexes) = NULL;
   (mgr_data -> reserved_Cpoint_local_indexes) = NULL;

   /* global smoothing */
   (mgr_data -> diaginv) = NULL;
   (mgr_data -> global_smooth_iters) = 1;
   (mgr_data -> global_smooth_type) = 0;

   (mgr_data -> set_non_Cpoints_to_F) = 0;
   (mgr_data -> idx_array) = NULL;

   /* V-cycle F-relaxation data */
   (mgr_data -> Frelax_method) = NULL;
   (mgr_data -> VcycleRelaxVtemp) = NULL;
   (mgr_data -> VcycleRelaxZtemp) = NULL;
   (mgr_data -> FrelaxVcycleData) = NULL;
   (mgr_data -> Frelax_num_functions) = NULL;
   (mgr_data -> max_local_lvls) = 10;

   (mgr_data -> use_non_galerkin_cg) = NULL;
   (mgr_data -> print_coarse_system) = 0;
   (mgr_data -> set_c_points_method) = 0;
   (mgr_data -> lvl_to_keep_cpoints) = 0;
   (mgr_data -> cg_convergence_factor) = 0.0;
   (mgr_data -> truncate_coarse_grid_threshold) = 0.0;

   return (void *) mgr_data;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Destroy */
/* Free an MGR solver object created by hypre_MGRCreate and everything it
 * owns: per-level CF markers, hierarchy matrices/vectors, the (default)
 * coarse-grid and F-relaxation solvers, temporaries, and finally the
 * struct itself.  Destruction order matters: per-level entries are freed
 * before their containing arrays.  Returns hypre_error_flag. */
HYPRE_Int
hypre_MGRDestroy( void *data )
{
   hypre_ParMGRData * mgr_data = (hypre_ParMGRData*) data;
   HYPRE_Int i;
   HYPRE_Int num_coarse_levels = (mgr_data -> num_coarse_levels);

   /* block info data: free each level's marker array, then the array of
    * pointers itself */
   if ((mgr_data -> block_cf_marker))
   {
      for (i = 0; i < (mgr_data -> max_num_coarse_levels); i++)
      {
         if ((mgr_data -> block_cf_marker)[i])
         {
            hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
         }
      }
      hypre_TFree((mgr_data -> block_cf_marker), HYPRE_MEMORY_HOST);
      (mgr_data -> block_cf_marker) = NULL;
   }
   if (mgr_data -> block_num_coarse_indexes)
   {
      hypre_TFree(mgr_data -> block_num_coarse_indexes, HYPRE_MEMORY_HOST);
      (mgr_data -> block_num_coarse_indexes) = NULL;
   }

   /* final residual vector */
   if ((mgr_data -> residual))
   {
      hypre_ParVectorDestroy( (mgr_data -> residual) );
      (mgr_data -> residual) = NULL;
   }
   if ((mgr_data -> rel_res_norms))
   {
      hypre_TFree( (mgr_data -> rel_res_norms), HYPRE_MEMORY_HOST);
      (mgr_data -> rel_res_norms) = NULL;
   }

   /* temp vectors for solve phase */
   if ((mgr_data -> Vtemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> Vtemp) );
      (mgr_data -> Vtemp) = NULL;
   }
   if ((mgr_data -> Ztemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> Ztemp) );
      (mgr_data -> Ztemp) = NULL;
   }
   if ((mgr_data -> Utemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> Utemp) );
      (mgr_data -> Utemp) = NULL;
   }
   if ((mgr_data -> Ftemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> Ftemp) );
      (mgr_data -> Ftemp) = NULL;
   }

   /* coarse grid solver: only destroyed here when MGR created it
    * (use_default_cgrid_solver); user-supplied solvers are the caller's
    * responsibility */
   if ((mgr_data -> use_default_cgrid_solver))
   {
      if ((mgr_data -> coarse_grid_solver))
      {
         hypre_BoomerAMGDestroy( (mgr_data -> coarse_grid_solver) );
      }
      (mgr_data -> coarse_grid_solver) = NULL;
   }

   /* l1_norms */
   if ((mgr_data -> l1_norms))
   {
      for (i = 0; i < (num_coarse_levels); i++)
      {
         hypre_SeqVectorDestroy((mgr_data -> l1_norms)[i]);
      }
      hypre_TFree((mgr_data -> l1_norms), HYPRE_MEMORY_HOST);
   }

   /* coarse_indices_lvls */
   if ((mgr_data -> coarse_indices_lvls))
   {
      for (i = 0; i < (num_coarse_levels); i++)
         if ((mgr_data -> coarse_indices_lvls)[i])
         {
            hypre_TFree((mgr_data -> coarse_indices_lvls)[i], HYPRE_MEMORY_HOST);
         }
      hypre_TFree((mgr_data -> coarse_indices_lvls), HYPRE_MEMORY_HOST);
   }

   /* linear system and cf marker array.  Loops start at level 1: the
    * level-0 entries (the user's input system) are not destroyed here —
    * presumably owned by the caller; verify against setup code. */
   if (mgr_data -> A_array || mgr_data -> P_array || mgr_data -> RT_array ||
       mgr_data -> CF_marker_array)
   {
      for (i = 1; i < num_coarse_levels + 1; i++)
      {
         hypre_ParVectorDestroy((mgr_data -> F_array)[i]);
         hypre_ParVectorDestroy((mgr_data -> U_array)[i]);
         if ((mgr_data -> P_array)[i - 1])
         {
            hypre_ParCSRMatrixDestroy((mgr_data -> P_array)[i - 1]);
         }
         if ((mgr_data -> RT_array)[i - 1])
         {
            hypre_ParCSRMatrixDestroy((mgr_data -> RT_array)[i - 1]);
         }
         hypre_IntArrayDestroy(mgr_data -> CF_marker_array[i - 1]);
      }
      for (i = 1; i < (num_coarse_levels); i++)
      {
         if ((mgr_data -> A_array)[i])
         {
            hypre_ParCSRMatrixDestroy((mgr_data -> A_array)[i]);
         }
      }
   }

   /* AMG for Frelax */
   if (mgr_data -> A_ff_array || mgr_data -> F_fine_array || mgr_data -> U_fine_array)
   {
      for (i = 1; i < num_coarse_levels + 1; i++)
      {
         if (mgr_data -> F_fine_array[i])
         {
            hypre_ParVectorDestroy((mgr_data -> F_fine_array)[i]);
         }
         if (mgr_data -> U_fine_array[i])
         {
            hypre_ParVectorDestroy((mgr_data -> U_fine_array)[i]);
         }
      }
      for (i = 1; i < (num_coarse_levels); i++)
      {
         if ((mgr_data -> A_ff_array)[i])
         {
            hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[i]);
         }
      }
      /* the level-0 A_ff matrix is only owned here when MGR built the
       * default fine-grid solver */
      if (mgr_data -> use_default_fsolver)
      {
         hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[0]);
      }
      hypre_TFree(mgr_data -> F_fine_array, HYPRE_MEMORY_HOST);
      (mgr_data -> F_fine_array) = NULL;
      hypre_TFree(mgr_data -> U_fine_array, HYPRE_MEMORY_HOST);
      (mgr_data -> U_fine_array) = NULL;
      hypre_TFree(mgr_data -> A_ff_array, HYPRE_MEMORY_HOST);
      (mgr_data -> A_ff_array) = NULL;
   }

   /* per-level F-relaxation AMG solvers */
   if (mgr_data -> aff_solver)
   {
      for (i = 1; i < (num_coarse_levels); i++)
      {
         if ((mgr_data -> aff_solver)[i])
         {
            hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[i]);
         }
      }
      /* level-0 solver is only owned here when it is the default one */
      if (mgr_data -> use_default_fsolver)
      {
         if ((mgr_data -> aff_solver)[0])
         {
            hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[0]);
         }
      }
      hypre_TFree(mgr_data -> aff_solver, HYPRE_MEMORY_HOST);
      (mgr_data -> aff_solver) = NULL;
   }

   /* containers for the hierarchy (entries already destroyed above) */
   if ((mgr_data -> F_array))
   {
      hypre_TFree((mgr_data -> F_array), HYPRE_MEMORY_HOST);
      (mgr_data -> F_array) = NULL;
   }
   if ((mgr_data -> U_array))
   {
      hypre_TFree((mgr_data -> U_array), HYPRE_MEMORY_HOST);
      (mgr_data -> U_array) = NULL;
   }
   if ((mgr_data -> A_array))
   {
      hypre_TFree((mgr_data -> A_array), HYPRE_MEMORY_HOST);
      (mgr_data -> A_array) = NULL;
   }
   if ((mgr_data -> P_array))
   {
      hypre_TFree((mgr_data -> P_array), HYPRE_MEMORY_HOST);
      (mgr_data -> P_array) = NULL;
   }
   if ((mgr_data -> RT_array))
   {
      hypre_TFree((mgr_data -> RT_array), HYPRE_MEMORY_HOST);
      (mgr_data -> RT_array) = NULL;
   }
   if ((mgr_data -> CF_marker_array))
   {
      hypre_TFree((mgr_data -> CF_marker_array), HYPRE_MEMORY_HOST);
      (mgr_data -> CF_marker_array) = NULL;
   }
   if ((mgr_data -> reserved_Cpoint_local_indexes))
   {
      hypre_TFree((mgr_data -> reserved_Cpoint_local_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> reserved_Cpoint_local_indexes) = NULL;
   }

   /* per-level interpolation / restriction type arrays */
   if (mgr_data -> restrict_type)
   {
      hypre_TFree(mgr_data -> restrict_type, HYPRE_MEMORY_HOST);
      (mgr_data -> restrict_type) = NULL;
   }
   if (mgr_data -> interp_type)
   {
      hypre_TFree(mgr_data -> interp_type, HYPRE_MEMORY_HOST);
      (mgr_data -> interp_type) = NULL;
   }

   /* Frelax_method */
   if (mgr_data -> Frelax_method)
   {
      hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST);
      (mgr_data -> Frelax_method) = NULL;
   }
   /* Frelax_num_functions */
   if (mgr_data -> Frelax_num_functions)
   {
      hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST);
      (mgr_data -> Frelax_num_functions) = NULL;
   }

   /* data for V-cycle F-relaxation */
   if ((mgr_data -> VcycleRelaxVtemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxVtemp) );
      (mgr_data -> VcycleRelaxVtemp) = NULL;
   }
   if ((mgr_data -> VcycleRelaxZtemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxZtemp) );
      (mgr_data -> VcycleRelaxZtemp) = NULL;
   }
   if (mgr_data -> FrelaxVcycleData)
   {
      for (i = 0; i < num_coarse_levels; i++)
      {
         if ((mgr_data -> FrelaxVcycleData)[i])
         {
            hypre_MGRDestroyFrelaxVcycleData((mgr_data -> FrelaxVcycleData)[i]);
            (mgr_data -> FrelaxVcycleData)[i] = NULL;
         }
      }
      hypre_TFree(mgr_data -> FrelaxVcycleData, HYPRE_MEMORY_HOST);
      (mgr_data -> FrelaxVcycleData) = NULL;
   }

   /* data for reserved coarse nodes */
   if (mgr_data -> reserved_coarse_indexes)
   {
      hypre_TFree(mgr_data -> reserved_coarse_indexes, HYPRE_MEMORY_HOST);
      (mgr_data -> reserved_coarse_indexes) = NULL;
   }

   /* index array for setting Cpoints by global block; only owned when it
    * was allocated by hypre_MGRSetCpointsByContiguousBlock (method 1) */
   if ((mgr_data -> set_c_points_method) == 1)
   {
      hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST);
      (mgr_data -> idx_array) = NULL;
   }

   /* array for setting option to use non-Galerkin coarse grid */
   if (mgr_data -> use_non_galerkin_cg)
   {
      hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST);
      (mgr_data -> use_non_galerkin_cg) = NULL;
   }

   /* coarse level matrix - RAP */
   if ((mgr_data -> RAP))
   {
      hypre_ParCSRMatrixDestroy((mgr_data -> RAP));
   }
   if ((mgr_data -> diaginv))
   {
      hypre_TFree((mgr_data -> diaginv), HYPRE_MEMORY_HOST);
   }

   /* global smoother: only types 8 (Euclid) and 16 (ILU) allocate a
    * solver object that must be destroyed here */
   if ((mgr_data -> global_smoother))
   {
      if (mgr_data -> global_smooth_type == 8)
      {
         HYPRE_EuclidDestroy((mgr_data -> global_smoother));
      }
      else if (mgr_data -> global_smooth_type == 16)
      {
         HYPRE_ILUDestroy((mgr_data -> global_smoother));
      }
   }

   /* mgr data */
   hypre_TFree(mgr_data, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
/* Create data for V-cycle F-relaxtion */
/* Allocate a hypre_ParAMGData object to drive the V-cycle F-relaxation
 * and initialize the fields MGR uses to their defaults.  Freed with
 * hypre_MGRDestroyFrelaxVcycleData. */
void *
hypre_MGRCreateFrelaxVcycleData()
{
   hypre_ParAMGData  *vdata = hypre_CTAlloc(hypre_ParAMGData, 1, HYPRE_MEMORY_HOST);

   /* hierarchy arrays, built later during setup */
   hypre_ParAMGDataAArray(vdata) = NULL;
   hypre_ParAMGDataPArray(vdata) = NULL;
   hypre_ParAMGDataFArray(vdata) = NULL;
   hypre_ParAMGDataCFMarkerArray(vdata) = NULL;
   hypre_ParAMGDataVtemp(vdata)  = NULL;
   hypre_ParAMGDataAMat(vdata)   = NULL;
   hypre_ParAMGDataBVec(vdata)   = NULL;
   hypre_ParAMGDataZtemp(vdata)  = NULL;
   hypre_ParAMGDataCommInfo(vdata) = NULL;
   hypre_ParAMGDataUArray(vdata) = NULL;
   hypre_ParAMGDataNewComm(vdata) = hypre_MPI_COMM_NULL;

   /* scalar defaults for the F-relaxation V-cycle */
   hypre_ParAMGDataNumLevels(vdata) = 0;
   hypre_ParAMGDataMaxLevels(vdata) = 10;
   hypre_ParAMGDataNumFunctions(vdata) = 1;
   hypre_ParAMGDataSCommPkgSwitch(vdata) = 1.0;
   hypre_ParAMGDataRelaxOrder(vdata) = 1;
   hypre_ParAMGDataMaxCoarseSize(vdata) = 9;
   hypre_ParAMGDataMinCoarseSize(vdata) = 0;
   hypre_ParAMGDataUserCoarseRelaxType(vdata) = 9;

   return (void *) vdata;
}
/* Destroy data for V-cycle F-relaxation */
/* Free a hypre_ParAMGData object created by hypre_MGRCreateFrelaxVcycleData,
 * including its per-level matrices, vectors, CF markers, and dof-func
 * arrays.  Vtemp/Ztemp are intentionally NOT destroyed here: they alias
 * VcycleRelaxVtemp/Ztemp of the owning MGR object (see comments below).
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_MGRDestroyFrelaxVcycleData( void *data )
{
   hypre_ParAMGData * vdata = (hypre_ParAMGData*) data;
   HYPRE_Int i;
   HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(vdata);
   MPI_Comm new_comm = hypre_ParAMGDataNewComm(vdata);

   /* level-0 dof-func array; levels >= 1 are freed in the loop below */
   hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[0], HYPRE_MEMORY_HOST);
   for (i = 1; i < num_levels + 1; i++)
   {
      if (hypre_ParAMGDataAArray(vdata)[i])
      {
         hypre_ParCSRMatrixDestroy(hypre_ParAMGDataAArray(vdata)[i]);
      }
      if (hypre_ParAMGDataPArray(vdata)[i - 1])
      {
         hypre_ParCSRMatrixDestroy(hypre_ParAMGDataPArray(vdata)[i - 1]);
      }
      hypre_IntArrayDestroy(hypre_ParAMGDataCFMarkerArray(vdata)[i - 1]);
      hypre_ParVectorDestroy(hypre_ParAMGDataFArray(vdata)[i]);
      hypre_ParVectorDestroy(hypre_ParAMGDataUArray(vdata)[i]);
      hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[i], HYPRE_MEMORY_HOST);
   }
   /* with no levels built, the loop above never ran: free the level-0
    * CF marker explicitly */
   if (num_levels < 1)
   {
      hypre_IntArrayDestroy(hypre_ParAMGDataCFMarkerArray(vdata)[0]);
   }

   /* Points to VcycleRelaxVtemp of mgr_data, which is already destroyed */
   //hypre_ParVectorDestroy(hypre_ParAMGDataVtemp(vdata));
   hypre_TFree(hypre_ParAMGDataFArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataUArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataAArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataPArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata), HYPRE_MEMORY_HOST);
   //hypre_TFree(hypre_ParAMGDataGridRelaxType(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata), HYPRE_MEMORY_HOST);

   /* Points to VcycleRelaxZtemp of mgr_data, which is already destroyed */
   /*
    if (hypre_ParAMGDataZtemp(vdata))
       hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(vdata));
   */
   if (hypre_ParAMGDataAMat(vdata)) { hypre_TFree(hypre_ParAMGDataAMat(vdata), HYPRE_MEMORY_HOST); }
   if (hypre_ParAMGDataBVec(vdata)) { hypre_TFree(hypre_ParAMGDataBVec(vdata), HYPRE_MEMORY_HOST); }
   if (hypre_ParAMGDataCommInfo(vdata)) { hypre_TFree(hypre_ParAMGDataCommInfo(vdata), HYPRE_MEMORY_HOST); }

   if (new_comm != hypre_MPI_COMM_NULL)
   {
      hypre_MPI_Comm_free (&new_comm);
   }
   hypre_TFree(vdata, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
/* Set C-point variables for each reduction level.
 * Currently not implemented: the arrays are stored by reference only. */
HYPRE_Int
hypre_MGRSetReductionLevelCpoints( void *mgr_vdata,
                                   HYPRE_Int nlevels,
                                   HYPRE_Int *num_coarse_points,
                                   HYPRE_Int **level_coarse_indexes)
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;

   mgr->num_coarse_levels    = nlevels;
   mgr->num_coarse_per_level = num_coarse_points;
   mgr->level_coarse_indexes = level_coarse_indexes;

   return hypre_error_flag;
}
/* Initialize some data */
/* Choose whether points that are not coarse on a level are explicitly
 * tagged as F-points (nonCptToFptFlag != 0). */
HYPRE_Int
hypre_MGRSetNonCpointsToFpoints( void *mgr_vdata, HYPRE_Int nonCptToFptFlag)
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;

   mgr->set_non_Cpoints_to_F = nonCptToFptFlag;

   return hypre_error_flag;
}
/* Record the level up to which reserved C-points are kept before being
 * reduced ahead of the coarse grid solve. */
HYPRE_Int
hypre_MGRSetReservedCpointsLevelToKeep(void *mgr_vdata, HYPRE_Int level)
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;

   mgr->lvl_to_keep_cpoints = level;

   return hypre_error_flag;
}
/* Set Cpoints by contiguous blocks, i.e. p1, p2, ..., pn, s1, s2, ..., sn, ...
 * begin_idx_array (may be NULL) supplies the starting index of each of the
 * block_size contiguous variable blocks; a private copy is stored. */
HYPRE_Int
hypre_MGRSetCpointsByContiguousBlock( void *mgr_vdata,
                                      HYPRE_Int  block_size,
                                      HYPRE_Int  max_num_levels,
                                      HYPRE_BigInt  *begin_idx_array,
                                      HYPRE_Int  *block_num_coarse_points,
                                      HYPRE_Int  **block_coarse_indexes)
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   HYPRE_Int k;

   /* drop any previously stored start-index array */
   if (mgr->idx_array != NULL)
   {
      hypre_TFree(mgr->idx_array, HYPRE_MEMORY_HOST);
      mgr->idx_array = NULL;
   }

   /* take a private copy of the block start indices (zero-filled when
    * begin_idx_array is absent, since CTAlloc clears the memory) */
   HYPRE_BigInt *starts = hypre_CTAlloc(HYPRE_BigInt, block_size, HYPRE_MEMORY_HOST);
   if (begin_idx_array != NULL)
   {
      for (k = 0; k < block_size; k++)
      {
         starts[k] = begin_idx_array[k];
      }
   }

   /* store the CF block structure, then switch the method flag to 1
    * (global contiguous blocks) */
   hypre_MGRSetCpointsByBlock(mgr, block_size, max_num_levels,
                              block_num_coarse_points, block_coarse_indexes);
   mgr->idx_array = starts;
   mgr->set_c_points_method = 1;

   return hypre_error_flag;
}
/* Initialize/ set local block data information.
 *
 * For each of up to max_num_levels reduction levels, records which of the
 * block_size variables in a local block stay coarse (CMRK) and which become
 * fine (FMRK): block_num_coarse_points[l] coarse variables per level, at
 * positions block_coarse_indexes[l][j] within the block.  Any previously
 * stored block data is freed first.  Selects C-point method 0 (local
 * blocks).  Returns hypre_error_flag. */
HYPRE_Int
hypre_MGRSetCpointsByBlock( void      *mgr_vdata,
                            HYPRE_Int  block_size,
                            HYPRE_Int  max_num_levels,
                            HYPRE_Int  *block_num_coarse_points,
                            HYPRE_Int  **block_coarse_indexes)
{
   HYPRE_Int  i, j;
   HYPRE_Int  **block_cf_marker = NULL;
   HYPRE_Int  *block_num_coarse_indexes = NULL;
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   /* free block cf_marker data if not previously destroyed */
   if ((mgr_data -> block_cf_marker) != NULL)
   {
      for (i = 0; i < (mgr_data -> max_num_coarse_levels); i++)
      {
         if ((mgr_data -> block_cf_marker)[i])
         {
            hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
            (mgr_data -> block_cf_marker)[i] = NULL;
         }
      }
      hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST);
      (mgr_data -> block_cf_marker) = NULL;
   }
   if ((mgr_data -> block_num_coarse_indexes))
   {
      hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> block_num_coarse_indexes) = NULL;
   }

   /* store block cf_marker: default every slot to FMRK, then mark the
    * requested coarse positions with CMRK.
    * FIX: the previous memset(ptr, FMRK, n * sizeof(HYPRE_Int)) fills
    * *bytes*, not HYPRE_Int values, and is only accidentally correct for
    * marker values 0 and -1; an explicit loop is correct for any FMRK. */
   block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_levels; i++)
   {
      block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST);
      for (j = 0; j < block_size; j++)
      {
         block_cf_marker[i][j] = FMRK;
      }
   }
   for (i = 0; i < max_num_levels; i++)
   {
      for (j = 0; j < block_num_coarse_points[i]; j++)
      {
         (block_cf_marker[i])[block_coarse_indexes[i][j]] = CMRK;
      }
   }

   /* store block_num_coarse_points */
   if (max_num_levels > 0)
   {
      block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST);
      for (i = 0; i < max_num_levels; i++)
      {
         block_num_coarse_indexes[i] = block_num_coarse_points[i];
      }
   }

   /* set block data */
   (mgr_data -> max_num_coarse_levels) = max_num_levels;
   (mgr_data -> block_size) = block_size;
   (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes;
   (mgr_data -> block_cf_marker) = block_cf_marker;
   (mgr_data -> set_c_points_method) = 0;

   return hypre_error_flag;
}
/* Set C-points per level using a point-marker array.
 *
 * lvl_coarse_indexes[l][0..lvl_num_coarse_points[l]-1] lists the marker
 * values treated as coarse on level l; the remaining marker slots default
 * to FMRK.  point_marker_array is stored by reference (not copied) —
 * the caller must keep it alive.  Selects C-point method 2.
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_MGRSetCpointsByPointMarkerArray( void      *mgr_vdata,
                                       HYPRE_Int  block_size,
                                       HYPRE_Int  max_num_levels,
                                       HYPRE_Int  *lvl_num_coarse_points,
                                       HYPRE_Int  **lvl_coarse_indexes,
                                       HYPRE_Int  *point_marker_array)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int  i, j;
   HYPRE_Int  **block_cf_marker = NULL;
   HYPRE_Int  *block_num_coarse_indexes = NULL;

   /* free block cf_marker data if not previously destroyed */
   if ((mgr_data -> block_cf_marker) != NULL)
   {
      for (i = 0; i < (mgr_data -> max_num_coarse_levels); i++)
      {
         if ((mgr_data -> block_cf_marker)[i])
         {
            hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
            (mgr_data -> block_cf_marker)[i] = NULL;
         }
      }
      hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST);
      (mgr_data -> block_cf_marker) = NULL;
   }
   if ((mgr_data -> block_num_coarse_indexes))
   {
      hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> block_num_coarse_indexes) = NULL;
   }

   /* store block cf_marker: default every slot to FMRK, then copy the
    * per-level coarse marker values.
    * FIX: the previous memset(ptr, FMRK, n * sizeof(HYPRE_Int)) fills
    * *bytes*, not HYPRE_Int values, and is only accidentally correct for
    * marker values 0 and -1; an explicit loop is correct for any FMRK. */
   block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_levels; i++)
   {
      block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST);
      for (j = 0; j < block_size; j++)
      {
         block_cf_marker[i][j] = FMRK;
      }
   }
   for (i = 0; i < max_num_levels; i++)
   {
      for (j = 0; j < lvl_num_coarse_points[i]; j++)
      {
         block_cf_marker[i][j] = lvl_coarse_indexes[i][j];
      }
   }

   /* store block_num_coarse_points */
   if (max_num_levels > 0)
   {
      block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST);
      for (i = 0; i < max_num_levels; i++)
      {
         block_num_coarse_indexes[i] = lvl_num_coarse_points[i];
      }
   }

   /* set block data */
   (mgr_data -> max_num_coarse_levels) = max_num_levels;
   (mgr_data -> block_size) = block_size;
   (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes;
   (mgr_data -> block_cf_marker) = block_cf_marker;
   (mgr_data -> point_marker_array) = point_marker_array;
   (mgr_data -> set_c_points_method) = 2;

   return hypre_error_flag;
}
/* Set the number of points that remain part of the coarse grid throughout
 * the hierarchy, together with their global indices (copied into the MGR
 * object). */
HYPRE_Int
hypre_MGRSetReservedCoarseNodes(void *mgr_vdata,
                                HYPRE_Int reserved_coarse_size,
                                HYPRE_BigInt *reserved_cpt_index)
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData *) mgr_vdata;
   HYPRE_BigInt     *indexes = NULL;
   HYPRE_Int         k;

   if (!mgr)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Warning! MGR object empty!\n");
      return hypre_error_flag;
   }
   if (reserved_coarse_size < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   /* free data not previously destroyed */
   if (mgr->reserved_coarse_indexes)
   {
      hypre_TFree(mgr->reserved_coarse_indexes, HYPRE_MEMORY_HOST);
      mgr->reserved_coarse_indexes = NULL;
   }

   /* copy the reserved coarse node indices */
   if (reserved_coarse_size > 0)
   {
      indexes = hypre_CTAlloc(HYPRE_BigInt, reserved_coarse_size, HYPRE_MEMORY_HOST);
      for (k = 0; k < reserved_coarse_size; k++)
      {
         indexes[k] = reserved_cpt_index[k];
      }
   }

   mgr->reserved_coarse_size    = reserved_coarse_size;
   mgr->reserved_coarse_indexes = indexes;

   return hypre_error_flag;
}
/* Set CF marker array */
/* Compute the CF (coarse/fine) splitting for one MGR level.
 *
 * When cflag is nonzero (last level), the splitting is exactly the fixed
 * coarse set: every point is FMRK except the fixed_coarse_size points
 * listed in fixed_coarse_indexes, which become CMRK.  Otherwise a
 * BoomerAMG coarsening of (S, A) is computed first and then adjusted so
 * that all fixed coarse points are CMRK and every other point is FMRK.
 * The resulting marker is returned through CF_marker_ptr.
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_MGRCoarsen(hypre_ParCSRMatrix *S,
                 hypre_ParCSRMatrix *A,
                 HYPRE_Int fixed_coarse_size,
                 HYPRE_Int *fixed_coarse_indexes,
                 HYPRE_Int debug_flag,
                 hypre_IntArray **CF_marker_ptr,
                 HYPRE_Int cflag)
{
   HYPRE_Int *CF_marker = NULL;
   HYPRE_Int *cindexes = fixed_coarse_indexes;
   HYPRE_Int i, row, nc;
   HYPRE_Int nloc = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));

   /* If this is the last level, coarsen onto fixed coarse set */
   if (cflag)
   {
      /* replace any existing marker with a fresh one, all F by default */
      if (*CF_marker_ptr != NULL)
      {
         hypre_IntArrayDestroy(*CF_marker_ptr);
      }
      *CF_marker_ptr = hypre_IntArrayCreate(nloc);
      hypre_IntArrayInitialize(*CF_marker_ptr);
      hypre_IntArraySetConstantValues(*CF_marker_ptr, FMRK);
      CF_marker = hypre_IntArrayData(*CF_marker_ptr);

      /* first mark fixed coarse set */
      nc = fixed_coarse_size;
      for (i = 0; i < nc; i++)
      {
         CF_marker[cindexes[i]] = CMRK;
      }
   }
   else
   {
      /* First coarsen to get initial CF splitting.
       * This is then followed by updating the CF marker to pass
       * coarse information to the next levels. NOTE: It may be
       * convenient to implement this way (allows the use of multiple
       * coarsening strategies without changing too much code),
       * but not necessarily the best option, compared to initializing
       * CF_marker first and then coarsening on subgraph which excludes
       * the initialized coarse nodes.
       */
      hypre_BoomerAMGCoarsen(S, A, 0, debug_flag, CF_marker_ptr);
      CF_marker = hypre_IntArrayData(*CF_marker_ptr);

      /* Update CF_marker to correct Cpoints marked as Fpoints. */
      nc = fixed_coarse_size;
      for (i = 0; i < nc; i++)
      {
         CF_marker[cindexes[i]] = CMRK;
      }
      /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate
       * between type of F-points (example Ruge coarsening). We do not need that distinction here.
       */
      for (row = 0; row < nloc; row++)
      {
         if (CF_marker[row] == CMRK) { continue; }
         CF_marker[row] = FMRK;
      }
#if 0
      /* IMPORTANT: Update coarse_indexes array to define the positions of the fixed coarse points
       * in the next level.
       */
      nc = 0;
      index_i = 0;
      for (row = 0; row < nloc; row++)
      {
         /* loop through new c-points */
         if (CF_marker[row] == CMRK) { nc++; }
         else if (CF_marker[row] == S_CMRK)
         {
            /* previously marked c-point is part of fixed coarse set. Track its current local index */
            cindexes[index_i++] = nc;
            /* reset c-point from S_CMRK to CMRK */
            cf_marker[row] = CMRK;
            nc++;
         }
         /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate
          * between type of F-points (example Ruge coarsening). We do not need that distinction here.
          */
         else
         {
            CF_marker[row] = FMRK;
         }
      }
      /* check if this should be last level */
      if ( nc == fixed_coarse_size)
      {
         last_level = 1;
      }
      //printf(" nc = %d and fixed coarse size = %d \n", nc, fixed_coarse_size);
#endif
   }
   return hypre_error_flag;
}
/* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */
HYPRE_Int
hypre_MGRBuildP( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int method,
HYPRE_Int debug_flag,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Real *a_diag;
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
// HYPRE_Int jj_begin_row,jj_begin_row_offd;
// HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
//HYPRE_BigInt *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int num_cols_P_offd;
HYPRE_Int i, i1;
HYPRE_Int j, jl, jj;
HYPRE_Int start;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
//num_threads = hypre_NumThreads();
// Temporary fix, disable threading
// TODO: enable threading
num_threads = 1;
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag < 0)
{
debug_flag = -debug_flag;
}
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
if (num_cols_A_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); }
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
{
int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; }
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine / num_threads;
rest = n_fine - size * num_threads;
if (j < rest)
{
ns = j * size + j;
ne = (j + 1) * size + j + 1;
}
else
{
ns = j * size + rest;
ne = (j + 1) * size + rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}
*--------------------------------------------------------------------*/
else
{
for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
{
i1 = A_diag_j[jj];
if ((CF_marker[i1] >= 0) && (method > 0))
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
{
i1 = A_offd_j[jj];
if ((CF_marker_offd[i1] >= 0) && (method > 0))
{
jj_count_offd[j]++;
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i = 0; i < num_threads - 1; i++)
{
coarse_counter[i + 1] += coarse_counter[i];
jj_count[i + 1] += jj_count[i];
jj_count_offd[i + 1] += jj_count_offd[i];
}
i = num_threads - 1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) { coarse_shift = coarse_counter[j - 1]; }
size = n_fine / num_threads;
rest = n_fine - size * num_threads;
if (j < rest)
{
ns = j * size + j;
ne = (j + 1) * size + j + 1;
}
else
{
ns = j * size + rest;
ne = (j + 1) * size + rest;
}
for (i = ns; i < ne; i++)
{
fine_to_coarse[i] += coarse_shift;
}
}
/* index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
big_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]+ my_first_cpt;
}
comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
*/
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
//for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
if (CF_marker[i] < 0)
{
for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
{
i1 = A_diag_j[jj];
if ( i == i1 ) /* diagonal of A only */
{
a_diag[i] = 1.0 / A_diag_data[jj];
}
}
}
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine / num_threads;
rest = n_fine - size * num_threads;
if (jl < rest)
{
ns = jl * size + jl;
ne = (jl + 1) * size + jl + 1;
}
else
{
ns = jl * size + rest;
ne = (jl + 1) * size + rest;
}
jj_counter = 0;
if (jl > 0) { jj_counter = jj_count[jl - 1]; }
jj_counter_offd = 0;
if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
}
else
{
P_marker_offd = NULL;
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if ((CF_marker[i1] >= 0) && (method > 0))
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
/*
if(method == 0)
{
P_diag_data[jj_counter] = 0.0;
}
*/
if (method == 1)
{
P_diag_data[jj_counter] = - A_diag_data[jj];
}
else if (method == 2)
{
P_diag_data[jj_counter] = - A_diag_data[jj] * a_diag[i];
}
jj_counter++;
}
}
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
{
i1 = A_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if ((CF_marker_offd[i1] >= 0) && (method > 0))
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
/*
if(method == 0)
{
P_offd_data[jj_counter_offd] = 0.0;
}
*/
if (method == 1)
{
P_offd_data[jj_counter_offd] = - A_offd_data[jj];
}
else if (method == 2)
{
P_offd_data[jj_counter_offd] = - A_offd_data[jj] * a_diag[i];
}
jj_counter_offd++;
}
}
}
}
P_offd_i[i + 1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
hypre_TFree(a_diag, HYPRE_MEMORY_HOST);
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker[i] = 0;
}
num_cols_P_offd = 0;
for (i = 0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_cols_P_offd; i++)
{
while (P_marker[index] == 0) { index++; }
tmp_map_offd[i] = index++;
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
for (i = 0; i < n_fine; i++)
if (CF_marker[i] == -3) { CF_marker[i] = -1; }
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
//hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
return (0);
}
/* Interpolation for MGR - Dynamic Row Sum method.
 *
 * Builds the MGR interpolation operator P:
 *   - C-point rows of P are identity rows (single entry 1.0).
 *   - F-point rows get weights -a_ij / a_ii for every C-point neighbor j,
 *     i.e. a diagonal (Jacobi-like) approximation of -inv(A_ff)*A_fc.
 *
 * Parameters:
 *   A                    - fine-grid ParCSR matrix
 *   CF_marker            - C/F splitting per local row: >= 0 C-point, < 0 F-point;
 *                          entries equal to -3 are reset to -1 on exit
 *   num_cpts_global      - num_cpts_global[1] on the last rank supplies the
 *                          global number of C-points (broadcast below)
 *   blk_size             - block size (currently unused in this routine)
 *   reserved_coarse_size - reserved coarse dofs (currently unused in this routine)
 *   debug_flag           - == 4 enables timing printouts; negative values are negated
 *   P_ptr                - output: the assembled interpolation matrix
 *
 * Returns 0.
 *
 * NOTE: threading is disabled here (num_threads forced to 1); the thread
 * partitioning scaffolding is kept for when it is re-enabled.
 */
HYPRE_Int
hypre_MGRBuildPDRS( hypre_ParCSRMatrix *A,
                    HYPRE_Int *CF_marker,
                    HYPRE_BigInt *num_cpts_global,
                    HYPRE_Int blk_size,
                    HYPRE_Int reserved_coarse_size,
                    HYPRE_Int debug_flag,
                    hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Real *a_diag;
   hypre_ParCSRMatrix *P;
   /* Initialize to NULL: when P_offd_size == 0 the allocation branch below is
    * skipped, yet tmp_map_offd is still passed to hypre_GetCommPkgRTFromCommPkgA
    * and freed. NULL keeps that path well-defined (hypre_TFree(NULL) is a no-op). */
   HYPRE_BigInt *col_map_offd_P = NULL;
   HYPRE_Int *tmp_map_offd = NULL;
   HYPRE_Int *CF_marker_offd = NULL;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size, P_offd_size;
   HYPRE_Int *P_marker, *P_marker_offd;
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;
   // HYPRE_Int jj_begin_row,jj_begin_row_offd;
   // HYPRE_Int jj_end_row,jj_end_row_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int *fine_to_coarse;
   //HYPRE_BigInt *fine_to_coarse_offd;
   HYPRE_Int *coarse_counter;
   HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_cpts;
   //HYPRE_BigInt my_first_cpt;
   HYPRE_Int num_cols_P_offd;
   HYPRE_Int i, i1;
   HYPRE_Int j, jl, jj;
   HYPRE_Int start;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;
   HYPRE_Int *int_buf_data;
   HYPRE_Real wall_time;  /* for debugging instrumentation */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //num_threads = hypre_NumThreads();
   // Temporary fix, disable threading
   // TODO: enable threading
   num_threads = 1;
   //my_first_cpt = num_cpts_global[0];
   /* The last rank holds the global C-point count; broadcast it to all. */
   if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (debug_flag < 0)
   {
      debug_flag = -debug_flag;
   }
   if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
   if (num_cols_A_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); }
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                           num_sends), HYPRE_MEMORY_HOST);
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
         int_buf_data[index++]
            = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 1 CF_marker =    %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    * Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; }
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   /* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* Partition the n_fine rows into num_threads contiguous chunks [ns, ne). */
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}
          *--------------------------------------------------------------------*/
         else
         {
            /* Count one diag entry per C-point neighbor in the diag part... */
            for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
            {
               i1 = A_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }
            /* ...and one offd entry per C-point neighbor in the offd part. */
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if (CF_marker_offd[i1] >= 0)
                  {
                     jj_count_offd[j]++;
                  }
               }
            }
         }
         /*--------------------------------------------------------------------
          * Set up the indexes for the DRS method
          *--------------------------------------------------------------------*/
      }
   }
   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   /* Prefix-sum the per-thread counters so each thread knows its offsets. */
   for (i = 0; i < num_threads - 1; i++)
   {
      coarse_counter[i + 1] += coarse_counter[i];
      jj_count[i + 1] += jj_count[i];
      jj_count_offd[i + 1] += jj_count_offd[i];
   }
   i = num_threads - 1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];
   P_diag_size = jj_counter;
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_i[n_fine] = jj_counter;
   P_offd_size = jj_counter_offd;
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
   /*-----------------------------------------------------------------------
    * Intialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Internal work 1 =     %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* Shift each thread's local coarse numbering into a global (per-process)
    * numbering using the prefix-summed coarse_counter. */
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) { coarse_shift = coarse_counter[j - 1]; }
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }
      for (i = ns; i < ne; i++)
      {
         fine_to_coarse[i] += coarse_shift;
      }
   }
   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++]
            = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   */
   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 4 FineToCoarse = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/
   /* Cache the reciprocal of each row's diagonal entry: a_diag[i] = 1/a_ii. */
   a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
   for (i = 0; i < n_fine; i++)
   {
      for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
      {
         i1 = A_diag_j[jj];
         if ( i == i1 ) /* diagonal of A only */
         {
            a_diag[i] = 1.0 / A_diag_data[jj];
         }
      }
   }
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* Second pass: fill P_diag / P_offd. */
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (jl < rest)
      {
         ns = jl * size + jl;
         ne = (jl + 1) * size + jl + 1;
      }
      else
      {
         ns = jl * size + rest;
         ne = (jl + 1) * size + rest;
      }
      jj_counter = 0;
      if (jl > 0) { jj_counter = jj_count[jl - 1]; }
      jj_counter_offd = 0;
      if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
      {
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      }
      else
      {
         P_marker_offd = NULL;
      }
      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter] = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
            {
               i1 = A_diag_j[jj];
               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  /* Weight: -a_ij / a_ii (diagonal approximation of inv(A_ff)). */
                  P_diag_data[jj_counter] = - A_diag_data[jj] * a_diag[i];
                  jj_counter++;
               }
            }
            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  /*-----------------------------------------------------------
                   * If neighbor i1 is a C-point, set column number in P_offd_j
                   * and initialize interpolation weight to zero.
                   *-----------------------------------------------------------*/
                  if (CF_marker_offd[i1] >= 0)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                     /* Store the local A-offd column index for now; it is
                      * compressed/remapped after P is assembled (see the
                      * binary-search pass below). */
                     P_offd_j[jj_counter_offd] = i1;
                     P_offd_data[jj_counter_offd] = - A_offd_data[jj] * a_diag[i];
                     jj_counter_offd++;
                  }
               }
            }
         }
         P_offd_i[i + 1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(a_diag, HYPRE_MEMORY_HOST);
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      /* Compress P's off-diagonal column space: keep only columns actually
       * referenced, then remap P_offd_j to the compressed numbering. */
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker[i] = 0;
      }
      num_cols_P_offd = 0;
      for (i = 0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index] == 0) { index++; }
         tmp_map_offd[i] = index++;
      }
#if 0
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      for (i = 0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }
   for (i = 0; i < n_fine; i++)
      if (CF_marker[i] == -3) { CF_marker[i] = -1; }
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);
   *P_ptr = P;
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   // hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
   return (0);
}
/* Left-scale a ParCSR matrix by a diagonal matrix: A <- diag(vector) * A.
 *
 * Every entry in local row i of A (both the diag and offd CSR parts) is
 * multiplied by vector[i].
 *
 * vector : scale factor per local row; length must be at least the number
 *          of local rows of A (not modified)
 * A      : the target ParCSR matrix, modified in place
 *
 * Returns 0.
 */
HYPRE_Int
hypre_ParCSRMatrixLeftScale(HYPRE_Real *vector,
                            hypre_ParCSRMatrix *A)
{
   HYPRE_Int i, j, n_local;
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   n_local = hypre_CSRMatrixNumRows(A_diag);
   for (i = 0; i < n_local; i++)
   {
      /* Scale all nonzeros in local row i by vector[i]. */
      HYPRE_Real factor = vector[i];
      for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
      {
         A_diag_data[j] *= factor;
      }
      for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)
      {
         A_offd_data[j] *= factor;
      }
   }
   return (0);
}
/************************************************************
* Available methods:
* 0: inv(A_FF) approximated by its diagonal inverse
* 1: inv(A_FF) approximated by sparse approximate inverse
*************************************************************/
HYPRE_Int
hypre_MGRComputeNonGalerkinCoarseGrid(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *P,
hypre_ParCSRMatrix *RT,
HYPRE_Int bsize,
HYPRE_Int ordering,
HYPRE_Int method,
HYPRE_Int Pmax,
HYPRE_Int keep_stencil,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix **A_h_ptr)
{
HYPRE_Int *c_marker, *f_marker;
HYPRE_Int n_local_fine_grid, i, i1, jj;
hypre_ParCSRMatrix *A_cc;
hypre_ParCSRMatrix *A_ff;
hypre_ParCSRMatrix *A_fc;
hypre_ParCSRMatrix *A_cf;
hypre_ParCSRMatrix *A_h;
hypre_ParCSRMatrix *A_h_correction;
HYPRE_Int max_elmts = Pmax;
// HYPRE_Real wall_time = 0.;
hypre_ParCSRMatrix *P_mod = NULL;
HYPRE_Int my_id;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_MPI_Comm_rank(comm, &my_id);
HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A);
n_local_fine_grid = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
c_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST);
f_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST);
for (i = 0; i < n_local_fine_grid; i++)
{
HYPRE_Int point_type = CF_marker[i];
hypre_assert(point_type == 1 || point_type == -1);
c_marker[i] = point_type;
f_marker[i] = -point_type;
}
// get the A_cc sub-block
hypre_MGRGetSubBlock(A, c_marker, c_marker, 0, &A_cc);
if (method == 0)
{
if (keep_stencil)
{
//wall_time = time_getWallclockSeconds();
hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf);
hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc);
hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff);
// extract the diagonal of A_ff and compute D_ff_inv
hypre_CSRMatrix *A_ff_diag = hypre_ParCSRMatrixDiag(A_ff);
HYPRE_Real *A_ff_diag_data = hypre_CSRMatrixData(A_ff_diag);
HYPRE_Int *A_ff_diag_i = hypre_CSRMatrixI(A_ff_diag);
HYPRE_Int *A_ff_diag_j = hypre_CSRMatrixJ(A_ff_diag);
HYPRE_Int n_local_fpoints = hypre_CSRMatrixNumRows(A_ff_diag);
HYPRE_Real *D_ff_inv;
D_ff_inv = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST);
for (i = 0; i < n_local_fpoints; i++)
{
for (jj = A_ff_diag_i[i]; jj < A_ff_diag_i[i + 1]; jj++)
{
i1 = A_ff_diag_j[jj];
if ( i == i1 )
{
D_ff_inv[i] = -1.0 / A_ff_diag_data[jj];
}
}
}
// extract the diagonal of A_cf
hypre_CSRMatrix *A_cf_diag = hypre_ParCSRMatrixDiag(A_cf);
HYPRE_Real *A_cf_diag_data = hypre_CSRMatrixData(A_cf_diag);
HYPRE_Int *A_cf_diag_i = hypre_CSRMatrixI(A_cf_diag);
HYPRE_Int *A_cf_diag_j = hypre_CSRMatrixJ(A_cf_diag);
n_local_fpoints = hypre_CSRMatrixNumRows(A_cf_diag);
HYPRE_Real *D_cf;
D_cf = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST);
for (i = 0; i < n_local_fpoints; i++)
{
i1 = A_cf_diag_j[A_cf_diag_i[i]];
D_cf[i] = A_cf_diag_data[jj];
}
// compute the triple product
hypre_ParCSRMatrixLeftScale(D_ff_inv, A_fc);
hypre_ParCSRMatrixLeftScale(D_cf, A_fc);
A_h_correction = A_fc;
hypre_TFree(D_cf, HYPRE_MEMORY_HOST);
hypre_TFree(D_ff_inv, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDestroy(A_ff);
hypre_ParCSRMatrixDestroy(A_cf);
//wall_time = time_getWallclockSeconds() - wall_time;
//hypre_printf("Compute triple product D_cf * D_ff_inv * A_fc time: %1.5f\n", wall_time);
}
else
{
//wall_time = time_getWallclockSeconds();
P_mod = hypre_ParCSRMatrixCompleteClone(P);
hypre_ParCSRMatrixCopy(P, P_mod, 1);
HYPRE_Int n_local_rows = hypre_ParCSRMatrixNumRows(P_mod);
hypre_CSRMatrix *P_mod_diag = hypre_ParCSRMatrixDiag(P_mod);
HYPRE_Int *P_mod_diag_i = hypre_CSRMatrixI(P_mod_diag);
HYPRE_Real *P_mod_diag_data = hypre_CSRMatrixData(P_mod_diag);
for (i = 0; i < n_local_rows; i ++)
{
if (CF_marker[i] >= 0)
{
HYPRE_Int ii = P_mod_diag_i[i];
P_mod_diag_data[ii] = 0.0;
}
}
hypre_BoomerAMGBuildCoarseOperator(RT, A, P_mod, &A_h_correction);
//wall_time = time_getWallclockSeconds() - wall_time;
//hypre_printf("Compute triple product time new: %1.5f\n", wall_time);
hypre_ParCSRMatrixDestroy(P_mod);
}
}
else
{
// Approximate inverse for ideal interploation
hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf);
hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc);
hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff);
hypre_ParCSRMatrix *A_ff_inv = NULL;
hypre_ParCSRMatrix *minus_Wp = NULL;
hypre_MGRApproximateInverse(A_ff, &A_ff_inv);
minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc);
A_h_correction = hypre_ParMatmul(A_cf, minus_Wp);
hypre_ParCSRMatrixDestroy(minus_Wp);
hypre_ParCSRMatrixDestroy(A_ff);
hypre_ParCSRMatrixDestroy(A_fc);
hypre_ParCSRMatrixDestroy(A_cf);
}
// perform dropping for A_h_correction
// specific to multiphase poromechanics
// we only keep the diagonal of each block
//wall_time = time_getWallclockSeconds();
HYPRE_Int n_local_cpoints = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_h_correction));
hypre_CSRMatrix *A_h_correction_diag = hypre_ParCSRMatrixDiag(A_h_correction);
HYPRE_Real *A_h_correction_diag_data = hypre_CSRMatrixData(A_h_correction_diag);
HYPRE_Int *A_h_correction_diag_i = hypre_CSRMatrixI(A_h_correction_diag);
HYPRE_Int *A_h_correction_diag_j = hypre_CSRMatrixJ(A_h_correction_diag);
HYPRE_Int ncol_diag = hypre_CSRMatrixNumCols(A_h_correction_diag);
hypre_CSRMatrix *A_h_correction_offd = hypre_ParCSRMatrixOffd(A_h_correction);
HYPRE_Real *A_h_correction_offd_data = hypre_CSRMatrixData(A_h_correction_offd);
HYPRE_Int *A_h_correction_offd_i = hypre_CSRMatrixI(A_h_correction_offd);
HYPRE_Int *A_h_correction_offd_j = hypre_CSRMatrixJ(A_h_correction_offd);
if (Pmax > 0)
{
if (ordering == 0) // interleaved ordering
{
HYPRE_Int *A_h_correction_diag_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints + 1,
memory_location);
HYPRE_Int *A_h_correction_diag_j_new = hypre_CTAlloc(HYPRE_Int,
(bsize + max_elmts) * n_local_cpoints, memory_location);
HYPRE_Complex *A_h_correction_diag_data_new = hypre_CTAlloc(HYPRE_Complex,
(bsize + max_elmts) * n_local_cpoints, memory_location);
HYPRE_Int num_nonzeros_diag_new = 0;
HYPRE_Int *A_h_correction_offd_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints + 1,
memory_location);
HYPRE_Int *A_h_correction_offd_j_new = hypre_CTAlloc(HYPRE_Int, max_elmts * n_local_cpoints,
memory_location);
HYPRE_Complex *A_h_correction_offd_data_new = hypre_CTAlloc(HYPRE_Complex,
max_elmts * n_local_cpoints, memory_location);
HYPRE_Int num_nonzeros_offd_new = 0;
for (i = 0; i < n_local_cpoints; i++)
{
HYPRE_Int max_num_nonzeros = A_h_correction_diag_i[i + 1] - A_h_correction_diag_i[i] +
A_h_correction_offd_i[i + 1] - A_h_correction_offd_i[i];
HYPRE_Int *aux_j = hypre_CTAlloc(HYPRE_Int, max_num_nonzeros, HYPRE_MEMORY_HOST);
HYPRE_Real *aux_data = hypre_CTAlloc(HYPRE_Real, max_num_nonzeros, HYPRE_MEMORY_HOST);
HYPRE_Int row_start = i - (i % bsize);
HYPRE_Int row_stop = row_start + bsize - 1;
HYPRE_Int cnt = 0;
for (jj = A_h_correction_offd_i[i]; jj < A_h_correction_offd_i[i + 1]; jj++)
{
aux_j[cnt] = A_h_correction_offd_j[jj] + ncol_diag;
aux_data[cnt] = A_h_correction_offd_data[jj];
cnt++;
}
for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i + 1]; jj++)
{
aux_j[cnt] = A_h_correction_diag_j[jj];
aux_data[cnt] = A_h_correction_diag_data[jj];
cnt++;
}
hypre_qsort2_abs(aux_j, aux_data, 0, cnt - 1);
for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i + 1]; jj++)
{
i1 = A_h_correction_diag_j[jj];
if (i1 >= row_start && i1 <= row_stop)
{
// copy data to new arrays
A_h_correction_diag_j_new[num_nonzeros_diag_new] = i1;
A_h_correction_diag_data_new[num_nonzeros_diag_new] = A_h_correction_diag_data[jj];
++num_nonzeros_diag_new;
}
else
{
// Do nothing
}
}
if (max_elmts > 0)
{
for (jj = 0; jj < hypre_min(max_elmts, cnt); jj++)
{
HYPRE_Int col_idx = aux_j[jj];
HYPRE_Real col_value = aux_data[jj];
if (col_idx < ncol_diag && (col_idx < row_start || col_idx > row_stop))
{
A_h_correction_diag_j_new[num_nonzeros_diag_new] = col_idx;
A_h_correction_diag_data_new[num_nonzeros_diag_new] = col_value;
++num_nonzeros_diag_new;
}
else if (col_idx >= ncol_diag)
{
A_h_correction_offd_j_new[num_nonzeros_offd_new] = col_idx - ncol_diag;
A_h_correction_offd_data_new[num_nonzeros_offd_new] = col_value;
++num_nonzeros_offd_new;
}
}
}
A_h_correction_diag_i_new[i + 1] = num_nonzeros_diag_new;
A_h_correction_offd_i_new[i + 1] = num_nonzeros_offd_new;
hypre_TFree(aux_j, HYPRE_MEMORY_HOST);
hypre_TFree(aux_data, HYPRE_MEMORY_HOST);
}
hypre_TFree(A_h_correction_diag_i, memory_location);
hypre_TFree(A_h_correction_diag_j, memory_location);
hypre_TFree(A_h_correction_diag_data, memory_location);
hypre_CSRMatrixI(A_h_correction_diag) = A_h_correction_diag_i_new;
hypre_CSRMatrixJ(A_h_correction_diag) = A_h_correction_diag_j_new;
hypre_CSRMatrixData(A_h_correction_diag) = A_h_correction_diag_data_new;
hypre_CSRMatrixNumNonzeros(A_h_correction_diag) = num_nonzeros_diag_new;
if (A_h_correction_offd_i) { hypre_TFree(A_h_correction_offd_i, memory_location); }
if (A_h_correction_offd_j) { hypre_TFree(A_h_correction_offd_j, memory_location); }
if (A_h_correction_offd_data) { hypre_TFree(A_h_correction_offd_data, memory_location); }
hypre_CSRMatrixI(A_h_correction_offd) = A_h_correction_offd_i_new;
hypre_CSRMatrixJ(A_h_correction_offd) = A_h_correction_offd_j_new;
hypre_CSRMatrixData(A_h_correction_offd) = A_h_correction_offd_data_new;
hypre_CSRMatrixNumNonzeros(A_h_correction_offd) = num_nonzeros_offd_new;
}
else
{
hypre_printf("Error!! Block ordering for non-Galerkin coarse grid is not currently supported\n");
exit(-1);
}
}
//hypre_MGRParCSRMatrixTruncate(A_h_correction, max_elmts);
//wall_time = time_getWallclockSeconds() - wall_time;
//hypre_printf("Filter A_h_correction time: %1.5f\n", wall_time);
//hypre_ParCSRMatrixPrintIJ(A_h_correction,1,1,"A_h_correction_filtered");
// coarse grid / schur complement
hypre_ParCSRMatrixAdd(1.0, A_cc, 1.0, A_h_correction, &A_h);
*A_h_ptr = A_h;
//hypre_ParCSRMatrixPrintIJ(A_h,1,1,"A_h");
hypre_ParCSRMatrixDestroy(A_cc);
hypre_ParCSRMatrixDestroy(A_h_correction);
hypre_TFree(c_marker, HYPRE_MEMORY_HOST);
hypre_TFree(f_marker, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/**
 * Algebraic fixed-stress diagnostic for a three-block (U, S, P) system.
 *
 * The local rows of A are partitioned into three consecutive blocks using
 * the global offsets in mgr_idx_array (mgr_idx_array[0] must equal this
 * rank's first global row index; asserted below).  The routine extracts the
 * sub-blocks A_up, A_uu, A_su, A_pu, computes
 *    e3 = A_uu^{-1} (A_up * 1)   (A_uu solve done with the given AMG solver),
 * and prints the vectors A_su*e3 (file prefix "Dsp") and A_pu*e3 ("Dpp")
 * via hypre_ParVectorPrintIJ.
 *
 * @param A            full system matrix
 * @param mgr_idx_array global row offsets of the U/S/P blocks (size >= 3)
 * @param A_ff_solver  BoomerAMG solver already set up for the A_uu block
 * @return hypre_error_flag
 */
HYPRE_Int
hypre_MGRComputeAlgebraicFixedStress(hypre_ParCSRMatrix *A,
                                     HYPRE_BigInt *mgr_idx_array,
                                     HYPRE_Solver A_ff_solver)
{
   HYPRE_Int *U_marker, *S_marker, *P_marker;
   HYPRE_Int n_fine, i;
   HYPRE_BigInt ibegin;
   hypre_ParCSRMatrix *A_up;
   hypre_ParCSRMatrix *A_uu;
   hypre_ParCSRMatrix *A_su;
   hypre_ParCSRMatrix *A_pu;
   hypre_ParVector *e1_vector;
   hypre_ParVector *e2_vector;
   hypre_ParVector *e3_vector;
   hypre_ParVector *e4_vector;
   hypre_ParVector *e5_vector;

   n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   ibegin = hypre_ParCSRMatrixFirstRowIndex(A);
   hypre_assert(ibegin == mgr_idx_array[0]);

   U_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   S_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   for (i = 0; i < n_fine; i++)
   {
      U_marker[i] = -1;
      S_marker[i] = -1;
      P_marker[i] = -1;
   }

   /* Mark each local row as belonging to exactly one of the U/S/P blocks */
   for (i = 0; i < n_fine; i++)
   {
      if (i < mgr_idx_array[1] - ibegin)
      {
         U_marker[i] = 1;
      }
      else if (i >= (mgr_idx_array[1] - ibegin) && i < (mgr_idx_array[2] - ibegin))
      {
         S_marker[i] = 1;
      }
      else
      {
         P_marker[i] = 1;
      }
   }

   /* Extract the sub-blocks of A needed for the diagnostic */
   hypre_MGRGetSubBlock(A, U_marker, P_marker, 0, &A_up);
   hypre_MGRGetSubBlock(A, U_marker, U_marker, 0, &A_uu);
   hypre_MGRGetSubBlock(A, S_marker, U_marker, 0, &A_su);
   hypre_MGRGetSubBlock(A, P_marker, U_marker, 0, &A_pu);

   /* e1 = all-ones vector in the column space of A_up */
   e1_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_up),
                                     hypre_ParCSRMatrixGlobalNumCols(A_up),
                                     hypre_ParCSRMatrixColStarts(A_up));
   hypre_ParVectorInitialize(e1_vector);
   hypre_ParVectorSetConstantValues(e1_vector, 1.0);

   e2_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu),
                                     hypre_ParCSRMatrixGlobalNumRows(A_uu),
                                     hypre_ParCSRMatrixRowStarts(A_uu));
   hypre_ParVectorInitialize(e2_vector);
   hypre_ParVectorSetConstantValues(e2_vector, 0.0);

   e3_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu),
                                     hypre_ParCSRMatrixGlobalNumRows(A_uu),
                                     hypre_ParCSRMatrixRowStarts(A_uu));
   hypre_ParVectorInitialize(e3_vector);
   hypre_ParVectorSetConstantValues(e3_vector, 0.0);

   e4_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_su),
                                     hypre_ParCSRMatrixGlobalNumRows(A_su),
                                     hypre_ParCSRMatrixRowStarts(A_su));
   hypre_ParVectorInitialize(e4_vector);
   hypre_ParVectorSetConstantValues(e4_vector, 0.0);

   e5_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_pu),
                                     hypre_ParCSRMatrixGlobalNumRows(A_pu),
                                     hypre_ParCSRMatrixRowStarts(A_pu));
   hypre_ParVectorInitialize(e5_vector);
   hypre_ParVectorSetConstantValues(e5_vector, 0.0);

   /* compute e2 = A_up * e1 */
   hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_up, e1_vector, 0.0, e2_vector, e2_vector);
   /* solve e3 = A_uu^-1 * e2 */
   hypre_BoomerAMGSolve(A_ff_solver, A_uu, e2_vector, e3_vector);
   /* compute e4 = A_su * e3
    * (BUGFIX: this matvec was previously issued twice back-to-back with
    * identical arguments; the duplicate was redundant and has been removed) */
   hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector);
   /* print e4 */
   hypre_ParVectorPrintIJ(e4_vector, 1, "Dsp");
   /* compute e5 = A_pu * e3 */
   hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_pu, e3_vector, 0.0, e5_vector, e5_vector);
   hypre_ParVectorPrintIJ(e5_vector, 1, "Dpp");

   /* Clean up */
   hypre_ParVectorDestroy(e1_vector);
   hypre_ParVectorDestroy(e2_vector);
   hypre_ParVectorDestroy(e3_vector);
   hypre_ParVectorDestroy(e4_vector); /* BUGFIX: previously leaked */
   hypre_ParVectorDestroy(e5_vector); /* BUGFIX: previously leaked */
   hypre_ParCSRMatrixDestroy(A_uu);
   hypre_ParCSRMatrixDestroy(A_up);
   hypre_ParCSRMatrixDestroy(A_pu);
   hypre_ParCSRMatrixDestroy(A_su);
   hypre_TFree(U_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(S_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
/**
 * Build a sparse approximate inverse of A using the Newton-Schulz-Hotelling
 * (NSH) iteration from hypre's ILU module, with fixed, deliberately small
 * iteration/fill parameters.
 *
 * @param A      matrix to (approximately) invert
 * @param A_inv  output: newly created approximate inverse (caller owns it)
 * @return hypre_error_flag
 */
HYPRE_Int
hypre_MGRApproximateInverse(hypre_ParCSRMatrix *A,
                            hypre_ParCSRMatrix **A_inv)
{
   /* NSH / MR controls; row-nnz caps are trimmed well below the
    * module defaults (1000 and 800 respectively) to keep the result sparse. */
   HYPRE_Int  print_level     = 0;
   HYPRE_Int  nsh_max_iter    = 2;
   HYPRE_Int  nsh_max_row_nnz = 2; /* default 1000 */
   HYPRE_Int  mr_max_iter     = 1;
   HYPRE_Int  mr_max_row_nnz  = 2; /* default 800 */
   HYPRE_Int  mr_col_version  = 0;
   HYPRE_Real mr_tol          = 1.0e-3;
   HYPRE_Real nsh_tol         = 1.0e-3;

   hypre_ParCSRMatrix *inv_mat = NULL;

   /* Drop tolerances for the MR and NSH phases */
   HYPRE_Real *droptol = hypre_CTAlloc(HYPRE_Real, 2, HYPRE_MEMORY_HOST);
   droptol[0] = 1.0e-2;
   droptol[1] = 1.0e-2;

   hypre_ILUParCSRInverseNSH(A, &inv_mat, droptol, mr_tol, nsh_tol, DIVIDE_TOL,
                             mr_max_row_nnz, nsh_max_row_nnz, mr_max_iter,
                             nsh_max_iter, mr_col_version, print_level);
   *A_inv = inv_mat;

   hypre_TFree(droptol, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
/**
 * Build the MGR interpolation operator
 *    P = [ -Wp ; I ]   with   -Wp = S * A_fc,
 * where S is a caller-supplied (explicit) approximation of A_ff^{-1} and
 * A_fc is the F-to-C coupling block extracted from A.
 *
 * C-point rows of P get a single unit entry (identity); F-point rows are
 * copied (negated) from the corresponding row of minus_Wp.  Assembly is the
 * classic two-pass CSR pattern: pass 1 counts nonzeros, pass 2 fills.
 *
 * @param A               fine-grid matrix
 * @param S               approximation of A_ff^{-1} (same F-row ordering as A_fc)
 * @param CF_marker       per-row C/F flags (1 => C-point)
 * @param num_cpts_global global coarse-point offsets; [1] on the last rank is
 *                        the global number of C-points (broadcast below)
 * @param debug_flag      unused here
 * @param P_ptr           output: the assembled interpolation operator
 * @return 0
 */
HYPRE_Int
hypre_MGRBuildInterpApproximateInverseExp(hypre_ParCSRMatrix *A,
                                          hypre_ParCSRMatrix *S,
                                          HYPRE_Int *CF_marker,
                                          HYPRE_BigInt *num_cpts_global,
                                          HYPRE_Int debug_flag,
                                          hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int *C_marker;
   HYPRE_Int *F_marker;
   hypre_ParCSRMatrix *A_fc;
   hypre_ParCSRMatrix *minus_Wp;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size, P_offd_size;
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_Int coarse_counter;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   // HYPRE_BigInt my_first_cpt;
   HYPRE_Int i, jj;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   // HYPRE_Int num_threads;
   // HYPRE_Real wall_time;  /* for debugging instrumentation  */

   C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   // create C and F markers (+1 selects the row/column set for GetSubBlock)
   for (i = 0; i < n_fine; i++)
   {
      C_marker[i] = (CF_marker[i] == 1) ? 1 : -1;
      F_marker[i] = (CF_marker[i] == 1) ? -1 : 1;
   }
   // Get A_FC
   hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc);
   // compute -Wp  (row r of minus_Wp corresponds to the r-th F-point of A)
   minus_Wp = hypre_ParMatmul(S, A_fc);

   hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp);
   HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag);
   HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag);
   HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag);
   hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp);
   HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd);
   HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd);
   HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   // num_threads = hypre_NumThreads();
   // my_first_cpt = num_cpts_global[0];
   /* Only the last rank knows the global C-point count; broadcast it. */
   if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
   /*-----------------------------------------------------------------------
    *  First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    *  Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; }
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   /*-----------------------------------------------------------------------
    *  Loop over fine grid.
    *-----------------------------------------------------------------------*/
   /* row_counter tracks the F-point row index into minus_Wp (which only has
    * rows for F-points), while i runs over all fine rows of A. */
   HYPRE_Int row_counter = 0;
   coarse_counter = 0;
   for (i = 0; i < n_fine; i++)
   {
      /*--------------------------------------------------------------------
       *  If i is a C-point, interpolation is the identity. Also set up
       *  mapping vector.
       *--------------------------------------------------------------------*/
      /* NOTE(review): this pass treats CF_marker[i] == 0 as an F-point
       * (> 0), but the fill pass below uses >= 0 — inconsistent if a marker
       * value of 0 can occur; confirm CF_marker is strictly +/-1 here. */
      if (CF_marker[i] > 0)
      {
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
      else
      {
         /*--------------------------------------------------------------------
          *  If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}
          *--------------------------------------------------------------------*/
         for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter + 1]; jj++)
         {
            jj_counter++;
         }
         if (num_procs > 1)
         {
            for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter + 1]; jj++)
            {
               jj_counter_offd++;
            }
         }
         row_counter++;
      }
   }
   /*-----------------------------------------------------------------------
    *  Allocate  arrays.
    *-----------------------------------------------------------------------*/
   P_diag_size = jj_counter;
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
   /* Cap the row-pointer array now; the fill pass ends at the same count. */
   P_diag_i[n_fine] = jj_counter;
   P_offd_size = jj_counter_offd;
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
   /*-----------------------------------------------------------------------
    *  Intialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   /*-----------------------------------------------------------------------
    *  Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   /* Second pass: fill P row by row. */
   row_counter = 0;
   for (i = 0; i < n_fine; i++)
   {
      /*--------------------------------------------------------------------
       *  If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_i[i] = jj_counter;
         P_diag_j[jj_counter] = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }
      /*--------------------------------------------------------------------
       *  If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/
      else
      {
         /* Diagonal part of P: negate minus_Wp entries to get W = -(-Wp) */
         P_diag_i[i] = jj_counter;
         for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter + 1]; jj++)
         {
            P_diag_j[jj_counter] = minus_Wp_diag_j[jj];
            P_diag_data[jj_counter] = - minus_Wp_diag_data[jj];
            jj_counter++;
         }
         /* Off-Diagonal part of P */
         P_offd_i[i] = jj_counter_offd;
         if (num_procs > 1)
         {
            for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter + 1]; jj++)
            {
               P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj];
               P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj];
               jj_counter_offd++;
            }
         }
         row_counter++;
      }
      /* C-point rows contribute no offd entries, so this also closes them. */
      P_offd_i[i + 1] = jj_counter_offd;
   }
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   /* P reuses minus_Wp's off-processor column map (copied below). */
   num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd);
   HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp);
   /* NOTE(review): col_map_offd_P is only allocated when P_offd_size > 0;
    * if num_cols_P_offd > 0 while P_offd_size == 0 the assignment below
    * would use it uninitialized — verify that combination cannot occur. */
   if (P_offd_size)
   {
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_cols_P_offd; i++)
      {
         col_map_offd_P[i] = col_map_offd_tmp[i];
      }
   }
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_MatvecCommPkgCreate(P);
   *P_ptr = P;
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(C_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(F_marker, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixDestroy(A_fc);
   hypre_ParCSRMatrixDestroy(minus_Wp);
   return 0;
}
/**
 * Build the MGR interpolation operator
 *    P = [ -Wp ; I ]   with   -Wp = A_ff_inv * A_fc,
 * where A_ff_inv is a sparse approximate inverse of the F-F block computed
 * internally by hypre_MGRApproximateInverse (NSH iteration).
 *
 * Identical assembly structure to hypre_MGRBuildInterpApproximateInverseExp,
 * except the approximate inverse is built here rather than supplied.
 * C-point rows of P are identity rows; F-point rows come from minus_Wp.
 *
 * @param A               fine-grid matrix
 * @param CF_marker       per-row C/F flags (1 => C-point)
 * @param num_cpts_global global coarse-point offsets; [1] on the last rank is
 *                        the global number of C-points (broadcast below)
 * @param debug_flag      unused here
 * @param P_ptr           output: the assembled interpolation operator
 * @return 0
 */
HYPRE_Int
hypre_MGRBuildInterpApproximateInverse(hypre_ParCSRMatrix *A,
                                       HYPRE_Int *CF_marker,
                                       HYPRE_BigInt *num_cpts_global,
                                       HYPRE_Int debug_flag,
                                       hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int *C_marker;
   HYPRE_Int *F_marker;
   hypre_ParCSRMatrix *A_ff;
   hypre_ParCSRMatrix *A_fc;
   hypre_ParCSRMatrix *A_ff_inv;
   hypre_ParCSRMatrix *minus_Wp;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size, P_offd_size;
   HYPRE_Int jj_counter, jj_counter_offd;
   //HYPRE_Int jj_begin_row,jj_begin_row_offd;
   //HYPRE_Int jj_end_row,jj_end_row_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int *fine_to_coarse = NULL;
   //HYPRE_Int *coarse_counter;
   HYPRE_Int coarse_counter;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   // HYPRE_BigInt my_first_cpt;
   HYPRE_Int i, jj;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   // HYPRE_Int num_threads;
   // HYPRE_Real wall_time;  /* for debugging instrumentation  */

   C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   // create C and F markers (+1 selects the row/column set for GetSubBlock)
   for (i = 0; i < n_fine; i++)
   {
      C_marker[i] = (CF_marker[i] == 1) ? 1 : -1;
      F_marker[i] = (CF_marker[i] == 1) ? -1 : 1;
   }
   // Get A_FF
   hypre_MGRGetSubBlock(A, F_marker, F_marker, 0, &A_ff);
   //  hypre_ParCSRMatrixPrintIJ(A_ff, 1, 1, "A_ff");
   // Get A_FC
   hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc);
   /* Sparse approximate inverse of the F-F block (NSH iteration) */
   hypre_MGRApproximateInverse(A_ff, &A_ff_inv);
   //  hypre_ParCSRMatrixPrintIJ(A_ff_inv, 1, 1, "A_ff_inv");
   //  hypre_ParCSRMatrixPrintIJ(A_fc, 1, 1, "A_fc");
   minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc);
   //  hypre_ParCSRMatrixPrintIJ(minus_Wp, 1, 1, "Wp");

   hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp);
   HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag);
   HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag);
   HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag);
   hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp);
   HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd);
   HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd);
   HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd);
   //hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp);
   //HYPRE_Int num_cols_minus_Wp_offd = hypre_CSRMatrixNumCols(minus_Wp_offd);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   // num_threads = hypre_NumThreads();
   // my_first_cpt = num_cpts_global[0];
   /* Only the last rank knows the global C-point count; broadcast it. */
   if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
   /*-----------------------------------------------------------------------
    *  First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    *  Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   //coarse_counter = hypre_CTAlloc(HYPRE_Int,  num_threads, HYPRE_MEMORY_HOST);
   //jj_count = hypre_CTAlloc(HYPRE_Int,  num_threads, HYPRE_MEMORY_HOST);
   //jj_count_offd = hypre_CTAlloc(HYPRE_Int,  num_threads, HYPRE_MEMORY_HOST);
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; }
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   /*-----------------------------------------------------------------------
    *  Loop over fine grid.
    *-----------------------------------------------------------------------*/
   /* row_counter tracks the F-point row index into minus_Wp (which only has
    * rows for F-points), while i runs over all fine rows of A. */
   HYPRE_Int row_counter = 0;
   coarse_counter = 0;
   for (i = 0; i < n_fine; i++)
   {
      /*--------------------------------------------------------------------
       *  If i is a C-point, interpolation is the identity. Also set up
       *  mapping vector.
       *--------------------------------------------------------------------*/
      /* NOTE(review): this pass treats CF_marker[i] == 0 as an F-point
       * (> 0), but the fill pass below uses >= 0 — inconsistent if a marker
       * value of 0 can occur; confirm CF_marker is strictly +/-1 here. */
      if (CF_marker[i] > 0)
      {
         //jj_count[j]++;
         //fine_to_coarse[i] = coarse_counter[j];
         //coarse_counter[j]++;
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
      else
      {
         /*--------------------------------------------------------------------
          *  If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}
          *--------------------------------------------------------------------*/
         for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter + 1]; jj++)
         {
            //jj_count[j]++;
            jj_counter++;
         }
         if (num_procs > 1)
         {
            for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter + 1]; jj++)
            {
               //jj_count_offd[j]++;
               jj_counter_offd++;
            }
         }
         row_counter++;
      }
   }
   /*-----------------------------------------------------------------------
    *  Allocate  arrays.
    *-----------------------------------------------------------------------*/
   /*
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];
   */
   P_diag_size = jj_counter;
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
   /* Cap the row-pointer array now; the fill pass ends at the same count. */
   P_diag_i[n_fine] = jj_counter;
   P_offd_size = jj_counter_offd;
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
   /*-----------------------------------------------------------------------
    *  Intialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   /*
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Internal work 1 =     %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   */
   /*-----------------------------------------------------------------------
    *  Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   /*
   if (num_procs > 1)
   {
      if (debug_flag==4) wall_time = time_getWallclockSeconds();
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_minus_Wp_offd, HYPRE_MEMORY_HOST);
      for (i = 0; i < n_fine; i++)
      {
         fine_to_coarse[i] += my_first_cpt;
      }
      comm_pkg = hypre_ParCSRMatrixCommPkg(minus_Wp);
      if (!comm_pkg)
      {
         hypre_MatvecCommPkgCreate(minus_Wp);
         comm_pkg = hypre_ParCSRMatrixCommPkg(minus_Wp);
      }
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  fine_to_coarse_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      if (debug_flag==4)
      {
         wall_time = time_getWallclockSeconds() - wall_time;
         hypre_printf("Proc = %d     Interp: Comm 4 FineToCoarse = %f\n",
                      my_id, wall_time);
         fflush(NULL);
      }
      if (debug_flag==4) wall_time = time_getWallclockSeconds();
#if 0
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
   }
   */
   /* Second pass: fill P row by row. */
   row_counter = 0;
   for (i = 0; i < n_fine; i++)
   {
      /*--------------------------------------------------------------------
       *  If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_i[i] = jj_counter;
         P_diag_j[jj_counter] = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }
      /*--------------------------------------------------------------------
       *  If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/
      else
      {
         /* Diagonal part of P: negate minus_Wp entries to get W = -(-Wp) */
         P_diag_i[i] = jj_counter;
         for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter + 1]; jj++)
         {
            //P_marker[row_counter] = jj_counter;
            P_diag_j[jj_counter] = minus_Wp_diag_j[jj];
            P_diag_data[jj_counter] = - minus_Wp_diag_data[jj];
            jj_counter++;
         }
         /* Off-Diagonal part of P */
         P_offd_i[i] = jj_counter_offd;
         if (num_procs > 1)
         {
            for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter + 1]; jj++)
            {
               //P_marker_offd[row_counter] = jj_counter_offd;
               P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj];
               P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj];
               jj_counter_offd++;
            }
         }
         row_counter++;
      }
      /* C-point rows contribute no offd entries, so this also closes them. */
      P_offd_i[i + 1] = jj_counter_offd;
   }
   //hypre_printf("Num rows of Wp = %d\n", row_counter);
   //P_offd_i[row_counter] = jj_counter_offd;
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   /* P reuses minus_Wp's off-processor column map (copied below). */
   num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd);
   HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp);
   /* NOTE(review): col_map_offd_P is only allocated when P_offd_size > 0;
    * if num_cols_P_offd > 0 while P_offd_size == 0 the assignment below
    * would use it uninitialized — verify that combination cannot occur. */
   if (P_offd_size)
   {
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_cols_P_offd; i++)
      {
         col_map_offd_P[i] = col_map_offd_tmp[i];
      }
   }
   /*
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_minus_Wp_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      for (i=0; i < num_cols_minus_Wp_offd; i++)
         P_marker[i] = 0;
      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }
      col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         col_map_offd_P[i] = index++;
      }
#if 0
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }
   */
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_MatvecCommPkgCreate(P);
   //hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse_offd);
   *P_ptr = P;
   //hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   //hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   //if (fine_to_coarse_offd) hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
   //hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   //hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   //hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(C_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(F_marker, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixDestroy(A_ff);
   hypre_ParCSRMatrixDestroy(A_fc);
   hypre_ParCSRMatrixDestroy(A_ff_inv);
   hypre_ParCSRMatrixDestroy(minus_Wp);
   return 0;
}
/* Setup interpolation operator.
 *
 * Dispatches on interp_type:
 *   < 3  -> hypre_MGRBuildP (injection / diagonal-scaled variants)
 *   == 4 -> approximate-inverse interpolation + truncation
 *   == 99-> approximate-inverse interpolation from explicit S + truncation
 *   else -> classical modified interpolation (BoomerAMG)
 * The resulting operator is returned through P; S, num_functions, dof_func
 * and numsweeps are only used by some variants.
 */
HYPRE_Int
hypre_MGRBuildInterp(hypre_ParCSRMatrix *A,
                     HYPRE_Int *CF_marker,
                     hypre_ParCSRMatrix *S,
                     HYPRE_BigInt *num_cpts_global,
                     HYPRE_Int num_functions,
                     HYPRE_Int *dof_func,
                     HYPRE_Int debug_flag,
                     HYPRE_Real trunc_factor,
                     HYPRE_Int max_elmts,
                     hypre_ParCSRMatrix **P,
                     HYPRE_Int interp_type,
                     HYPRE_Int numsweeps)
{
   hypre_ParCSRMatrix *interp_mat = NULL;

   if (interp_type == 4)
   {
      /* Interpolation from a sparse approximate inverse of A_ff */
      hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global,
                                             debug_flag, &interp_mat);
      hypre_BoomerAMGInterpTruncation(interp_mat, trunc_factor, max_elmts);
   }
   else if (interp_type == 99)
   {
      /* Same, but the approximation of A_ff^{-1} is supplied as S */
      hypre_MGRBuildInterpApproximateInverseExp(A, S, CF_marker, num_cpts_global,
                                                debug_flag, &interp_mat);
      hypre_BoomerAMGInterpTruncation(interp_mat, trunc_factor, max_elmts);
   }
   else if (interp_type < 3)
   {
      /* Simple MGR interpolation variants; Jacobi smoothing sweeps on P
       * (numsweeps) are currently disabled. */
      hypre_MGRBuildP(A, CF_marker, num_cpts_global, interp_type,
                      debug_flag, &interp_mat);
   }
   else
   {
      /* Classical modified interpolation */
      hypre_BoomerAMGBuildInterp(A, CF_marker, S, num_cpts_global, 1, NULL,
                                 debug_flag, trunc_factor, max_elmts,
                                 &interp_mat);
   }

   /* set pointer to P */
   *P = interp_mat;
   return hypre_error_flag;
}
/* Setup restriction operator.
 *
 * Dispatches on restrict_type:
 *   == 0       -> hypre_MGRBuildP on A (injection-style restriction)
 *   == 1, 2    -> hypre_MGRBuildP on A^T
 *   == 3       -> approximate-inverse interpolation on A^T + truncation
 *   otherwise  -> classical modified interpolation on A^T with a freshly
 *                 built strength matrix ST
 * The resulting operator is returned through R.  num_functions, dof_func and
 * numsweeps are currently unused by the active paths.
 */
HYPRE_Int
hypre_MGRBuildRestrict(hypre_ParCSRMatrix *A,
                       HYPRE_Int *CF_marker,
                       HYPRE_BigInt *num_cpts_global,
                       HYPRE_Int num_functions,
                       HYPRE_Int *dof_func,
                       HYPRE_Int debug_flag,
                       HYPRE_Real trunc_factor,
                       HYPRE_Int max_elmts,
                       HYPRE_Real strong_threshold,
                       HYPRE_Real max_row_sum,
                       hypre_ParCSRMatrix **R,
                       HYPRE_Int restrict_type,
                       HYPRE_Int numsweeps)
{
   hypre_ParCSRMatrix *R_ptr = NULL;
   hypre_ParCSRMatrix *AT = NULL;
   hypre_ParCSRMatrix *ST = NULL;

   /* Build AT (transpose A) — needed by every variant except type 0 */
   if (restrict_type > 0)
   {
      hypre_ParCSRMatrixTranspose(A, &AT, 1);
   }

   /* Restriction for each level */
   if (restrict_type == 0)
   {
      hypre_MGRBuildP(A, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr);
   }
   else if (restrict_type == 1 || restrict_type == 2)
   {
      hypre_MGRBuildP(AT, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr);
   }
   else if (restrict_type == 3)
   {
      /* move diagonal to first entry */
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(AT));
      hypre_MGRBuildInterpApproximateInverse(AT, CF_marker, num_cpts_global, debug_flag, &R_ptr);
      hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts);
   }
   else
   {
      /* Build new strength matrix */
      hypre_BoomerAMGCreateS(AT, strong_threshold, max_row_sum, 1, NULL, &ST);
      /* Classical modified interpolation */
      hypre_BoomerAMGBuildInterp(AT, CF_marker, ST, num_cpts_global, 1, NULL, debug_flag,
                                 trunc_factor, max_elmts, &R_ptr);
   }

   /* set pointer to R */
   *R = R_ptr;

   /* Free memory */
   if (restrict_type > 0)
   {
      hypre_ParCSRMatrixDestroy(AT);
   }
   /* BUGFIX: was `if (restrict_type > 5)`, which leaked ST for
    * restrict_type 4 and 5 (and any other value reaching the default
    * branch above).  Destroy ST whenever it was created. */
   if (ST)
   {
      hypre_ParCSRMatrixDestroy(ST);
   }
   return hypre_error_flag;
}
/**
 * Invert a dense 4x4 matrix in place.
 *
 * @param a  row-major 4x4 matrix (a[0..15]); overwritten with its inverse.
 *
 * Uses the explicit adjugate/cofactor formulas: Mij below is the (j,i)
 * cofactor, so inv(A) = adj(A)/det(A) with det expanded along the first row.
 * WARNING: there is no pivoting or singularity guard — if det == 0 the
 * division below produces inf/nan (the guard is deliberately commented out).
 */
void hypre_blas_smat_inv_n4 (HYPRE_Real *a)
{
   const HYPRE_Real a11 = a[0],  a12 = a[1],  a13 = a[2],  a14 = a[3];
   const HYPRE_Real a21 = a[4],  a22 = a[5],  a23 = a[6],  a24 = a[7];
   const HYPRE_Real a31 = a[8],  a32 = a[9],  a33 = a[10], a34 = a[11];
   const HYPRE_Real a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15];

   /* Cofactors (already transposed: M[i][j] is the cofactor of a[j][i]) */
   const HYPRE_Real M11 = a22 * a33 * a44 + a23 * a34 * a42 + a24 * a32 * a43 - a22 * a34 * a43 - a23 *
                          a32 * a44 - a24 * a33 * a42;
   const HYPRE_Real M12 = a12 * a34 * a43 + a13 * a32 * a44 + a14 * a33 * a42 - a12 * a33 * a44 - a13 *
                          a34 * a42 - a14 * a32 * a43;
   const HYPRE_Real M13 = a12 * a23 * a44 + a13 * a24 * a42 + a14 * a22 * a43 - a12 * a24 * a43 - a13 *
                          a22 * a44 - a14 * a23 * a42;
   const HYPRE_Real M14 = a12 * a24 * a33 + a13 * a22 * a34 + a14 * a23 * a32 - a12 * a23 * a34 - a13 *
                          a24 * a32 - a14 * a22 * a33;
   const HYPRE_Real M21 = a21 * a34 * a43 + a23 * a31 * a44 + a24 * a33 * a41 - a21 * a33 * a44 - a23 *
                          a34 * a41 - a24 * a31 * a43;
   const HYPRE_Real M22 = a11 * a33 * a44 + a13 * a34 * a41 + a14 * a31 * a43 - a11 * a34 * a43 - a13 *
                          a31 * a44 - a14 * a33 * a41;
   const HYPRE_Real M23 = a11 * a24 * a43 + a13 * a21 * a44 + a14 * a23 * a41 - a11 * a23 * a44 - a13 *
                          a24 * a41 - a14 * a21 * a43;
   const HYPRE_Real M24 = a11 * a23 * a34 + a13 * a24 * a31 + a14 * a21 * a33 - a11 * a24 * a33 - a13 *
                          a21 * a34 - a14 * a23 * a31;
   const HYPRE_Real M31 = a21 * a32 * a44 + a22 * a34 * a41 + a24 * a31 * a42 - a21 * a34 * a42 - a22 *
                          a31 * a44 - a24 * a32 * a41;
   const HYPRE_Real M32 = a11 * a34 * a42 + a12 * a31 * a44 + a14 * a32 * a41 - a11 * a32 * a44 - a12 *
                          a34 * a41 - a14 * a31 * a42;
   const HYPRE_Real M33 = a11 * a22 * a44 + a12 * a24 * a41 + a14 * a21 * a42 - a11 * a24 * a42 - a12 *
                          a21 * a44 - a14 * a22 * a41;
   const HYPRE_Real M34 = a11 * a24 * a32 + a12 * a21 * a34 + a14 * a22 * a31 - a11 * a22 * a34 - a12 *
                          a24 * a31 - a14 * a21 * a32;
   const HYPRE_Real M41 = a21 * a33 * a42 + a22 * a31 * a43 + a23 * a32 * a41 - a21 * a32 * a43 - a22 *
                          a33 * a41 - a23 * a31 * a42;
   const HYPRE_Real M42 = a11 * a32 * a43 + a12 * a33 * a41 + a13 * a31 * a42 - a11 * a33 * a42 - a12 *
                          a31 * a43 - a13 * a32 * a41;
   const HYPRE_Real M43 = a11 * a23 * a42 + a12 * a21 * a43 + a13 * a22 * a41 - a11 * a22 * a43 - a12 *
                          a23 * a41 - a13 * a21 * a42;
   const HYPRE_Real M44 = a11 * a22 * a33 + a12 * a23 * a31 + a13 * a21 * a32 - a11 * a23 * a32 - a12 *
                          a21 * a33 - a13 * a22 * a31;

   /* Determinant via first-row expansion over the (transposed) cofactors */
   const HYPRE_Real det = a11 * M11 + a12 * M21 + a13 * M31 + a14 * M41;
   HYPRE_Real det_inv;
   //if ( fabs(det) < 1e-22 ) {
   //hypre_printf("### WARNING: Matrix is nearly singular! det = %e\n", det);
   /*
    printf("##----------------------------------------------\n");
    printf("## %12.5e %12.5e %12.5e \n", a0, a1, a2);
    printf("## %12.5e %12.5e %12.5e \n", a3, a4, a5);
    printf("## %12.5e %12.5e %12.5e \n", a5, a6, a7);
    printf("##----------------------------------------------\n");
    getchar();
    */
   //}
   det_inv = 1.0 / det;

   /* Write the inverse back over the input */
   a[0] = M11 * det_inv;  a[1] = M12 * det_inv;  a[2] = M13 * det_inv;  a[3] = M14 * det_inv;
   a[4] = M21 * det_inv;  a[5] = M22 * det_inv;  a[6] = M23 * det_inv;  a[7] = M24 * det_inv;
   a[8] = M31 * det_inv;  a[9] = M32 * det_inv;  a[10] = M33 * det_inv; a[11] = M34 * det_inv;
   a[12] = M41 * det_inv; a[13] = M42 * det_inv; a[14] = M43 * det_inv; a[15] = M44 * det_inv;
}
/*--------------------------------------------------------------------------
 * hypre_blas_mat_inv
 *
 * Inverts the dense n x n matrix stored row-major in a, in place.
 * n == 4 is dispatched to the closed-form cofactor routine; every other
 * order is handled by a Gauss-Jordan sweep without pivoting.
 * NOTE(review): no pivot check is performed here -- the commented-out
 * SMALLREAL guard suggests callers must ensure nonzero diagonals.
 *--------------------------------------------------------------------------*/
void hypre_blas_mat_inv(HYPRE_Real *a,
                        HYPRE_Int n)
{
   HYPRE_Int row, col, piv;
   HYPRE_Int piv_row, row_off, idx;
   HYPRE_Real pivinv;

   if (n == 4)
   {
      /* Specialized 4x4 path. */
      hypre_blas_smat_inv_n4(a);
      return;
   }

   for (piv = 0; piv < n; ++piv)
   {
      piv_row = piv * n;
      idx = piv_row + piv;
      /* Replace the pivot by its reciprocal, then scale the pivot row. */
      pivinv = 1.0 / a[idx];
      a[idx] = pivinv;
      for (col = 0; col < n; ++col)
      {
         if (col != piv)
         {
            a[piv_row + col] *= pivinv;
         }
      }
      /* Eliminate the pivot column from every other row. */
      for (row = 0; row < n; ++row)
      {
         if (row == piv)
         {
            continue;
         }
         row_off = row * n;
         for (col = 0; col < n; ++col)
         {
            if (col != piv)
            {
               a[row_off + col] -= a[row_off + piv] * a[piv_row + col];
            }
         }
      }
      /* Finish the pivot column entries of the inverse. */
      for (row = 0; row < n; ++row)
      {
         if (row != piv)
         {
            a[row * n + piv] *= -pivinv;
         }
      }
   }
}
/*--------------------------------------------------------------------------
 * hypre_block_jacobi_scaling
 *
 * Builds B, a block-Jacobi scaling matrix for A: B's local diagonal part
 * holds the inverses of the blk_size x blk_size diagonal sub-blocks of
 * A's local diagonal part; B's off-diagonal part is left empty.  Also
 * refreshes the packed block-inverse array cached on the MGR data via
 * hypre_blockRelax_setup.  Returns B through *B_ptr and the (currently
 * always zero) block_scaling_error flag.
 *
 * Fix: the per-block scratch buffer diaginv was leaked on every call;
 * it is now freed before returning.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_block_jacobi_scaling(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **B_ptr,
                                     void *mgr_vdata, HYPRE_Int debug_flag)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int blk_size = (mgr_data -> block_size);
   HYPRE_Int reserved_coarse_size = (mgr_data -> reserved_coarse_size);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_ParCSRMatrix *B;
   hypre_CSRMatrix *B_diag;
   HYPRE_Real *B_diag_data;
   HYPRE_Int *B_diag_i;
   HYPRE_Int *B_diag_j;
   hypre_CSRMatrix *B_offd;
   HYPRE_Int i, ii;
   HYPRE_Int j, jj;
   HYPRE_Int k;
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int n_block, left_size, inv_size;
   HYPRE_Int bidx, bidxm1, bidxp1;
   HYPRE_Real *diaginv;
   const HYPRE_Int nb2 = blk_size * blk_size;
   HYPRE_Int block_scaling_error = 0;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* NOTE(review): my_id ranges over [0, num_procs-1], so this branch is
    * never taken; presumably my_id == num_procs - 1 (last rank holds the
    * reserved coarse dofs) was intended -- confirm before changing. */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size * n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size * n_block;
   }
   inv_size = nb2 * n_block + left_size * left_size;

   /* Also (re)build the block-inverse array cached on the MGR data. */
   hypre_blockRelax_setup(A, blk_size, reserved_coarse_size, &(mgr_data -> diaginv));

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of B and fill in
    *-----------------------------------------------------------------------*/
   B_diag_i = hypre_CTAlloc(HYPRE_Int, n + 1, HYPRE_MEMORY_HOST);
   B_diag_j = hypre_CTAlloc(HYPRE_Int, inv_size, HYPRE_MEMORY_HOST);
   B_diag_data = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
   B_diag_i[n] = inv_size;
   /* NOTE(review): rows n_block*blk_size .. n-1 (left_size of them) keep
    * the zero row pointers from hypre_CTAlloc and are never filled below;
    * verify left_size is always 0 for callers of this routine. */

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   diaginv = hypre_CTAlloc(HYPRE_Real, nb2, HYPRE_MEMORY_HOST);
   for (i = 0; i < n_block; i++)
   {
      bidxm1 = i * blk_size;       /* first row of block i  */
      bidxp1 = (i + 1) * blk_size; /* one past its last row */
      for (k = 0; k < blk_size; k++)
      {
         /* Zero the scratch block, then scatter A's in-block entries. */
         for (j = 0; j < blk_size; j++)
         {
            bidx = k * blk_size + j;
            diaginv[bidx] = 0.0;
         }
         for (ii = A_diag_i[bidxm1 + k]; ii < A_diag_i[bidxm1 + k + 1]; ii++)
         {
            jj = A_diag_j[ii];
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = k * blk_size + jj - bidxm1;
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }

      /* Invert the block in place and copy it into B's CSR arrays. */
      hypre_blas_mat_inv(diaginv, blk_size);
      for (k = 0; k < blk_size; k++)
      {
         B_diag_i[i * blk_size + k] = i * nb2 + k * blk_size;
         for (j = 0; j < blk_size; j++)
         {
            bidx = i * nb2 + k * blk_size + j;
            B_diag_j[bidx] = i * blk_size + j;
            B_diag_data[bidx] = diaginv[k * blk_size + j];
         }
      }
   }

   B = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                hypre_ParCSRMatrixGlobalNumCols(A),
                                hypre_ParCSRMatrixRowStarts(A),
                                hypre_ParCSRMatrixColStarts(A),
                                0,
                                inv_size,
                                0);

   /* Hand the freshly built CSR arrays to B; the offd part stays empty. */
   B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrixData(B_diag) = B_diag_data;
   hypre_CSRMatrixI(B_diag) = B_diag_i;
   hypre_CSRMatrixJ(B_diag) = B_diag_j;
   B_offd = hypre_ParCSRMatrixOffd(B);
   hypre_CSRMatrixData(B_offd) = NULL;
   hypre_CSRMatrixI(B_offd) = NULL;
   hypre_CSRMatrixJ(B_offd) = NULL;

   /* Fix: release the per-block scratch buffer (was leaked). */
   hypre_TFree(diaginv, HYPRE_MEMORY_HOST);

   *B_ptr = B;
   return (block_scaling_error);
}
/*--------------------------------------------------------------------------
 * hypre_blockRelax_solve
 *
 * One block-relaxation sweep on u for A u = f.  diaginv holds the
 * pre-inverted blk_size x blk_size diagonal blocks of A (n_block of them,
 * packed consecutively).  method 0 forms the residual against the old
 * iterate saved in Vtemp (block Jacobi); method 1 uses the updated u
 * (block Gauss-Seidel); any other value falls back to Jacobi.
 * Off-process couplings always use the old ghost values received into
 * Vext_data.  Rows past n_block*blk_size (left_size of them) are not
 * relaxed here.  Returns relax_error (currently always 0).
 *
 * NOTE(review): blk_size is declared HYPRE_Real but used as a count
 * (allocation size, loop bound, nb2) -- presumably it should be
 * HYPRE_Int; confirm against the prototype before changing.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_blockRelax_solve (hypre_ParCSRMatrix *A,
                                  hypre_ParVector *f,
                                  hypre_ParVector *u,
                                  HYPRE_Real blk_size,
                                  HYPRE_Int n_block,
                                  HYPRE_Int left_size,
                                  HYPRE_Int method,
                                  HYPRE_Real *diaginv,
                                  hypre_ParVector *Vtemp)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real *u_data = hypre_VectorData(u_local);
   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Real *f_data = hypre_VectorData(f_local);
   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
   HYPRE_Real *Vext_data = NULL;
   HYPRE_Real *v_buf_data;
   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx, bidx1;
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_sends;
   HYPRE_Int index, start;
   HYPRE_Int num_procs, my_id;
   HYPRE_Real *res;
   /* blk_size is HYPRE_Real, so this product truncates to an integer. */
   const HYPRE_Int nb2 = blk_size * blk_size;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   // HYPRE_Int num_threads = hypre_NumThreads();

   /* Per-block residual scratch (one entry per row of a block). */
   res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   if (num_procs > 1)
   {
      /* Start the exchange of boundary values of u; received ghost
       * values land in Vext_data (indexed by offd column). */
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }
      /* Pack the send buffer in comm-pkg order. */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            v_buf_data[index++]
               = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
                                                  Vext_data);
   }

   /*-----------------------------------------------------------------
    * Copy current approximation into temporary vector.
    *-----------------------------------------------------------------*/
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n; i++)
   {
      Vtemp_data[i] = u_data[i];
      //printf("u_old[%d] = %e\n",i,Vtemp_data[i]);
   }

   /* Complete the ghost-value exchange before using Vext_data. */
   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   /*-----------------------------------------------------------------
    * Relax points block by block
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      /* Residual for each row of block i. */
      for (j = 0; j < blk_size; j++)
      {
         bidx = i * blk_size + j;
         res[j] = f_data[bidx];
         for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx + 1]; jj++)
         {
            ii = A_diag_j[jj];
            if (method == 0)
            {
               // Jacobi for diagonal part
               res[j] -= A_diag_data[jj] * Vtemp_data[ii];
            }
            else if (method == 1)
            {
               // Gauss-Seidel for diagonal part
               res[j] -= A_diag_data[jj] * u_data[ii];
            }
            else
            {
               // Default do Jacobi for diagonal part
               res[j] -= A_diag_data[jj] * Vtemp_data[ii];
            }
            //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]);
         }
         for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx + 1]; jj++)
         {
            // always do Jacobi for off-diagonal part
            ii = A_offd_j[jj];
            res[j] -= A_offd_data[jj] * Vext_data[ii];
         }
         //printf("%d: res = %e\n",bidx,res[j]);
      }
      /* u_block += inv(D_block) * res, using block i's packed inverse. */
      for (j = 0; j < blk_size; j++)
      {
         bidx1 = i * blk_size + j;
         for (k = 0; k < blk_size; k++)
         {
            bidx = i * nb2 + j * blk_size + k;
            u_data[bidx1] += res[k] * diaginv[bidx];
            //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]);
         }
         //printf("u[%d] = %e\n",bidx1,u_data[bidx1]);
      }
   }

   if (num_procs > 1)
   {
      hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(res, HYPRE_MEMORY_HOST);
   return (relax_error);
}
/*--------------------------------------------------------------------------
 * hypre_block_gs
 *
 * One block Gauss-Seidel sweep on u for A u = f.  Identical in structure
 * to hypre_blockRelax_solve with method == 1: the diagonal-part residual
 * always uses the updated u, while off-process couplings use the old
 * ghost values received into Vext_data.  diaginv holds the pre-inverted
 * blk_size x blk_size diagonal blocks of A.  Rows past n_block*blk_size
 * (left_size of them) are not relaxed.  Returns relax_error (always 0).
 *
 * NOTE(review): blk_size is declared HYPRE_Real but used as a count --
 * presumably it should be HYPRE_Int; confirm against the prototype.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_block_gs (hypre_ParCSRMatrix *A,
                          hypre_ParVector *f,
                          hypre_ParVector *u,
                          HYPRE_Real blk_size,
                          HYPRE_Int n_block,
                          HYPRE_Int left_size,
                          HYPRE_Real *diaginv,
                          hypre_ParVector *Vtemp)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real *u_data = hypre_VectorData(u_local);
   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Real *f_data = hypre_VectorData(f_local);
   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
   HYPRE_Real *Vext_data = NULL;
   HYPRE_Real *v_buf_data;
   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx, bidx1;
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_sends;
   HYPRE_Int index, start;
   HYPRE_Int num_procs, my_id;
   HYPRE_Real *res;
   /* blk_size is HYPRE_Real, so this product truncates to an integer. */
   const HYPRE_Int nb2 = blk_size * blk_size;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //HYPRE_Int num_threads = hypre_NumThreads();

   /* Per-block residual scratch. */
   res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   if (num_procs > 1)
   {
      /* Start the exchange of boundary values of u; received ghost
       * values land in Vext_data. */
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }
      /* Pack the send buffer in comm-pkg order. */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            v_buf_data[index++]
               = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
                                                  Vext_data);
   }

   /*-----------------------------------------------------------------
    * Copy current approximation into temporary vector.
    *-----------------------------------------------------------------*/
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n; i++)
   {
      Vtemp_data[i] = u_data[i];
      //printf("u_old[%d] = %e\n",i,Vtemp_data[i]);
   }

   /* Complete the ghost-value exchange before using Vext_data. */
   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   /*-----------------------------------------------------------------
    * Relax points block by block
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      /* Residual for each row of block i, Gauss-Seidel style: the
       * diagonal part reads the already-updated u. */
      for (j = 0; j < blk_size; j++)
      {
         bidx = i * blk_size + j;
         res[j] = f_data[bidx];
         for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx + 1]; jj++)
         {
            ii = A_diag_j[jj];
            //res[j] -= A_diag_data[jj] * Vtemp_data[ii];
            //printf("my_id = %d, %d: Au = %e * %e\n",my_id,ii,A_diag_data[jj],Vtemp_data[ii]);
            res[j] -= A_diag_data[jj] * u_data[ii];
            //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]);
         }
         for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx + 1]; jj++)
         {
            ii = A_offd_j[jj];
            res[j] -= A_offd_data[jj] * Vext_data[ii];
         }
         //printf("%d: res = %e\n",bidx,res[j]);
      }
      /* u_block += inv(D_block) * res, using block i's packed inverse. */
      for (j = 0; j < blk_size; j++)
      {
         bidx1 = i * blk_size + j;
         for (k = 0; k < blk_size; k++)
         {
            bidx = i * nb2 + j * blk_size + k;
            u_data[bidx1] += res[k] * diaginv[bidx];
            //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]);
         }
         //printf("u[%d] = %e\n",bidx1,u_data[bidx1]);
      }
   }

   if (num_procs > 1)
   {
      hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(res, HYPRE_MEMORY_HOST);
   return (relax_error);
}
/*Block smoother*/
/*--------------------------------------------------------------------------
 * hypre_blockRelax_setup
 *
 * Extracts the blk_size x blk_size diagonal sub-blocks of A's local
 * diagonal part, inverts each in place, and returns the packed array of
 * inverses through *diaginvptr (any previous array is freed first).  The
 * remainder rows that do not fill a whole block (left_size of them) are
 * packed as one dense left_size x left_size matrix after the n_block full
 * blocks and inverted as a unit.
 *
 * Fixes to the left (remainder) block handling:
 *  - it is now filled with row stride left_size, matching the stride the
 *    dense inversion assumes (previously stride blk_size);
 *  - the column test uses >= so the block's first column (including a
 *    diagonal entry) is no longer skipped;
 *  - it is inverted at offset n_block*nb2 where it was stored
 *    (previously blk_size*nb2).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_blockRelax_setup(hypre_ParCSRMatrix *A,
                       HYPRE_Int blk_size,
                       HYPRE_Int reserved_coarse_size,
                       HYPRE_Real **diaginvptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx, bidxm1, bidxp1;
   HYPRE_Int num_procs, my_id;
   const HYPRE_Int nb2 = blk_size * blk_size;
   HYPRE_Int n_block;
   HYPRE_Int left_size, inv_size;
   HYPRE_Real *diaginv = *diaginvptr;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* NOTE(review): my_id is in [0, num_procs-1], so this branch never
    * fires; presumably num_procs - 1 was intended -- confirm upstream. */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size * n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size * n_block;
   }
   inv_size = nb2 * n_block + left_size * left_size;

   /* (Re)allocate the output array, dropping any previous contents. */
   if (diaginv != NULL)
   {
      hypre_TFree(diaginv, HYPRE_MEMORY_HOST);
   }
   diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      bidxm1 = i * blk_size;       /* first row of block i  */
      bidxp1 = (i + 1) * blk_size; /* one past its last row */
      for (k = 0; k < blk_size; k++)
      {
         for (j = 0; j < blk_size; j++)
         {
            bidx = i * nb2 + k * blk_size + j;
            diaginv[bidx] = 0.0;
         }
         /* Scatter A's entries that fall inside block i. */
         for (ii = A_diag_i[bidxm1 + k]; ii < A_diag_i[bidxm1 + k + 1]; ii++)
         {
            jj = A_diag_j[ii];
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = i * nb2 + k * blk_size + jj - bidxm1;
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }
   }

   /* Left (remainder) block: packed densely after the n_block full
    * blocks with row stride left_size. */
   for (i = 0; i < left_size; i++)
   {
      for (j = 0; j < left_size; j++)
      {
         bidx = n_block * nb2 + i * left_size + j;
         diaginv[bidx] = 0.0;
      }
      for (ii = A_diag_i[n_block * blk_size + i]; ii < A_diag_i[n_block * blk_size + i + 1]; ii++)
      {
         jj = A_diag_j[ii];
         if (jj >= n_block * blk_size)
         {
            bidx = n_block * nb2 + i * left_size + (jj - n_block * blk_size);
            diaginv[bidx] = A_diag_data[ii];
         }
      }
   }

   /*-----------------------------------------------------------------
    * compute the inverses of all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   if (blk_size > 1)
   {
      for (i = 0; i < n_block; i++)
      {
         hypre_blas_mat_inv(diaginv + i * nb2, blk_size);
      }
      /* The left block lives at offset n_block * nb2. */
      if (left_size > 0)
      {
         hypre_blas_mat_inv(diaginv + n_block * nb2, left_size);
      }
   }
   else
   {
      for (i = 0; i < n; i++)
      {
         /* Scalar case: invert each diagonal entry, leaving (near-)zero
          * diagonals at zero.  FIX-ME: zero diagonals should have been
          * caught earlier. */
         if (fabs(diaginv[i]) < SMALLREAL)
         {
            diaginv[i] = 0.0;
         }
         else
         {
            diaginv[i] = 1.0 / diaginv[i];
         }
      }
   }

   *diaginvptr = diaginv;
   return 1;
}
/*--------------------------------------------------------------------------
 * hypre_blockRelax
 *
 * Convenience driver: extracts and inverts the diagonal sub-blocks of A
 * into a temporary array (same layout as hypre_blockRelax_setup), runs
 * one block-relaxation sweep on u via hypre_blockRelax_solve, and frees
 * the temporary inverses.  method selects the sweep (0 Jacobi, 1
 * Gauss-Seidel on the diagonal part).  Ztemp is currently unused.
 *
 * Fixes (same left/remainder-block defects as hypre_blockRelax_setup):
 * fill stride left_size instead of blk_size, >= column test so the
 * block's first column is kept, and inversion at offset n_block*nb2.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_blockRelax(hypre_ParCSRMatrix *A,
                 hypre_ParVector *f,
                 hypre_ParVector *u,
                 HYPRE_Int blk_size,
                 HYPRE_Int reserved_coarse_size,
                 HYPRE_Int method,
                 hypre_ParVector *Vtemp,
                 hypre_ParVector *Ztemp)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx, bidxm1, bidxp1;
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_procs, my_id;
   const HYPRE_Int nb2 = blk_size * blk_size;
   HYPRE_Int n_block;
   HYPRE_Int left_size, inv_size;
   HYPRE_Real *diaginv;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* NOTE(review): my_id is in [0, num_procs-1], so this branch never
    * fires; presumably num_procs - 1 was intended -- confirm upstream. */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size * n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size * n_block;
   }
   inv_size = nb2 * n_block + left_size * left_size;
   diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      bidxm1 = i * blk_size;       /* first row of block i  */
      bidxp1 = (i + 1) * blk_size; /* one past its last row */
      for (k = 0; k < blk_size; k++)
      {
         for (j = 0; j < blk_size; j++)
         {
            bidx = i * nb2 + k * blk_size + j;
            diaginv[bidx] = 0.0;
         }
         /* Scatter A's entries that fall inside block i. */
         for (ii = A_diag_i[bidxm1 + k]; ii < A_diag_i[bidxm1 + k + 1]; ii++)
         {
            jj = A_diag_j[ii];
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = i * nb2 + k * blk_size + jj - bidxm1;
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }
   }

   /* Left (remainder) block: packed densely after the n_block full
    * blocks with row stride left_size. */
   for (i = 0; i < left_size; i++)
   {
      for (j = 0; j < left_size; j++)
      {
         bidx = n_block * nb2 + i * left_size + j;
         diaginv[bidx] = 0.0;
      }
      for (ii = A_diag_i[n_block * blk_size + i]; ii < A_diag_i[n_block * blk_size + i + 1]; ii++)
      {
         jj = A_diag_j[ii];
         if (jj >= n_block * blk_size)
         {
            bidx = n_block * nb2 + i * left_size + (jj - n_block * blk_size);
            diaginv[bidx] = A_diag_data[ii];
         }
      }
   }

   /*-----------------------------------------------------------------
    * compute the inverses of all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   if (blk_size > 1)
   {
      for (i = 0; i < n_block; i++)
      {
         hypre_blas_mat_inv(diaginv + i * nb2, blk_size);
      }
      /* The left block lives at offset n_block * nb2. */
      if (left_size > 0)
      {
         hypre_blas_mat_inv(diaginv + n_block * nb2, left_size);
      }
   }
   else
   {
      for (i = 0; i < n; i++)
      {
         /* Scalar case: invert each diagonal entry, leaving (near-)zero
          * diagonals at zero.  FIX-ME: zero diagonals should have been
          * caught earlier. */
         if (fabs(diaginv[i]) < SMALLREAL)
         {
            diaginv[i] = 0.0;
         }
         else
         {
            diaginv[i] = 1.0 / diaginv[i];
         }
      }
   }

   /* One relaxation sweep with the freshly built inverses. */
   hypre_blockRelax_solve(A, f, u, blk_size, n_block, left_size, method, diaginv, Vtemp);

   /*-----------------------------------------------------------------
    * Free temporary memory
    *-----------------------------------------------------------------*/
   hypre_TFree(diaginv, HYPRE_MEMORY_HOST);
   return (relax_error);
}
/* Set the user-supplied F-relaxation solver (first reduction level only). */
HYPRE_Int
hypre_MGRSetFSolver( void *mgr_vdata,
                     HYPRE_Int (*fine_grid_solver_solve)(void*, void*, void*, void*),
                     HYPRE_Int (*fine_grid_solver_setup)(void*, void*, void*, void*),
                     void *fsolver )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData*) mgr_vdata;

   if (mgr == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   HYPRE_Int num_levels = (mgr -> max_num_coarse_levels);
   HYPRE_Solver **fsolvers = (mgr -> aff_solver);

   /* Lazily create the per-level solver array on first use. */
   if (fsolvers == NULL)
   {
      fsolvers = hypre_CTAlloc(HYPRE_Solver*, num_levels, HYPRE_MEMORY_HOST);
   }

   /* only allow to set F-solver for the first level */
   fsolvers[0] = (HYPRE_Solver *) fsolver;

   (mgr -> fine_grid_solver_solve) = fine_grid_solver_solve;
   (mgr -> fine_grid_solver_setup) = fine_grid_solver_setup;
   (mgr -> aff_solver) = fsolvers;
   (mgr -> use_default_fsolver) = 0;
   return hypre_error_flag;
}
/* Set the user-supplied coarse grid solver and its setup/solve hooks. */
HYPRE_Int
hypre_MGRSetCoarseSolver( void *mgr_vdata,
                          HYPRE_Int (*coarse_grid_solver_solve)(void*, void*, void*, void*),
                          HYPRE_Int (*coarse_grid_solver_setup)(void*, void*, void*, void*),
                          void *coarse_grid_solver )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData*) mgr_vdata;

   if (mgr == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Install the callbacks and mark the default solver as overridden. */
   (mgr -> coarse_grid_solver) = (HYPRE_Solver) coarse_grid_solver;
   (mgr -> coarse_grid_solver_setup) = coarse_grid_solver_setup;
   (mgr -> coarse_grid_solver_solve) = coarse_grid_solver_solve;
   (mgr -> use_default_cgrid_solver) = 0;
   return hypre_error_flag;
}
/* Set a precomputed approximate inverse of the A_ff block. */
HYPRE_Int
hypre_MGRSetAffInv( void *mgr_vdata,
                    hypre_ParCSRMatrix *A_ff_inv )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   (mgr_data -> A_ff_inv) = A_ff_inv;
   return hypre_error_flag;
}
/* Set the maximum number of coarse levels.
 * maxcoarselevs = 1 yields the default 2-grid scheme.
 */
HYPRE_Int
hypre_MGRSetMaxCoarseLevels( void *mgr_vdata, HYPRE_Int maxcoarselevs )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   (mgr_data -> max_num_coarse_levels) = maxcoarselevs;
   return hypre_error_flag;
}
/* Set the system block size */
HYPRE_Int
hypre_MGRSetBlockSize( void *mgr_vdata, HYPRE_Int bsize )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   (mgr_data -> block_size) = bsize;
   return hypre_error_flag;
}
/* Set the relaxation type for the fine levels of the reduction.
 * Currently supports the following flavors of relaxation types
 * as described in the documentation:
 * relax_types 0 - 8, 13, 14, 18, 19, 98.
 * See par_relax.c and par_relax_more.c for more details.
 *
 */
HYPRE_Int
hypre_MGRSetRelaxType( void *mgr_vdata, HYPRE_Int relax_type )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   (mgr_data -> relax_type) = relax_type;
   return hypre_error_flag;
}
/* Set the number of relaxation sweeps */
HYPRE_Int
hypre_MGRSetNumRelaxSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   (mgr_data -> num_relax_sweeps) = nsweeps;
   return hypre_error_flag;
}
/* Set the F-relaxation strategy: 0=single level, 1=multi level */
HYPRE_Int
hypre_MGRSetFRelaxMethod( void *mgr_vdata, HYPRE_Int relax_method )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   /* Drop any previously set per-level array before rebuilding it. */
   if ((mgr_data -> Frelax_method) != NULL)
   {
      hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST);
      (mgr_data -> Frelax_method) = NULL;
   }
   /* Apply the same strategy on every reduction level. */
   HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_coarse_levels; i++)
   {
      Frelax_method[i] = relax_method;
   }
   (mgr_data -> Frelax_method) = Frelax_method;
   return hypre_error_flag;
}
/* Set the F-relaxation strategy per level: 0=single level, 1=multi level.
 * relax_method may be NULL, in which case all levels default to 0. */
HYPRE_Int
hypre_MGRSetLevelFRelaxMethod( void *mgr_vdata, HYPRE_Int *relax_method )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   /* Drop any previously set per-level array before rebuilding it. */
   if ((mgr_data -> Frelax_method) != NULL)
   {
      hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST);
      (mgr_data -> Frelax_method) = NULL;
   }
   HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
   if (relax_method != NULL)
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         Frelax_method[i] = relax_method[i];
      }
   }
   else
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         Frelax_method[i] = 0;
      }
   }
   (mgr_data -> Frelax_method) = Frelax_method;
   return hypre_error_flag;
}
/* Coarse grid method: 0=Galerkin RAP, 1=non-Galerkin with dropping.
 * cg_method may be NULL, in which case all levels default to 0. */
HYPRE_Int
hypre_MGRSetCoarseGridMethod( void *mgr_vdata, HYPRE_Int *cg_method )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   /* Drop any previously set per-level array before rebuilding it. */
   if ((mgr_data -> use_non_galerkin_cg) != NULL)
   {
      hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST);
      (mgr_data -> use_non_galerkin_cg) = NULL;
   }
   HYPRE_Int *use_non_galerkin_cg = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
   if (cg_method != NULL)
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         use_non_galerkin_cg[i] = cg_method[i];
      }
   }
   else
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         use_non_galerkin_cg[i] = 0;
      }
   }
   (mgr_data -> use_non_galerkin_cg) = use_non_galerkin_cg;
   return hypre_error_flag;
}
/* Set the F-relaxation number of functions for each level.
 * num_functions may be NULL, in which case all levels default to 1. */
HYPRE_Int
hypre_MGRSetLevelFRelaxNumFunctions( void *mgr_vdata, HYPRE_Int *num_functions )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   /* Drop any previously set per-level array before rebuilding it. */
   if ((mgr_data -> Frelax_num_functions) != NULL)
   {
      hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST);
      (mgr_data -> Frelax_num_functions) = NULL;
   }
   HYPRE_Int *Frelax_num_functions = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels,
                                                   HYPRE_MEMORY_HOST);
   if (num_functions != NULL)
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         Frelax_num_functions[i] = num_functions[i];
      }
   }
   else
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         Frelax_num_functions[i] = 1;
      }
   }
   (mgr_data -> Frelax_num_functions) = Frelax_num_functions;
   return hypre_error_flag;
}
/* Set the restriction type per level for computing the restriction
 * operator.  restrict_type may be NULL, in which case all levels
 * default to 0. */
HYPRE_Int
hypre_MGRSetLevelRestrictType( void *mgr_vdata, HYPRE_Int *restrict_type)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   /* Drop any previously set per-level array before rebuilding it. */
   if ((mgr_data -> restrict_type) != NULL)
   {
      hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST);
      (mgr_data -> restrict_type) = NULL;
   }
   HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
   if (restrict_type != NULL)
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         level_restrict_type[i] = restrict_type[i];
      }
   }
   else
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         level_restrict_type[i] = 0;
      }
   }
   (mgr_data -> restrict_type) = level_restrict_type;
   return hypre_error_flag;
}
/* Set the restriction type (same on every level) for computing the
 * restriction operator. */
HYPRE_Int
hypre_MGRSetRestrictType( void *mgr_vdata, HYPRE_Int restrict_type)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   /* Drop any previously set per-level array before rebuilding it. */
   if ((mgr_data -> restrict_type) != NULL)
   {
      hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST);
      (mgr_data -> restrict_type) = NULL;
   }
   HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_coarse_levels; i++)
   {
      level_restrict_type[i] = restrict_type;
   }
   (mgr_data -> restrict_type) = level_restrict_type;
   return hypre_error_flag;
}
/* Set the number of Jacobi interpolation iterations
 * for computing restriction operator
 */
HYPRE_Int
hypre_MGRSetNumRestrictSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   (mgr_data -> num_restrict_sweeps) = nsweeps;
   return hypre_error_flag;
}
/* Set the interpolation type (same on every level) for computing the
 * interpolation operator. */
HYPRE_Int
hypre_MGRSetInterpType( void *mgr_vdata, HYPRE_Int interpType)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   /* Drop any previously set per-level array before rebuilding it. */
   if ((mgr_data -> interp_type) != NULL)
   {
      hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST);
      (mgr_data -> interp_type) = NULL;
   }
   HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_coarse_levels; i++)
   {
      level_interp_type[i] = interpType;
   }
   (mgr_data -> interp_type) = level_interp_type;
   return hypre_error_flag;
}
/* Set the interpolation type per level for computing the interpolation
 * operator.  interpType may be NULL, in which case all levels default
 * to 2. */
HYPRE_Int
hypre_MGRSetLevelInterpType( void *mgr_vdata, HYPRE_Int *interpType)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   /* Drop any previously set per-level array before rebuilding it. */
   if ((mgr_data -> interp_type) != NULL)
   {
      hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST);
      (mgr_data -> interp_type) = NULL;
   }
   HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
   if (interpType != NULL)
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         level_interp_type[i] = interpType[i];
      }
   }
   else
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         level_interp_type[i] = 2;
      }
   }
   (mgr_data -> interp_type) = level_interp_type;
   return hypre_error_flag;
}
/* Set the number of Jacobi interpolation iterations
 * for computing interpolation operator
 */
HYPRE_Int
hypre_MGRSetNumInterpSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   (mgr_data -> num_interp_sweeps) = nsweeps;
   return hypre_error_flag;
}
/* Set the threshold to truncate the coarse grid at each
 * level of reduction
 */
HYPRE_Int
hypre_MGRSetTruncateCoarseGridThreshold( void *mgr_vdata, HYPRE_Real threshold)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   (mgr_data -> truncate_coarse_grid_threshold) = threshold;
   return hypre_error_flag;
}
/* Set print level for F-relaxation solver */
HYPRE_Int
hypre_MGRSetFrelaxPrintLevel( void *mgr_vdata, HYPRE_Int print_level )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   (mgr_data -> frelax_print_level) = print_level;
   return hypre_error_flag;
}
/* Set print level for coarse grid solver */
HYPRE_Int
hypre_MGRSetCoarseGridPrintLevel( void *mgr_vdata, HYPRE_Int print_level )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   (mgr_data -> cg_print_level) = print_level;
   return hypre_error_flag;
}
/* Set print level for mgr solver */
HYPRE_Int
hypre_MGRSetPrintLevel( void *mgr_vdata, HYPRE_Int print_level )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   (mgr_data -> print_level) = print_level;
   return hypre_error_flag;
}
/* Set logging level for mgr solver */
HYPRE_Int
hypre_MGRSetLogging( void *mgr_vdata, HYPRE_Int logging )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   (mgr_data -> logging) = logging;
   return hypre_error_flag;
}
/* Set max number of iterations for mgr solver */
HYPRE_Int
hypre_MGRSetMaxIter( void *mgr_vdata, HYPRE_Int max_iter )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   (mgr_data -> max_iter) = max_iter;
   return hypre_error_flag;
}
/* Set convergence tolerance for mgr solver */
HYPRE_Int
hypre_MGRSetTol( void *mgr_vdata, HYPRE_Real tol )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   (mgr_data -> tol) = tol;
   return hypre_error_flag;
}
/* Set max number of iterations for mgr global smoother */
HYPRE_Int
hypre_MGRSetMaxGlobalsmoothIters( void *mgr_vdata, HYPRE_Int max_iter )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   (mgr_data -> global_smooth_iters) = max_iter;
   return hypre_error_flag;
}
/* Set global smoothing type for mgr solver */
HYPRE_Int
hypre_MGRSetGlobalsmoothType( void *mgr_vdata, HYPRE_Int iter_type )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   (mgr_data -> global_smooth_type) = iter_type;
   return hypre_error_flag;
}
/* Set the maximum number of non-zero entries for restriction
   and interpolation operator if classical AMG interpolation is used */
HYPRE_Int
hypre_MGRSetPMaxElmts( void *mgr_vdata, HYPRE_Int P_max_elmts)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Guard against a NULL handle, consistent with hypre_MGRSetFSolver. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   (mgr_data -> P_max_elmts) = P_max_elmts;
   return hypre_error_flag;
}
/* Retrieve the iteration count of the previous MGR solve. */
HYPRE_Int
hypre_MGRGetNumIterations( void *mgr_vdata, HYPRE_Int *num_iterations )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData*) mgr_vdata;

   if (mgr == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *num_iterations = (mgr -> num_iterations);
   return hypre_error_flag;
}
/* Retrieve the final relative residual norm of the previous MGR solve. */
HYPRE_Int
hypre_MGRGetFinalRelativeResidualNorm( void *mgr_vdata, HYPRE_Real *res_norm )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData*) mgr_vdata;

   if (mgr == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *res_norm = (mgr -> final_rel_residual_norm);
   return hypre_error_flag;
}
/* Retrieve the coarse-grid convergence factor recorded on the MGR data. */
HYPRE_Int
hypre_MGRGetCoarseGridConvergenceFactor( void *mgr_vdata, HYPRE_Real *conv_factor )
{
   hypre_ParMGRData *mgr = (hypre_ParMGRData*) mgr_vdata;

   if (mgr == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *conv_factor = (mgr -> cg_convergence_factor);
   return hypre_error_flag;
}
/* Extract the sub-matrix A(rows marked > 0 in row_cf_marker,
 * cols marked > 0 in col_cf_marker) of A as a new ParCSR matrix.
 *
 * Fix: the hypre_IntArray wrapper `wrap_cf` was allocated but never
 * destroyed (memory leak).  Its data pointer is caller-owned, so it is
 * detached (set to NULL) before hypre_IntArrayDestroy to avoid freeing
 * the caller's marker arrays.
 */
HYPRE_Int
hypre_MGRGetSubBlock( hypre_ParCSRMatrix *A,
                      HYPRE_Int *row_cf_marker,
                      HYPRE_Int *col_cf_marker,
                      HYPRE_Int debug_flag,
                      hypre_ParCSRMatrix **A_block_ptr )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A);

   /* Local (diag) and off-processor (offd) parts of A in CSR form */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   //HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);

   hypre_IntArray *coarse_dof_func_ptr = NULL;
   HYPRE_BigInt num_row_cpts_global[2];
   HYPRE_BigInt num_col_cpts_global[2];

   hypre_ParCSRMatrix *Ablock;
   HYPRE_BigInt *col_map_offd_Ablock = NULL; /* only set when offd part is nonempty */
   HYPRE_Int *tmp_map_offd = NULL;

   HYPRE_Int *CF_marker_offd = NULL;

   hypre_CSRMatrix *Ablock_diag;
   hypre_CSRMatrix *Ablock_offd;

   HYPRE_Real *Ablock_diag_data;
   HYPRE_Int *Ablock_diag_i;
   HYPRE_Int *Ablock_diag_j;
   HYPRE_Real *Ablock_offd_data;
   HYPRE_Int *Ablock_offd_i;
   HYPRE_Int *Ablock_offd_j;

   HYPRE_Int Ablock_diag_size, Ablock_offd_size;
   HYPRE_Int *Ablock_marker = NULL;

   HYPRE_Int ii_counter;
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for Aff_data at 0 */

   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int *fine_to_coarse;
   HYPRE_Int *coarse_counter;
   HYPRE_Int *col_coarse_counter;
   HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_row_cpts = 0; /* valid on all ranks after Bcast */
   HYPRE_BigInt total_global_col_cpts = 0;
   HYPRE_Int num_cols_Ablock_offd;
   // HYPRE_BigInt my_first_row_cpt, my_first_col_cpt;

   HYPRE_Int i, i1;
   HYPRE_Int j, jl, jj;
   HYPRE_Int start;

   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;
   HYPRE_Int *int_buf_data;

   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   hypre_IntArray *wrap_cf;

   // HYPRE_Real wall_time; /* for debugging instrumentation */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //num_threads = hypre_NumThreads();
   // Temporary fix, disable threading
   // TODO: enable threading
   num_threads = 1;

   /* Wrap the caller-owned row marker so hypre_BoomerAMGCoarseParms can
    * count the coarse rows owned by each rank. */
   wrap_cf = hypre_IntArrayCreate(local_numrows);
   hypre_IntArrayMemoryLocation(wrap_cf) = HYPRE_MEMORY_HOST;
   hypre_IntArrayData(wrap_cf) = row_cf_marker;
   hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, wrap_cf, &coarse_dof_func_ptr,
                              num_row_cpts_global);
   hypre_IntArrayDestroy(coarse_dof_func_ptr);
   coarse_dof_func_ptr = NULL;
   //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_row_cpts_global[0], num_row_cpts_global[1]);
   // my_first_row_cpt = num_row_cpts_global[0];
   /* The last rank holds the global row count; broadcast it to everyone. */
   if (my_id == (num_procs - 1)) { total_global_row_cpts = num_row_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_row_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   /* Same computation for the column marker (reusing the wrapper). */
   hypre_IntArrayData(wrap_cf) = col_cf_marker;
   hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, wrap_cf, &coarse_dof_func_ptr,
                              num_col_cpts_global);
   hypre_IntArrayDestroy(coarse_dof_func_ptr);
   coarse_dof_func_ptr = NULL;
   //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_col_cpts_global[0], num_col_cpts_global[1]);
   // my_first_col_cpt = num_col_cpts_global[0];
   if (my_id == (num_procs - 1)) { total_global_col_cpts = num_col_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_col_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   /* Detach the caller-owned data before destroying the wrapper so that
    * hypre_IntArrayDestroy does not free memory we do not own.
    * (Previously wrap_cf was leaked.) */
   hypre_IntArrayData(wrap_cf) = NULL;
   hypre_IntArrayDestroy(wrap_cf);
   wrap_cf = NULL;

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (debug_flag < 0)
   {
      debug_flag = -debug_flag;
   }
   // if (debug_flag==4) wall_time = time_getWallclockSeconds();
   if (num_cols_A_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); }
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                           num_sends), HYPRE_MEMORY_HOST);
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
         int_buf_data[index++]
            = col_cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of Ablock and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   col_coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; }

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/

   /* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* split [0, n_fine) evenly across threads; the first `rest`
       * threads get one extra row */
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * Count the selected rows/columns and the nonzeros retained in each
          * row; set up the local column renumbering.
          *--------------------------------------------------------------------*/
         if (col_cf_marker[i] > 0)
         {
            fine_to_coarse[i] = col_coarse_counter[j];
            col_coarse_counter[j]++;
         }

         if (row_cf_marker[i] > 0)
         {
            //fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;

            for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
            {
               i1 = A_diag_j[jj];
               if (col_cf_marker[i1] > 0)
               {
                  jj_count[j]++;
               }
            }

            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if (CF_marker_offd[i1] > 0)
                  {
                     jj_count_offd[j]++;
                  }
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Turn per-thread counts into prefix sums and allocate arrays.
    *-----------------------------------------------------------------------*/
   for (i = 0; i < num_threads - 1; i++)
   {
      jj_count[i + 1] += jj_count[i];
      jj_count_offd[i + 1] += jj_count_offd[i];
      coarse_counter[i + 1] += coarse_counter[i];
      col_coarse_counter[i + 1] += col_coarse_counter[i];
   }
   i = num_threads - 1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];
   ii_counter = coarse_counter[i];

   Ablock_diag_size = jj_counter;

   Ablock_diag_i = hypre_CTAlloc(HYPRE_Int, ii_counter + 1, memory_location);
   Ablock_diag_j = hypre_CTAlloc(HYPRE_Int, Ablock_diag_size, memory_location);
   Ablock_diag_data = hypre_CTAlloc(HYPRE_Real, Ablock_diag_size, memory_location);

   Ablock_diag_i[ii_counter] = jj_counter;

   Ablock_offd_size = jj_counter_offd;
   Ablock_offd_i = hypre_CTAlloc(HYPRE_Int, ii_counter + 1, memory_location);
   Ablock_offd_j = hypre_CTAlloc(HYPRE_Int, Ablock_offd_size, memory_location);
   Ablock_offd_data = hypre_CTAlloc(HYPRE_Real, Ablock_offd_size, memory_location);

   /*-----------------------------------------------------------------------
    * Intialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   //-----------------------------------------------------------------------
   // Send and receive fine_to_coarse info.
   //-----------------------------------------------------------------------
   // if (debug_flag==4) wall_time = time_getWallclockSeconds();
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* Shift each thread's local column numbering so fine_to_coarse is a
    * globally consistent (process-local) renumbering of kept columns. */
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) { coarse_shift = col_coarse_counter[j - 1]; }
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }
      for (i = ns; i < ne; i++)
      {
         fine_to_coarse[i] += coarse_shift;
      }
   }

   // if (debug_flag==4) wall_time = time_getWallclockSeconds();
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   // for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_col_cpt;

#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,jl,i1,jj,ns,ne,size,rest,jj_counter,jj_counter_offd,ii_counter) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* Second pass: fill Ablock's CSR arrays row by row.  NOTE: the
    * trailing Ablock_*_i[ii_counter] assignments assume num_threads == 1
    * (threading is disabled above). */
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (jl < rest)
      {
         ns = jl * size + jl;
         ne = (jl + 1) * size + jl + 1;
      }
      else
      {
         ns = jl * size + rest;
         ne = (jl + 1) * size + rest;
      }
      jj_counter = 0;
      if (jl > 0) { jj_counter = jj_count[jl - 1]; }
      jj_counter_offd = 0;
      if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }
      ii_counter = 0;
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * For each selected row, copy the entries whose columns are selected.
          *--------------------------------------------------------------------*/
         if (row_cf_marker[i] > 0)
         {
            // Diagonal part of Ablock //
            Ablock_diag_i[ii_counter] = jj_counter;
            for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
            {
               i1 = A_diag_j[jj];
               if (col_cf_marker[i1] > 0)
               {
                  Ablock_diag_j[jj_counter] = fine_to_coarse[i1];
                  Ablock_diag_data[jj_counter] = A_diag_data[jj];
                  jj_counter++;
               }
            }

            // Off-Diagonal part of Ablock //
            Ablock_offd_i[ii_counter] = jj_counter_offd;
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if (CF_marker_offd[i1] > 0)
                  {
                     Ablock_offd_j[jj_counter_offd] = i1;
                     Ablock_offd_data[jj_counter_offd] = A_offd_data[jj];
                     jj_counter_offd++;
                  }
               }
            }
            ii_counter++;
         }
      }
      Ablock_offd_i[ii_counter] = jj_counter_offd;
      Ablock_diag_i[ii_counter] = jj_counter;
   }

   Ablock = hypre_ParCSRMatrixCreate(comm,
                                     total_global_row_cpts,
                                     total_global_col_cpts,
                                     num_row_cpts_global,
                                     num_col_cpts_global,
                                     0,
                                     Ablock_diag_i[ii_counter],
                                     Ablock_offd_i[ii_counter]);

   Ablock_diag = hypre_ParCSRMatrixDiag(Ablock);
   hypre_CSRMatrixData(Ablock_diag) = Ablock_diag_data;
   hypre_CSRMatrixI(Ablock_diag) = Ablock_diag_i;
   hypre_CSRMatrixJ(Ablock_diag) = Ablock_diag_j;
   Ablock_offd = hypre_ParCSRMatrixOffd(Ablock);
   hypre_CSRMatrixData(Ablock_offd) = Ablock_offd_data;
   hypre_CSRMatrixI(Ablock_offd) = Ablock_offd_i;
   hypre_CSRMatrixJ(Ablock_offd) = Ablock_offd_j;

   /* Compress the offd column space: keep only columns actually referenced
    * by Ablock and renumber Ablock_offd_j accordingly. */
   num_cols_Ablock_offd = 0;

   if (Ablock_offd_size)
   {
      Ablock_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      for (i = 0; i < num_cols_A_offd; i++)
      {
         Ablock_marker[i] = 0;
      }

      num_cols_Ablock_offd = 0;
      for (i = 0; i < Ablock_offd_size; i++)
      {
         index = Ablock_offd_j[i];
         if (!Ablock_marker[index])
         {
            num_cols_Ablock_offd++;
            Ablock_marker[index] = 1;
         }
      }

      col_map_offd_Ablock = hypre_CTAlloc(HYPRE_BigInt, num_cols_Ablock_offd, memory_location);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_Ablock_offd, HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_cols_Ablock_offd; i++)
      {
         while (Ablock_marker[index] == 0) { index++; }
         tmp_map_offd[i] = index++;
      }
#if 0
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      for (i = 0; i < Ablock_offd_size; i++)
         Ablock_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                               Ablock_offd_j[i],
                                               num_cols_Ablock_offd);
      hypre_TFree(Ablock_marker, HYPRE_MEMORY_HOST);
   }

   if (num_cols_Ablock_offd)
   {
      hypre_ParCSRMatrixColMapOffd(Ablock) = col_map_offd_Ablock;
      hypre_CSRMatrixNumCols(Ablock_offd) = num_cols_Ablock_offd;
   }

   hypre_GetCommPkgRTFromCommPkgA(Ablock, A, fine_to_coarse, tmp_map_offd);

   /* Create the assumed partition */
   if (hypre_ParCSRMatrixAssumedPartition(Ablock) == NULL)
   {
      hypre_ParCSRMatrixCreateAssumedPartition(Ablock);
   }

   *A_block_ptr = Ablock;

   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(col_coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   return (0);
}
/* Build A_FF matrix from A given a CF_marker array */
HYPRE_Int
hypre_MGRBuildAff( hypre_ParCSRMatrix *A,
                   HYPRE_Int *CF_marker,
                   HYPRE_Int debug_flag,
                   hypre_ParCSRMatrix **A_ff_ptr )
{
   HYPRE_Int row;
   HYPRE_Int num_local_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));

   /* Flip the sign of every marker entry so C-points become F-points and
    * vice versa; the sub-block extraction then selects exactly the F-rows
    * and F-columns of A. */
   HYPRE_Int *flipped_marker = hypre_CTAlloc(HYPRE_Int, num_local_rows, HYPRE_MEMORY_HOST);
   for (row = 0; row < num_local_rows; row++)
   {
      flipped_marker[row] = -CF_marker[row];
   }

   hypre_MGRGetSubBlock(A, flipped_marker, flipped_marker, debug_flag, A_ff_ptr);

   /* Free the temporary marker copy */
   hypre_TFree(flipped_marker, HYPRE_MEMORY_HOST);
   return (0);
}
/*********************************************************************************
* This routine assumes that the 'toVector' is larger than the 'fromVector' and
* the CF_marker is of the same length as the toVector. There must be n 'point_type'
* values in the CF_marker, where n is the length of the 'fromVector'.
* It adds the values of the 'fromVector' to the 'toVector' where the marker is the
* same as the 'point_type'
*********************************************************************************/
HYPRE_Int
hypre_MGRAddVectorP ( HYPRE_Int *CF_marker,
                      HYPRE_Int point_type,
                      HYPRE_Real a,
                      hypre_ParVector *fromVector,
                      HYPRE_Real b,
                      hypre_ParVector **toVector )
{
   /* Scatter-add: to[i] = b*to[i] + a*from[k] at the positions where
    * CF_marker[i] == point_type; k runs compactly over fromVector. */
   HYPRE_Real *src  = hypre_VectorData(hypre_ParVectorLocalVector(fromVector));
   HYPRE_Real *dest = hypre_VectorData(hypre_ParVectorLocalVector(*toVector));
   HYPRE_Int   len  = hypre_ParVectorActualLocalSize(*toVector);
   HYPRE_Int   idx;
   HYPRE_Int   pos = 0;

   for (idx = 0; idx < len; idx++)
   {
      if (CF_marker[idx] == point_type)
      {
         dest[idx] = b * dest[idx] + a * src[pos];
         pos++;
      }
   }
   return 0;
}
/*************************************************************************************
* This routine assumes that the 'fromVector' is larger than the 'toVector' and
* the CF_marker is of the same length as the fromVector. There must be n 'point_type'
* values in the CF_marker, where n is the length of the 'toVector'.
* It adds the values of the 'fromVector' where the marker is the
* same as the 'point_type' to the 'toVector'
*************************************************************************************/
HYPRE_Int
hypre_MGRAddVectorR ( HYPRE_Int *CF_marker,
                      HYPRE_Int point_type,
                      HYPRE_Real a,
                      hypre_ParVector *fromVector,
                      HYPRE_Real b,
                      hypre_ParVector **toVector )
{
   /* Gather-add: to[k] = b*to[k] + a*from[i] for the positions i where
    * CF_marker[i] == point_type; k runs compactly over toVector. */
   HYPRE_Real *src  = hypre_VectorData(hypre_ParVectorLocalVector(fromVector));
   HYPRE_Real *dest = hypre_VectorData(hypre_ParVectorLocalVector(*toVector));
   HYPRE_Int   len  = hypre_ParVectorActualLocalSize(fromVector);
   HYPRE_Int   idx;
   HYPRE_Int   pos = 0;

   for (idx = 0; idx < len; idx++)
   {
      if (CF_marker[idx] == point_type)
      {
         dest[pos] = b * dest[pos] + a * src[idx];
         pos++;
      }
   }
   return 0;
}
/*
HYPRE_Int
hypre_MGRBuildAffRAP( MPI_Comm comm, HYPRE_Int local_num_variables, HYPRE_Int num_functions,
HYPRE_Int *dof_func, HYPRE_Int *CF_marker, HYPRE_Int **coarse_dof_func_ptr, HYPRE_BigInt **coarse_pnts_global_ptr,
hypre_ParCSRMatrix *A, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_f_ptr, hypre_ParCSRMatrix **A_ff_ptr )
{
HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_num_variables, HYPRE_MEMORY_HOST);
HYPRE_Int i;
for (i = 0; i < local_num_variables; i++) {
CF_marker_copy[i] = -CF_marker[i];
}
hypre_BoomerAMGCoarseParms(comm, local_num_variables, 1, NULL, CF_marker_copy, coarse_dof_func_ptr, coarse_pnts_global_ptr);
hypre_MGRBuildP(A, CF_marker_copy, (*coarse_pnts_global_ptr), 0, debug_flag, P_f_ptr);
hypre_BoomerAMGBuildCoarseOperator(*P_f_ptr, A, *P_f_ptr, A_ff_ptr);
hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST);
return 0;
}
*/
/* Get pointer to coarse grid matrix for MGR solver */
HYPRE_Int
hypre_MGRGetCoarseGridMatrix( void *mgr_vdata, hypre_ParCSRMatrix **RAP )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   /* NULL handle: caller error on the first argument. */
   if (mgr_data == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* The coarse grid operator only exists after setup. */
   if ((mgr_data -> RAP) == NULL)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        " Coarse grid matrix is NULL. Please make sure MGRSetup() is called \n");
      return hypre_error_flag;
   }

   *RAP = (mgr_data -> RAP);
   return hypre_error_flag;
}
/* Get pointer to coarse grid solution for MGR solver */
HYPRE_Int
hypre_MGRGetCoarseGridSolution( void *mgr_vdata, hypre_ParVector **sol )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   /* NULL handle: caller error on the first argument. */
   if (mgr_data == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* The level vectors only exist after setup/solve. */
   if ((mgr_data -> U_array) == NULL)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        " MGR solution array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n");
      return hypre_error_flag;
   }

   /* The coarsest-level solution lives at index num_coarse_levels. */
   *sol = (mgr_data -> U_array)[mgr_data -> num_coarse_levels];
   return hypre_error_flag;
}
/* Get pointer to coarse grid RHS for MGR solver */
HYPRE_Int
hypre_MGRGetCoarseGridRHS( void *mgr_vdata, hypre_ParVector **rhs )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   /* NULL handle: caller error on the first argument. */
   if (mgr_data == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* The level vectors only exist after setup/solve. */
   if ((mgr_data -> F_array) == NULL)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        " MGR RHS array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n");
      return hypre_error_flag;
   }

   /* The coarsest-level right-hand side lives at index num_coarse_levels. */
   *rhs = (mgr_data -> F_array)[mgr_data -> num_coarse_levels];
   return hypre_error_flag;
}
/* Print coarse grid linear system (for debugging)*/
HYPRE_Int
hypre_MGRPrintCoarseSystem( void *mgr_vdata, HYPRE_Int print_flag)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   /* Guard against a NULL solver handle, consistent with the other
    * MGR get/set routines in this file. */
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   mgr_data->print_coarse_system = print_flag;
   return hypre_error_flag;
}
/* Print solver params */
/* Dump the full MGR setup and solve configuration to stdout.
 * Reads only from mgr_data; no state is modified.
 * NOTE(review): mgr_vdata is dereferenced without a NULL check —
 * caller must pass a valid handle. */
HYPRE_Int
hypre_MGRWriteSolverParams(void *mgr_vdata)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i, j;
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   hypre_printf("MGR Setup parameters: \n");
   hypre_printf("Block size: %d\n", (mgr_data -> block_size));
   hypre_printf("Max number of coarse levels: %d\n", (mgr_data -> max_num_coarse_levels));
   hypre_printf("Relax type: %d\n", (mgr_data -> relax_type));
   hypre_printf("Set non-Cpoints to F-points: %d\n", (mgr_data -> set_non_Cpoints_to_F));
   hypre_printf("Set Cpoints method: %d\n", (mgr_data -> set_c_points_method));
   /* Per-level settings */
   for (i = 0; i < max_num_coarse_levels; i++)
   {
      hypre_printf("Lev = %d, Interpolation type: %d\n", i, (mgr_data -> interp_type)[i]);
      hypre_printf("Lev = %d, Restriction type: %d\n", i, (mgr_data -> restrict_type)[i]);
      hypre_printf("Lev = %d, F-relaxation method: %d\n", i, (mgr_data -> Frelax_method)[i]);
      hypre_printf("Lev = %d, Use non-Galerkin coarse grid: %d\n", i,
                   (mgr_data -> use_non_galerkin_cg)[i]);
      HYPRE_Int lvl_num_coarse_points = (mgr_data -> block_num_coarse_indexes)[i];
      hypre_printf("Lev = %d, Number of Cpoints: %d\n", i, lvl_num_coarse_points);
      hypre_printf("Cpoints indices: ");
      /* block_cf_marker[i][j] == 1 marks index j as a C-point on level i */
      for (j = 0; j < lvl_num_coarse_points; j++)
      {
         if ((mgr_data -> block_cf_marker)[i][j] == 1)
         {
            hypre_printf("%d ", j);
         }
      }
      hypre_printf("\n");
   }
   hypre_printf("Number of Reserved Cpoints: %d\n", (mgr_data -> reserved_coarse_size));
   hypre_printf("Keep reserved Cpoints to level: %d\n", (mgr_data -> lvl_to_keep_cpoints));
   /* Solve-phase parameters */
   hypre_printf("\n MGR Solver Parameters: \n");
   hypre_printf("Number of relax sweeps: %d\n", (mgr_data -> num_relax_sweeps));
   hypre_printf("Number of interpolation sweeps: %d\n", (mgr_data -> num_interp_sweeps));
   hypre_printf("Number of restriction sweeps: %d\n", (mgr_data -> num_restrict_sweeps));
   hypre_printf("Global smoother type: %d\n", (mgr_data ->global_smooth_type));
   hypre_printf("Number of global smoother sweeps: %d\n", (mgr_data ->global_smooth_iters));
   hypre_printf("Max number of iterations: %d\n", (mgr_data -> max_iter));
   hypre_printf("Stopping tolerance: %e\n", (mgr_data -> tol));
   hypre_printf("Use default coarse grid solver: %d\n", (mgr_data -> use_default_cgrid_solver));
   /* use_default_fsolver < 0 appears to mean "unset"; only print when set */
   if ((mgr_data -> use_default_fsolver) >= 0)
   {
      hypre_printf("Use default AMG solver for full AMG F-relaxation: %d\n",
                   (mgr_data -> use_default_fsolver));
   }
   return hypre_error_flag;
}
#ifdef HYPRE_USING_DSUPERLU
/* Allocate a zero-initialized SuperLU_DIST data holder.
 * All fields are populated later by hypre_MGRDirectSolverSetup(). */
void *
hypre_MGRDirectSolverCreate()
{
   hypre_DSLUData *dslu_data = hypre_CTAlloc(hypre_DSLUData, 1, HYPRE_MEMORY_HOST);
   return (void *) dslu_data;
}
/* Set up a SuperLU_DIST direct solver for the (coarse grid) matrix A:
 * converts A to SuperLU's distributed row format, builds a 2D process
 * grid, and performs the numerical factorization (pdgssvx with nrhs=0).
 * f and u are unused here; the factorization is reused by
 * hypre_MGRDirectSolverSolve(). */
HYPRE_Int
hypre_MGRDirectSolverSetup( void *solver,
                            hypre_ParCSRMatrix *A,
                            hypre_ParVector *f,
                            hypre_ParVector *u )
{
   /* Par Data Structure variables */
   HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_local;
   HYPRE_Int num_rows;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int pcols = 1, prows = 1;
   HYPRE_BigInt *big_rowptr = NULL;
   hypre_DSLUData *dslu_data = (hypre_DSLUData *) solver;
   HYPRE_Int info = 0;
   /* nrhs = 0: factorize only, no solve in this call */
   HYPRE_Int nrhs = 0;
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   /* Merge diag and offd into one matrix (global ids) */
   A_local = hypre_MergeDiagAndOffd(A);
   num_rows = hypre_CSRMatrixNumRows(A_local);
   /* Now convert hypre matrix to a SuperMatrix */
#ifdef HYPRE_MIXEDINT
   /* Mixed-int build: SuperLU needs a big-int row pointer, so widen a copy */
   {
      HYPRE_Int *rowptr = NULL;
      HYPRE_Int i;
      rowptr = hypre_CSRMatrixI(A_local);
      big_rowptr = hypre_CTAlloc(HYPRE_BigInt, (num_rows + 1), HYPRE_MEMORY_HOST);
      for (i = 0; i < (num_rows + 1); i++)
      {
         big_rowptr[i] = (HYPRE_BigInt)rowptr[i];
      }
   }
#else
   big_rowptr = hypre_CSRMatrixI(A_local);
#endif
   dCreate_CompRowLoc_Matrix_dist(
      &(dslu_data->A_dslu), global_num_rows, global_num_rows,
      hypre_CSRMatrixNumNonzeros(A_local),
      num_rows,
      hypre_ParCSRMatrixFirstRowIndex(A),
      hypre_CSRMatrixData(A_local),
      hypre_CSRMatrixBigJ(A_local), big_rowptr,
      SLU_NR_loc, SLU_D, SLU_GE);
   /* DOK: SuperLU frees assigned data, so set them to null before
    * calling hypre_CSRMatrixdestroy on A_local to avoid memory errors.
    */
#ifndef HYPRE_MIXEDINT
   hypre_CSRMatrixI(A_local) = NULL;
#endif
   hypre_CSRMatrixData(A_local) = NULL;
   hypre_CSRMatrixBigJ(A_local) = NULL;
   hypre_CSRMatrixDestroy(A_local);
   /*Create process grid */
   /* Pick prows as the largest factor of num_procs with prows <= pcols,
    * searching downward from floor-ish sqrt(num_procs). */
   while (prows * pcols <= num_procs) { ++prows; }
   --prows;
   pcols = num_procs / prows;
   while (prows * pcols != num_procs)
   {
      prows -= 1;
      pcols = num_procs / prows;
   }
   //hypre_printf(" prows %d pcols %d\n", prows, pcols);
   superlu_gridinit(comm, prows, pcols, &(dslu_data->dslu_data_grid));
   set_default_options_dist(&(dslu_data->dslu_options));
   dslu_data->dslu_options.Fact = DOFACT;
   dslu_data->dslu_options.PrintStat = NO;
   /*dslu_data->dslu_options.IterRefine = SLU_DOUBLE;
   dslu_data->dslu_options.ColPerm = MMD_AT_PLUS_A;
   dslu_data->dslu_options.DiagPivotThresh = 1.0;
   dslu_data->dslu_options.ReplaceTinyPivot = NO; */
   dScalePermstructInit(global_num_rows, global_num_rows, &(dslu_data->dslu_ScalePermstruct));
   dLUstructInit(global_num_rows, &(dslu_data->dslu_data_LU));
   PStatInit(&(dslu_data->dslu_data_stat));
   dslu_data->global_num_rows = global_num_rows;
   dslu_data->berr = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST);
   dslu_data->berr[0] = 0.0;
   /* Factorize now; subsequent solves reuse the factors (Fact=FACTORED) */
   pdgssvx(&(dslu_data->dslu_options), &(dslu_data->A_dslu),
           &(dslu_data->dslu_ScalePermstruct), NULL, num_rows, nrhs,
           &(dslu_data->dslu_data_grid), &(dslu_data->dslu_data_LU),
           &(dslu_data->dslu_solve), dslu_data->berr, &(dslu_data->dslu_data_stat), &info);
   dslu_data->dslu_options.Fact = FACTORED;
   return hypre_error_flag;
}
/* Solve with the factorization built in hypre_MGRDirectSolverSetup().
 * A is unused here: the factors are stored inside `solver`. */
HYPRE_Int
hypre_MGRDirectSolverSolve( void *solver,
                            hypre_ParCSRMatrix *A,
                            hypre_ParVector *f,
                            hypre_ParVector *u )
{
   hypre_SLUDistSolve(solver, f, u);
   return hypre_error_flag;
}
/* Release all SuperLU_DIST data associated with `solver`. */
HYPRE_Int
hypre_MGRDirectSolverDestroy( void *solver )
{
   hypre_SLUDistDestroy(solver);
   return hypre_error_flag;
}
#endif
|
basic_omp.c | // RUN: ${CATO_ROOT}/src/scripts/cexecute_pass.py %s -o %t
// RUN: diff <(mpirun -np 4 %t) %s.reference_output
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Each of the first four OpenMP threads overwrites one slot of arr with
 * its thread id; expected output with >= 4 threads: AFTER: [0, 1, 2, 3].
 * Fixes: malloc previously used without <stdlib.h> (implicit declaration),
 * arr[rank] could write out of bounds when more than 4 threads run, and
 * arr was never freed. */
int main()
{
    int* arr = (int*) malloc(sizeof(int) * 4);
    if (arr == NULL)
    {
        return 1; /* allocation failure */
    }
    arr[0] = 42;
    arr[1] = 42;
    arr[2] = 42;
    arr[3] = 42;
#pragma omp parallel shared(arr)
    {
        int rank = omp_get_thread_num();
        /* guard: only the first 4 threads have an in-bounds slot */
        if (rank < 4)
        {
            arr[rank] = rank;
        }
    }
    printf("AFTER: [%d, %d, %d, %d]\n", arr[0], arr[1], arr[2], arr[3]);
    free(arr);
    return 0;
}
|
ompblas.c |
#include <math.h>
#include <omp.h>
#include <stddef.h>
//#define ptrdiff_t int
int daxpy_(ptrdiff_t *n, double *sa, double *sx, ptrdiff_t *incx, double *sy, ptrdiff_t *incy);
double ddot_(ptrdiff_t *n, double *sx, ptrdiff_t *incx, double *sy, ptrdiff_t *incy);
int dscal_(ptrdiff_t *n, double *sa, double *sx, ptrdiff_t *incx);
double dnrm2_(ptrdiff_t *n, double *x, ptrdiff_t *incx);
int dcopy_(ptrdiff_t *n, double *sx, ptrdiff_t *incx, double *sy, ptrdiff_t *incy);
/* dcopy: copy vector sx into sy (BLAS dcopy semantics).
 * n     - number of elements to copy
 * incx  - stride of sx (negative strides start from the far end)
 * incy  - stride of sy
 * Cleanup: removed the unused leftover `m` (bound of a deleted unrolled
 * loop) and the dead commented-out unrolled code. */
int dcopy_(ptrdiff_t *n, double *sx, ptrdiff_t *incx, double *sy, ptrdiff_t *incy)
{
   ptrdiff_t i, ix, iy, nn, iincx, iincy;

   /* Dereference inputs */
   nn = *n;
   iincx = *incx;
   iincy = *incy;

   if (nn > 0)
   {
      if (iincx == 1 && iincy == 1) /* code for both increments equal to 1 */
      {
#pragma omp parallel for private(i)
         for (i = 0; i < nn; ++i)
            sy[i] = sx[i];
         return 0;
      }
      else /* general strides */
      {
         /* BLAS convention: a negative stride walks the vector backwards,
          * so the first touched element is at (1 - n) * inc. */
         ix = iincx >= 0 ? 0 : (1 - nn) * iincx;
         iy = iincy >= 0 ? 0 : (1 - nn) * iincy;
         for (i = 0; i < nn; i++)
         {
            sy[iy] = sx[ix];
            ix += iincx;
            iy += iincy;
         }
         return 0;
      }
   }
   return 0;
}
/* daxpy: sy := sy + sa * sx (BLAS daxpy semantics).
 * Returns immediately when n <= 0 or sa == 0.
 * Cleanup: removed the `register` storage class (deprecated/removed in
 * modern C++ and a no-op for modern compilers), the unused `m`, and the
 * dead commented-out unrolled loop.
 *
 * constant times a vector plus a vector.
 * jack dongarra, linpack, 3/11/78.
 * modified 12/3/93, array(1) declarations changed to array(*) */
int daxpy_(ptrdiff_t *n, double *sa, double *sx, ptrdiff_t *incx, double *sy,
           ptrdiff_t *incy)
{
   ptrdiff_t i, ix, iy, nn, iincx, iincy;
   double ssa;

   /* Dereference inputs */
   nn = *n;
   ssa = *sa;
   iincx = *incx;
   iincy = *incy;

   if ( nn > 0 && ssa != 0.0 )
   {
      if (iincx == 1 && iincy == 1) /* code for both increments equal to 1 */
      {
#pragma omp parallel for private(i)
         for (i = 0; i < nn; ++i)
            sy[i] += ssa * sx[i];
      }
      else /* code for unequal increments or equal increments not equal to 1 */
      {
         /* negative strides start from the far end (BLAS convention) */
         ix = iincx >= 0 ? 0 : (1 - nn) * iincx;
         iy = iincy >= 0 ? 0 : (1 - nn) * iincy;
         for (i = 0; i < nn; i++)
         {
            sy[iy] += ssa * sx[ix];
            ix += iincx;
            iy += iincy;
         }
      }
   }
   return 0;
} /* daxpy_ */
/* ddot: return the dot product of sx and sy (BLAS ddot semantics).
 *
 * BUG FIX: the unit-stride branch looped `i < m` with m = nn - 4 — a
 * leftover bound from the removed unrolled loop — so the last four
 * products were silently dropped.  The loop now runs over all nn
 * elements.
 *
 * forms the dot product of two vectors.
 * jack dongarra, linpack, 3/11/78.
 * modified 12/3/93, array(1) declarations changed to array(*) */
double ddot_(ptrdiff_t *n, double *sx, ptrdiff_t *incx, double *sy, ptrdiff_t *incy)
{
   ptrdiff_t i, nn, iincx, iincy;
   double stemp;
   ptrdiff_t ix, iy;

   /* Dereference inputs */
   nn = *n;
   iincx = *incx;
   iincy = *incy;

   stemp = 0.0;
   if (nn > 0)
   {
      if (iincx == 1 && iincy == 1) /* code for both increments equal to 1 */
      {
#pragma omp parallel for private(i) reduction(+:stemp) schedule(static,1)
         for (i = 0; i < nn; ++i)
            stemp += sx[i] * sy[i];
      }
      else /* code for unequal increments or equal increments not equal to 1 */
      {
         /* negative strides start from the far end (BLAS convention) */
         ix = 0;
         iy = 0;
         if (iincx < 0)
            ix = (1 - nn) * iincx;
         if (iincy < 0)
            iy = (1 - nn) * iincy;
         for (i = 0; i < nn; i++)
         {
            stemp += sx[ix] * sy[iy];
            ix += iincx;
            iy += iincy;
         }
      }
   }
   return stemp;
} /* ddot_ */
double dnrm2_(ptrdiff_t *n, double *x, ptrdiff_t *incx)
{
ptrdiff_t ix, nn, iincx;
double norm, scale, absxi, ssq, temp;
/* DNRM2 returns the euclidean norm of a vector via the function
name, so that
DNRM2 := sqrt( x'*x )
-- This version written on 25-October-1982.
Modified on 14-October-1993 to inline the call to SLASSQ.
Sven Hammarling, Nag Ltd. */
/* Dereference inputs */
nn = *n;
iincx = *incx;
if( nn > 0 && iincx > 0 )
{
if (nn == 1)
{
norm = fabs(x[0]);
}
else
{
scale = 0.0;
ssq = 1.0;
/* The following loop is equivalent to this call to the LAPACK
auxiliary routine: CALL SLASSQ( N, X, INCX, SCALE, SSQ ) */
for (ix=(nn-1)*iincx; ix>=0; ix-=iincx)
{
if (x[ix] != 0.0)
{
absxi = fabs(x[ix]);
if (scale < absxi)
{
temp = scale / absxi;
ssq = ssq * (temp * temp) + 1.0;
scale = absxi;
}
else
{
temp = absxi / scale;
ssq += temp * temp;
}
}
}
norm = scale * sqrt(ssq);
}
}
else
norm = 0.0;
return norm;
} /* dnrm2_ */
/* dscal: sx := sa * sx (BLAS dscal semantics).
 * No-op when n <= 0 or incx <= 0.
 * Cleanup: removed the unused leftover `m` (bound of a deleted unrolled
 * loop) and the dead commented-out unrolled code.
 *
 * scales a vector by a constant.
 * jack dongarra, linpack, 3/11/78.
 * modified 3/93 to return if incx .le. 0.
 * modified 12/3/93, array(1) declarations changed to array(*) */
int dscal_(ptrdiff_t *n, double *sa, double *sx, ptrdiff_t *incx)
{
   ptrdiff_t i, nincx, nn, iincx;
   double ssa;

   /* Dereference inputs */
   nn = *n;
   iincx = *incx;
   ssa = *sa;

   if (nn > 0 && iincx > 0)
   {
      if (iincx == 1) /* code for increment equal to 1 */
      {
#pragma omp parallel for private(i)
         for (i = 0; i < nn; ++i)
            sx[i] = ssa * sx[i];
      }
      else /* code for increment not equal to 1 */
      {
         nincx = nn * iincx;
         for (i = 0; i < nincx; i += iincx)
            sx[i] = ssa * sx[i];
      }
   }
   return 0;
} /* dscal_ */
|
Pragma.h | //===--- Pragma.h - Pragma registration and handling ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PragmaHandler and PragmaTable interfaces.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LEX_PRAGMA_H
#define LLVM_CLANG_LEX_PRAGMA_H
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
namespace clang {
class Preprocessor;
class Token;
class IdentifierInfo;
class PragmaNamespace;
/**
 * \brief Describes how the pragma was introduced, e.g., with \#pragma,
 * _Pragma, or __pragma.
 */
enum PragmaIntroducerKind {
  /**
   * \brief The pragma was introduced via \#pragma.
   */
  PIK_HashPragma,

  /**
   * \brief The pragma was introduced via the C99 _Pragma(string-literal).
   */
  PIK__Pragma,

  /**
   * \brief The pragma was introduced via the Microsoft
   * __pragma(token-string).
   */
  PIK___pragma  // Microsoft extension: takes an unquoted token string.
};
/// PragmaHandler - Instances of this interface defined to handle the various
/// pragmas that the language front-end uses.  Each handler optionally has a
/// name (e.g. "pack") and the HandlePragma method is invoked when a pragma with
/// that identifier is found.  If a handler does not match any of the declared
/// pragmas the handler with a null identifier is invoked, if it exists.
///
/// Note that the PragmaNamespace class can be used to subdivide pragmas, e.g.
/// we treat "\#pragma STDC" and "\#pragma GCC" as namespaces that contain other
/// pragmas.
class PragmaHandler {
  std::string Name;

public:
  explicit PragmaHandler(StringRef name) : Name(name) {}
  // Modernized: explicitly defaulted instead of a user-provided empty body
  // (the file already relies on C++11 features such as nullptr/override).
  PragmaHandler() = default;
  virtual ~PragmaHandler();

  StringRef getName() const { return Name; }
  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
                            Token &FirstToken) = 0;

  /// getIfNamespace - If this is a namespace, return it.  This is equivalent to
  /// using a dynamic_cast, but doesn't require RTTI.
  virtual PragmaNamespace *getIfNamespace() { return nullptr; }
};
/// EmptyPragmaHandler - A pragma handler which takes no action, which can be
/// used to ignore particular pragmas.
/// EmptyPragmaHandler - A pragma handler which takes no action, which can be
/// used to ignore particular pragmas. HandlePragma is a deliberate no-op.
class EmptyPragmaHandler : public PragmaHandler {
public:
  EmptyPragmaHandler();
  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
  Token &FirstToken) override;
};
/// PragmaNamespace - This PragmaHandler subdivides the namespace of pragmas,
/// allowing hierarchical pragmas to be defined. Common examples of namespaces
/// are "\#pragma GCC", "\#pragma STDC", and "\#pragma omp", but any namespaces
/// may be (potentially recursively) defined.
class PragmaNamespace : public PragmaHandler {
/// Handlers - This is a map of the handlers in this namespace with their name
/// as key.
///
llvm::StringMap<PragmaHandler*> Handlers;
public:
explicit PragmaNamespace(StringRef Name) : PragmaHandler(Name) {}
virtual ~PragmaNamespace();
/// FindHandler - Check to see if there is already a handler for the
/// specified name. If not, return the handler for the null name if it
/// exists, otherwise return null. If IgnoreNull is true (the default) then
/// the null handler isn't returned on failure to match.
PragmaHandler *FindHandler(StringRef Name,
bool IgnoreNull = true) const;
/// AddPragma - Add a pragma to this namespace.
///
void AddPragma(PragmaHandler *Handler);
/// RemovePragmaHandler - Remove the given handler from the
/// namespace.
void RemovePragmaHandler(PragmaHandler *Handler);
bool IsEmpty() {
return Handlers.empty();
}
void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &FirstToken) override;
PragmaNamespace *getIfNamespace() override { return this; }
};
} // end namespace clang
#endif
|
displacement_lagrangemultiplier_residual_contact_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
#include "utilities/constraint_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementLagrangeMultiplierResidualContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* This class implements a convergence control based on nodal displacement and
* lagrange multiplier values. The error is evaluated separately for each of them, and
* relative and absolute tolerances for both must be specified.
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
          class TDenseSpace >
class DisplacementLagrangeMultiplierResidualContactCriteria
    : public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of DisplacementLagrangeMultiplierResidualContactCriteria
    KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierResidualContactCriteria );

    /// Local Flags
    KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT );
    KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
    KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
    KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET );

    /// The base class definition (and its subclasses)
    typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
    typedef typename BaseType::TDataType                     TDataType;
    typedef typename BaseType::DofsArrayType                 DofsArrayType;
    typedef typename BaseType::TSystemMatrixType             TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType             TSystemVectorType;

    /// The sparse space used
    typedef TSparseSpace SparseSpaceType;

    /// The r_table stream definition TODO: Replace by logger
    typedef TableStreamUtility::Pointer TablePrinterPointerType;

    /// The index type definition
    typedef std::size_t IndexType;

    /// The key type definition
    typedef std::size_t KeyType;

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Default constructor (parameters)
     * @param DispRatioTolerance Relative tolerance for displacement residual error
     * @param DispAbsTolerance Absolute tolerance for displacement residual error
     * @param LMRatioTolerance Relative tolerance for lagrange multiplier residual error
     * @param LMAbsTolerance Absolute tolerance for lagrange multiplier residual error
     * @param EnsureContact To check if the contact is lost
     * @param PrintingOutput If the output is going to be printed in a txt file
     */
    explicit DisplacementLagrangeMultiplierResidualContactCriteria(
        const TDataType DispRatioTolerance,
        const TDataType DispAbsTolerance,
        const TDataType LMRatioTolerance,
        const TDataType LMAbsTolerance,
        const bool EnsureContact = false,
        const bool PrintingOutput = false
        )
        : BaseType()
    {
        // Set local flags
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, EnsureContact);
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, PrintingOutput);
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false);
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);

        // Tolerances
        mDispRatioTolerance = DispRatioTolerance;
        mDispAbsTolerance = DispAbsTolerance;
        mLMRatioTolerance = LMRatioTolerance;
        mLMAbsTolerance = LMAbsTolerance;
    }

    /**
     * @brief Default constructor (parameters)
     * @param ThisParameters The configuration parameters
     */
    explicit DisplacementLagrangeMultiplierResidualContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
        : BaseType()
    {
        // The default parameters
        Parameters default_parameters = Parameters(R"(
        {
            "ensure_contact"                       : false,
            "print_convergence_criterion"          : false,
            "residual_relative_tolerance"          : 1.0e-4,
            "residual_absolute_tolerance"          : 1.0e-9,
            "contact_residual_relative_tolerance"  : 1.0e-4,
            "contact_residual_absolute_tolerance"  : 1.0e-9
        })" );
        ThisParameters.ValidateAndAssignDefaults(default_parameters);

        // The displacement residual
        mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble();
        mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble();

        // The contact residual
        // BUGFIX: the ratio tolerance must be read from "contact_residual_relative_tolerance";
        // the previous key ("contact_displacement_absolute_tolerance") is not among the
        // validated defaults and therefore does not exist in ThisParameters at this point.
        mLMRatioTolerance = ThisParameters["contact_residual_relative_tolerance"].GetDouble();
        mLMAbsTolerance = ThisParameters["contact_residual_absolute_tolerance"].GetDouble();

        // Set local flags
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool());
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false);
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
    }

    //* Copy constructor.
    DisplacementLagrangeMultiplierResidualContactCriteria( DisplacementLagrangeMultiplierResidualContactCriteria const& rOther )
      :BaseType(rOther)
      ,mOptions(rOther.mOptions)
      ,mDispRatioTolerance(rOther.mDispRatioTolerance)
      ,mDispAbsTolerance(rOther.mDispAbsTolerance)
      ,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm)
      ,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm)
      ,mLMRatioTolerance(rOther.mLMRatioTolerance)
      ,mLMAbsTolerance(rOther.mLMAbsTolerance)
      ,mLMInitialResidualNorm(rOther.mLMInitialResidualNorm)
      ,mLMCurrentResidualNorm(rOther.mLMCurrentResidualNorm)
      ,mActiveDofs(rOther.mActiveDofs) // previously omitted; copy for a complete state clone
    {
    }

    /// Destructor.
    ~DisplacementLagrangeMultiplierResidualContactCriteria() override = default;

    ///@}
    ///@name Operators
    ///@{

    /**
     * @brief Compute relative and absolute error.
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     * @return true if convergence is achieved, false otherwise
     */
    bool PostCriteria(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something
            // Initialize
            TDataType disp_residual_solution_norm = 0.0, lm_residual_solution_norm = 0.0;
            IndexType disp_dof_num(0),lm_dof_num(0);

            // First iterator
            const auto it_dof_begin = rDofSet.begin();

            // Loop over Dofs
            // NOTE: dof_id and residual_dof_value are per-iteration scratch, so they are
            // declared inside the loop (thread-private) instead of being (incorrectly)
            // listed in the reduction clause as before.
            #pragma omp parallel for reduction(+:disp_residual_solution_norm,lm_residual_solution_norm,disp_dof_num,lm_dof_num)
            for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
                auto it_dof = it_dof_begin + i;

                const std::size_t dof_id = it_dof->EquationId();
                if (mActiveDofs[dof_id]) {
                    const TDataType residual_dof_value = rb[dof_id];
                    const auto& curr_var = it_dof->GetVariable();
                    if ((curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) {
                        lm_residual_solution_norm += residual_dof_value * residual_dof_value;
                        lm_dof_num++;
                    } else {
                        disp_residual_solution_norm += residual_dof_value * residual_dof_value;
                        disp_dof_num++;
                    }
                }
            }

            mDispCurrentResidualNorm = disp_residual_solution_norm;
            mLMCurrentResidualNorm = lm_residual_solution_norm;

            TDataType residual_disp_ratio = 1.0;
            TDataType residual_lm_ratio = 1.0;

            // We initialize the reference norms on the first call of the step
            if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET)) {
                mDispInitialResidualNorm = (disp_residual_solution_norm == 0.0) ? 1.0 : disp_residual_solution_norm;
                mLMInitialResidualNorm = (lm_residual_solution_norm == 0.0) ? 1.0 : lm_residual_solution_norm;
                residual_disp_ratio = 1.0;
                residual_lm_ratio = 1.0;
                mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, true);
            }

            // We calculate the ratio of the displacements
            residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm;

            // We calculate the ratio of the LM
            residual_lm_ratio = mLMCurrentResidualNorm/mLMInitialResidualNorm;

            KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;

            // We calculate the absolute norms (mean squared residual per dof)
            const TDataType residual_disp_abs = mDispCurrentResidualNorm/disp_dof_num;
            const TDataType residual_lm_abs = mLMCurrentResidualNorm/lm_dof_num;

            // The process info of the model part
            ProcessInfo& r_process_info = rModelPart.GetProcessInfo();

            // We print the results // TODO: Replace for the new log
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    std::cout.precision(4);
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& Table = p_table->GetTable();
                    Table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_lm_ratio << mLMRatioTolerance << residual_lm_abs << mLMAbsTolerance;
                } else {
                    std::cout.precision(4);
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) {
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("RESIDUAL CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tLAGRANGE MUL: RATIO = ") << residual_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << residual_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl;
                    } else {
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "RESIDUAL CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tLAGRANGE MUL: RATIO = " << residual_lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << residual_lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl;
                    }
                }
            }

            r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > residual_lm_ratio) ? residual_disp_ratio : residual_lm_ratio;
            r_process_info[RESIDUAL_NORM] = (residual_lm_abs > mLMAbsTolerance) ? residual_lm_abs : mLMAbsTolerance;

            // We check if converged
            const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance);
            const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) ? true : (residual_lm_ratio <= mLMRatioTolerance || residual_lm_abs <= mLMAbsTolerance);

            if (disp_converged && lm_converged ) {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                    if (r_process_info.Has(TABLE_UTILITY)) {
                        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                        auto& Table = p_table->GetTable();
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                            Table << BOLDFONT(FGRN("       Achieved"));
                        else
                            Table << "Achieved";
                    } else {
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                            KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
                        else
                            KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is achieved" << std::endl;
                    }
                }
                return true;
            } else {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                    if (r_process_info.Has(TABLE_UTILITY)) {
                        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                        auto& r_table = p_table->GetTable();
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                            r_table << BOLDFONT(FRED("   Not achieved"));
                        else
                            r_table << "Not achieved";
                    } else {
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                            KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
                        else
                            KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is not achieved" << std::endl;
                    }
                }
                return false;
            }
        } else // In this case all the displacements are imposed!
            return true;
    }

    /**
     * @brief This function initialize the convergence criteria
     * @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
     */
    void Initialize( ModelPart& rModelPart) override
    {
        BaseType::mConvergenceCriteriaIsInitialized = true;

        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED)) {
            TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
            auto& r_table = p_table->GetTable();
            r_table.AddColumn("DP RATIO", 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
            r_table.AddColumn("LM RATIO", 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
            r_table.AddColumn("CONVERGENCE", 15);
            mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, true);
        }
    }

    /**
     * @brief This function initializes the solution step
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     */
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        // Initialize flag
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);

        // Filling mActiveDofs when MPC exist
        ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet);
    }

    ///@}
    ///@name Operations
    ///@{

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Friends
    ///@{

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}
private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    Flags mOptions;                            /// Local flags

    // Scalars are value-initialized so a default-constructed criteria has a
    // deterministic state (they are set before use in the constructors/PostCriteria).
    TDataType mDispRatioTolerance = 0.0;       /// The ratio threshold for the norm of the displacement residual
    TDataType mDispAbsTolerance = 0.0;         /// The absolute value threshold for the norm of the displacement residual
    TDataType mDispInitialResidualNorm = 0.0;  /// The reference norm of the displacement residual
    TDataType mDispCurrentResidualNorm = 0.0;  /// The current norm of the displacement residual

    TDataType mLMRatioTolerance = 0.0;         /// The ratio threshold for the norm of the LM residual
    TDataType mLMAbsTolerance = 0.0;           /// The absolute value threshold for the norm of the LM residual
    TDataType mLMInitialResidualNorm = 0.0;    /// The reference norm of the LM residual
    TDataType mLMCurrentResidualNorm = 0.0;    /// The current norm of the LM residual

    std::vector<bool> mActiveDofs;             /// This vector contains the dofs that are active

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@}
    ///@name Serialization
    ///@{

    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Unaccessible methods
    ///@{

    ///@}
}; // Kratos DisplacementLagrangeMultiplierResidualContactCriteria
///@name Local flags creation
///@{
/// Local Flags
// Out-of-class definitions of the local flags declared with
// KRATOS_DEFINE_LOCAL_FLAG inside the class. Each flag occupies one bit
// position (0..3); the NOT_* counterpart is the same position with value false.
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_ENSURE_CONTACT(Kratos::Flags::Create(0, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_PRINTING_OUTPUT(Kratos::Flags::Create(1, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_TABLE_IS_INITIALIZED(Kratos::Flags::Create(2, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(3));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(3, false));
}
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H */
|
J2OrbitalSoA.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
// Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
// Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H
#define QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H
#include "Configuration.h"
#if QMC_BUILD_LEVEL<5
#include "QMCWaveFunctions/WaveFunctionComponent.h"
#include "QMCWaveFunctions/Jastrow/DiffTwoBodyJastrowOrbital.h"
#include <qmc_common.h>
#endif
#include "Particle/DistanceTableData.h"
#include <simd/allocator.hpp>
#include <simd/algorithm.hpp>
#include <map>
#include <numeric>
namespace qmcplusplus
{
/** @ingroup WaveFunctionComponent
* @brief Specialization for two-body Jastrow function using multiple functors
*
* Each pair-type can have distinct function \f$u(r_{ij})\f$.
* For electrons, distinct pair correlation functions are used
* for spins up-up/down-down and up-down/down-up.
*
* Based on J2OrbitalSoA.h with these considerations
* - DistanceTableData using SoA containers
* - support mixed precision: FT::real_type != OHMMS_PRECISION
* - loops over the groups: elminated PairID
* - support simd function
* - double the loop counts
* - Memory use is O(N).
*/
/** @ingroup WaveFunctionComponent
 * @brief Specialization for two-body Jastrow function using multiple functors.
 *
 * Each pair-type can have a distinct function \f$u(r_{ij})\f$; storage is SoA
 * and memory use is O(N). See the class-level comment above for design notes.
 */
template<class FT>
struct J2OrbitalSoA : public WaveFunctionComponent
{
  ///alias FuncType
  using FuncType = FT;
  ///type of each component U, dU, d2U;
  using valT = typename FT::real_type;
  ///element position type
  using posT = TinyVector<valT, OHMMS_DIM>;
  ///use the same container
  using RowContainer = DistanceTableData::RowContainer;

  ///number of particles
  size_t N;
  ///number of particles + padded
  size_t N_padded;
  ///number of groups of the target particleset
  size_t NumGroups;
  ///task id
  int TaskID;
  ///Used to compute correction
  bool FirstTime;
  ///diff value
  RealType DiffVal;
  ///Correction
  RealType KEcorr;
  ///\f$Uat[i] = sum_(j) u_{i,j}\f$
  Vector<valT> Uat;
  ///\f$dUat[i] = sum_(j) du_{i,j}\f$
  using gContainer_type = VectorSoaContainer<valT, OHMMS_DIM>;
  gContainer_type dUat;
  ///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$
  Vector<valT> d2Uat;
  valT cur_Uat;
  ///scratch buffers for the particle currently being moved
  aligned_vector<valT> cur_u, cur_du, cur_d2u;
  aligned_vector<valT> old_u, old_du, old_d2u;
  aligned_vector<valT> DistCompressed;
  aligned_vector<int> DistIndice;
  ///Container for \f$F[ig*NumGroups+jg]\f$ (owning pointers, deduplicated in J2Unique)
  std::vector<FT*> F;
  ///Unique J2 set for cleanup; each functor appears exactly once here
  std::map<std::string, FT*> J2Unique;

  J2OrbitalSoA(ParticleSet& p, int tid);
  J2OrbitalSoA(const J2OrbitalSoA& rhs) = delete;
  ~J2OrbitalSoA();

  /* initialize storage */
  void init(ParticleSet& p);

  /** add functor for (ia,ib) pair */
  void addFunc(int ia, int ib, FT* j);

  void resetTargetParticleSet(ParticleSet& P)
  {
    if (dPsi)
      dPsi->resetTargetParticleSet(P);
  }

  /** check in an optimizable parameter
   * @param active a super set of optimizable variables
   */
  void checkInVariables(opt_variables_type& active)
  {
    myVars.clear();
    // range-for over the unique functor map (each functor visited once)
    for (auto& entry : J2Unique)
    {
      entry.second->checkInVariables(active);
      entry.second->checkInVariables(myVars);
    }
  }

  /** check out optimizable variables
   */
  void checkOutVariables(const opt_variables_type& active)
  {
    myVars.getIndex(active);
    Optimizable = myVars.is_optimizable();
    for (auto& entry : J2Unique)
      entry.second->checkOutVariables(active);
    if (dPsi)
      dPsi->checkOutVariables(active);
  }

  ///reset the value of all the unique Two-Body Jastrow functions
  void resetParameters(const opt_variables_type& active)
  {
    if (!Optimizable)
      return;
    for (auto& entry : J2Unique)
      entry.second->resetParameters(active);
    if (dPsi)
      dPsi->resetParameters(active);
    // copy back the active values into the local variable set
    for (int i = 0; i < myVars.size(); ++i)
    {
      int ii = myVars.Index[i];
      if (ii >= 0)
        myVars[i] = active[ii];
    }
  }

  /** print the state, e.g., optimizables */
  void reportStatus(std::ostream& os)
  {
    for (auto& entry : J2Unique)
      entry.second->myVars.print(os);
    ChiesaKEcorrection();
  }

  RealType ChiesaKEcorrection() { return RealType(); }
  /**@} */

  WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const;

  RealType evaluateLog(ParticleSet& P,
                       ParticleSet::ParticleGradient_t& G,
                       ParticleSet::ParticleLaplacian_t& L);

  /** recompute internal data assuming distance table is fully ready */
  void recompute(ParticleSet& P);

  ValueType ratio(ParticleSet& P, int iat);

  void evaluateRatios(VirtualParticleSet& VP, std::vector<ValueType>& ratios)
  {
    // ratio for each virtual move: exp(U_old - U_new)
    for (size_t k = 0; k < ratios.size(); ++k)
      ratios[k] = std::exp(Uat[VP.refPtcl] -
                           computeU(VP.refPS, VP.refPtcl, VP.DistTables[0]->Distances[k]));
  }

  void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios);
  GradType evalGrad(ParticleSet& P, int iat);
  ValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat);
  void acceptMove(ParticleSet& P, int iat);
  inline void restore(int iat) {}

  /** compute G and L after the sweep
   */
  void evaluateGL(ParticleSet& P,
                  ParticleSet::ParticleGradient_t& G,
                  ParticleSet::ParticleLaplacian_t& L, bool fromscratch = false);

  inline void registerData(ParticleSet& P, WFBufferType& buf)
  {
    if (Bytes_in_WFBuffer == 0)
    {
      // First registration: record our span inside the walker buffer, then
      // release the local storage; subsequently the data lives in the buffer.
      Bytes_in_WFBuffer = buf.current();
      buf.add(Uat.begin(), Uat.end());
      buf.add(dUat.data(), dUat.end());
      buf.add(d2Uat.begin(), d2Uat.end());
      Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer;
      // free local space
      Uat.free();
      dUat.free();
      d2Uat.free();
    }
    else
    {
      buf.forward(Bytes_in_WFBuffer);
    }
  }

  inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
  {
    // Re-attach the accumulators to the (already registered) buffer storage.
    Uat.attachReference(buf.lendReference<valT>(N), N);
    dUat.attachReference(N, N_padded, buf.lendReference<valT>(N_padded * OHMMS_DIM));
    d2Uat.attachReference(buf.lendReference<valT>(N), N);
  }

  RealType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false)
  {
    evaluateGL(P, P.G, P.L, false);
    buf.forward(Bytes_in_WFBuffer);
    return LogValue;
  }

  /*@{ internal compute engines*/

  /// sum of u(r_iat,j) over all groups for particle iat, given a distance row
  inline valT computeU(const ParticleSet& P, int iat, const RealType* restrict dist)
  {
    valT curUat(0);
    const int igt = P.GroupID[iat] * NumGroups;
    for (int jg = 0; jg < NumGroups; ++jg)
    {
      const FuncType& f2(*F[igt + jg]);
      int iStart = P.first(jg);
      int iEnd = P.last(jg);
      curUat += f2.evaluateV(iat, iStart, iEnd, dist, DistCompressed.data());
    }
    return curUat;
  }

  inline void computeU3(const ParticleSet& P, int iat, const RealType* restrict dist,
                        RealType* restrict u, RealType* restrict du, RealType* restrict d2u, bool triangle = false);

  /** compute gradient: dot-product of du with each displacement component
   */
  inline posT accumulateG(const valT* restrict du, const RowContainer& displ) const
  {
    posT grad;
    for (int idim = 0; idim < OHMMS_DIM; ++idim)
    {
      const valT* restrict dX = displ.data(idim);
      valT s = valT();
      #pragma omp simd reduction(+:s) aligned(du,dX)
      for (int jat = 0; jat < N; ++jat)
        s += du[jat] * dX[jat];
      grad[idim] = s;
    }
    return grad;
  }
  /**@} */
};
/** Constructor: size all storage for particle set @p p and tag with task id.
 *  Members are set in the initializer list (declaration order) so they are
 *  valid before init() runs; DiffVal was previously left uninitialized.
 */
template<typename FT>
J2OrbitalSoA<FT>::J2OrbitalSoA(ParticleSet& p, int tid)
  : TaskID(tid), FirstTime(true), DiffVal(0.0), KEcorr(0.0)
{
  init(p);
  OrbitalName = "J2OrbitalSoA";
}
/** Destructor: release the pair functors.
 *  F[] may alias the same functor in several slots, but each owned functor
 *  appears exactly once in J2Unique, so deleting through the map is safe
 *  (no double free).
 */
template<typename FT>
J2OrbitalSoA<FT>::~J2OrbitalSoA()
{
  for (auto& entry : J2Unique)
    delete entry.second;
}
/** Allocate all per-particle storage for the target particle set @p p. */
template<typename FT>
void J2OrbitalSoA<FT>::init(ParticleSet& p)
{
  // Cache particle counts and group structure.
  N         = p.getTotalNum();
  N_padded  = getAlignedSize<valT>(N);
  NumGroups = p.groups();

  // Per-particle accumulators: value, gradient, laplacian contributions.
  Uat.resize(N);
  dUat.resize(N);
  d2Uat.resize(N);

  // Scratch buffers for single-particle moves (current and previous state).
  cur_u.resize(N);
  cur_du.resize(N);
  cur_d2u.resize(N);
  old_u.resize(N);
  old_du.resize(N);
  old_d2u.resize(N);

  // Compressed-distance work arrays used by the functor kernels.
  DistCompressed.resize(N);
  DistIndice.resize(N);

  // One functor slot per ordered (group, group) pair; filled by addFunc().
  F.resize(NumGroups * NumGroups, nullptr);
}
/** Register pair functor @p j for the species pair (ia, ib).
 *  Ownership note: the same pointer may be stored in several F[] slots, but it
 *  is recorded once in J2Unique (keyed by "iaib"), which owns and deletes it.
 *  @param ia group index of the first species
 *  @param ib group index of the second species
 *  @param j  functor pointer (ownership transferred to this object)
 */
template<typename FT>
void J2OrbitalSoA<FT>::addFunc(int ia, int ib, FT* j)
{
  if(ia==ib)
  {
    if(ia==0)//first time, assign everything
    {
      // Fill every still-empty slot with this functor so all pairs have a
      // valid default; later addFunc calls overwrite specific pairs.
      int ij=0;
      for(int ig=0; ig<NumGroups; ++ig)
        for(int jg=0; jg<NumGroups; ++jg, ++ij)
          if(F[ij]==nullptr) F[ij]=j;
    }
    else
      F[ia*NumGroups+ib]=j;
  }
  else
  {
    if(N==2)
    {
      // a very special case, 1 up + 1 down
      // uu/dd was prevented by the builder
      for(int ig=0; ig<NumGroups; ++ig)
        for(int jg=0; jg<NumGroups; ++jg)
          F[ig*NumGroups+jg]=j;
    }
    else
    {
      // generic case: symmetric assignment for the unordered pair
      F[ia*NumGroups+ib]=j;
      F[ib*NumGroups+ia]=j;
    }
  }
  // Record in the unique map for cleanup; key is the concatenated pair indices.
  std::stringstream aname;
  aname<<ia<<ib;
  J2Unique[aname.str()]=j;
  //ChiesaKEcorrection();
  FirstTime = false;
}
/** Deep-clone this component for target particle set @p tqp.
 *  Functors are deep-copied exactly once each: fcmap maps an original functor
 *  pointer to its copy so slots that shared a functor keep sharing the copy.
 *  @return a new J2OrbitalSoA owned by the caller
 */
template<typename FT>
WaveFunctionComponentPtr J2OrbitalSoA<FT>::makeClone(ParticleSet& tqp) const
{
  J2OrbitalSoA<FT>* j2copy=new J2OrbitalSoA<FT>(tqp,-1);
  if (dPsi)
    j2copy->dPsi = dPsi->makeClone(tqp);
  // original-functor -> cloned-functor, to preserve sharing in the clone
  std::map<const FT*,FT*> fcmap;
  for(int ig=0; ig<NumGroups; ++ig)
    for(int jg=ig; jg<NumGroups; ++jg)  // upper triangle only; addFunc symmetrizes
    {
      int ij=ig*NumGroups+jg;
      if(F[ij]==0)
        continue;
      typename std::map<const FT*,FT*>::iterator fit=fcmap.find(F[ij]);
      if(fit == fcmap.end())
      {
        FT* fc=new FT(*F[ij]);
        j2copy->addFunc(ig,jg,fc);
        //if (dPsi) (j2copy->dPsi)->addFunc(aname.str(),ig,jg,fc);
        fcmap[F[ij]]=fc;
      }
    }
  j2copy->Optimizable = Optimizable;
  return j2copy;
}
/** internal function to compute \f$\sum_j u(r_j), du/dr, d2u/dr2\f$
 * @param P particleset
 * @param iat particle index
 * @param dist starting distance
 * @param u starting value
 * @param du starting first deriv
 * @param d2u starting second deriv
 * @param triangle if true, only particles j < iat are evaluated (upper-triangle sweep)
 */
template<typename FT>
inline void
J2OrbitalSoA<FT>::computeU3(const ParticleSet& P, int iat, const RealType* restrict dist,
                            RealType* restrict u, RealType* restrict du, RealType* restrict d2u, bool triangle)
{
  // Number of target particles: iat (triangle sweep) or all N.
  const int jelmax=triangle?iat:N;
  constexpr valT czero(0);
  // Zero the outputs before accumulating group-by-group.
  std::fill_n(u, jelmax,czero);
  std::fill_n(du, jelmax,czero);
  std::fill_n(d2u,jelmax,czero);
  const int igt=P.GroupID[iat]*NumGroups;
  for(int jg=0; jg<NumGroups; ++jg)
  {
    const FuncType& f2(*F[igt+jg]);
    int iStart = P.first(jg);
    // Clip the group's range so we never exceed jelmax in a triangle sweep.
    int iEnd = std::min(jelmax,P.last(jg));
    f2.evaluateVGL(iat, iStart, iEnd, dist, u, du, d2u, DistCompressed.data(), DistIndice.data());
  }
  //u[iat]=czero;
  //du[iat]=czero;
  //d2u[iat]=czero;
}
/** Wavefunction ratio for a proposed single-particle move of @p iat.
 *  Only the value is computed; derivatives are deferred (flagged via
 *  UpdateMode) until acceptMove.
 */
template<typename FT>
typename J2OrbitalSoA<FT>::ValueType
J2OrbitalSoA<FT>::ratio(ParticleSet& P, int iat)
{
  // only ratio, ready to compute it again
  UpdateMode = ORB_PBYP_RATIO;
  const valT u_proposed = computeU(P, iat, P.DistTables[0]->Temp_r.data());
  cur_Uat = u_proposed;
  return std::exp(Uat[iat] - u_proposed);
}
/** Compute ratios for moving each particle in turn to the single stored
 *  "temporary" position (all-to-one): ratios[i] = exp(Uat[i] + Uself - sumU),
 *  where sumU is the Jastrow sum at the temporary position for group ig, and
 *  Uself removes the spurious self-interaction term dist[i].
 */
template<typename FT>
inline void
J2OrbitalSoA<FT>::evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
{
  const DistanceTableData* d_table=P.DistTables[0];
  const auto dist=d_table->Temp_r.data();
  for(int ig=0; ig<NumGroups; ++ig)
  {
    const int igt=ig*NumGroups;
    // Jastrow sum at the temporary position as seen by a particle of group ig
    // (iat=-1: no particle excluded from the evaluation).
    valT sumU(0);
    for(int jg=0; jg<NumGroups; ++jg)
    {
      const FuncType& f2(*F[igt+jg]);
      int iStart = P.first(jg);
      int iEnd = P.last(jg);
      sumU += f2.evaluateV(-1, iStart, iEnd, dist, DistCompressed.data());
    }
    for(int i=P.first(ig); i<P.last(ig); ++i)
    {
      // remove self-interaction
      const valT Uself = F[igt+ig]->evaluate(dist[i]);
      ratios[i]=std::exp(Uat[i]+Uself-sumU);
    }
  }
}
template<typename FT>
typename J2OrbitalSoA<FT>::GradType
J2OrbitalSoA<FT>::evalGrad(ParticleSet& P, int iat)
{
  // The per-particle gradient is maintained incrementally in dUat;
  // simply wrap the cached value in the return type.
  GradType g(dUat[iat]);
  return g;
}
template<typename FT>
typename J2OrbitalSoA<FT>::ValueType
J2OrbitalSoA<FT>::ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
  UpdateMode = ORB_PBYP_PARTIAL;
  const DistanceTableData* table = P.DistTables[0];
  // Value, first and second radial derivatives at the proposed position.
  computeU3(P, iat, table->Temp_r.data(), cur_u.data(), cur_du.data(), cur_d2u.data());
  cur_Uat = simd::accumulate_n(cur_u.data(), N, valT());
  DiffVal = Uat[iat] - cur_Uat;
  grad_iat += accumulateG(cur_du.data(), table->Temp_dr);
  return std::exp(DiffVal);
}
/** Accept the proposed move of particle iat and update the cached
 * per-particle accumulators Uat, dUat and d2Uat incrementally.
 *
 * The old contributions are recomputed from the stored distance row and
 * subtracted, the new ones (from Temp_r/Temp_dr) added, so no full
 * recompute is required. LogValue is updated by the change of Uat[iat].
 */
template<typename FT>
void
J2OrbitalSoA<FT>::acceptMove(ParticleSet& P, int iat)
{
  // get the old u, du, d2u from the pre-move distance row of iat
  const DistanceTableData* d_table=P.DistTables[0];
  computeU3(P,iat,d_table->Distances[iat],old_u.data(),old_du.data(),old_d2u.data());
  if(UpdateMode == ORB_PBYP_RATIO)
  {//ratio-only during the move; need to compute derivatives
    const auto dist=d_table->Temp_r.data();
    computeU3(P,iat,dist,cur_u.data(),cur_du.data(),cur_d2u.data());
  }

  valT cur_d2Uat(0);
  const auto& new_dr=d_table->Temp_dr;            // displacements at the new position
  const auto& old_dr=d_table->Displacements[iat]; // displacements at the old position
  // (D-1) factor from the radial Laplacian in D dimensions: u'' + (D-1)/r u'
  constexpr valT lapfac=OHMMS_DIM-RealType(1);

  // Update value and Laplacian accumulators of all other particles and
  // accumulate the new Laplacian of iat itself.
  #pragma omp simd reduction(+:cur_d2Uat)
  for(int jat=0; jat<N; jat++)
  {
    const valT du = cur_u[jat] - old_u[jat];
    const valT newl = cur_d2u[jat] + lapfac*cur_du[jat];
    const valT dl = old_d2u[jat] + lapfac*old_du[jat] - newl;
    Uat[jat] += du;
    d2Uat[jat] += dl;
    cur_d2Uat -= newl;
  }

  // Update gradient accumulators component by component (SoA layout).
  posT cur_dUat;
  for(int idim=0; idim<OHMMS_DIM; ++idim)
  {
    const valT* restrict new_dX=new_dr.data(idim);
    const valT* restrict old_dX=old_dr.data(idim);
    const valT* restrict cur_du_pt=cur_du.data();
    const valT* restrict old_du_pt=old_du.data();
    valT* restrict save_g=dUat.data(idim);
    valT cur_g=cur_dUat[idim];
    #pragma omp simd reduction(+:cur_g) aligned(old_dX,new_dX,save_g,cur_du_pt,old_du_pt)
    for(int jat=0; jat<N; jat++)
    {
      const valT newg = cur_du_pt[jat] * new_dX[jat];
      const valT dg = newg - old_du_pt[jat]*old_dX[jat];
      save_g[jat] -= dg;
      cur_g += newg;
    }
    cur_dUat[idim] = cur_g;
  }

  // Finally replace the entries of particle iat itself.
  LogValue += Uat[iat]-cur_Uat;
  Uat[iat] = cur_Uat;
  dUat(iat) = cur_dUat;
  d2Uat[iat] = cur_d2Uat;
}
/** Recompute Uat, dUat and d2Uat for all particles from scratch.
 *
 * Iterates over particles row by row, evaluating only the lower triangle
 * (jat < iat) via computeU3(...,triangle=true) and then scattering each
 * row's contribution to the upper-triangle partners, so every pair is
 * evaluated exactly once.
 */
template<typename FT>
void
J2OrbitalSoA<FT>::recompute(ParticleSet& P)
{
  const DistanceTableData* d_table=P.DistTables[0];
  for(int ig=0; ig<NumGroups; ++ig)
  {
    const int igt=ig*NumGroups;
    for(int iat=P.first(ig),last=P.last(ig); iat<last; ++iat)
    {
      // lower-triangle row of pair values/derivatives for particle iat
      computeU3(P,iat,d_table->Distances[iat],cur_u.data(),cur_du.data(),cur_d2u.data(),true);
      Uat[iat]=simd::accumulate_n(cur_u.data(),iat,valT());
      posT grad;
      valT lap(0);
      const valT* restrict u = cur_u.data();
      const valT* restrict du = cur_du.data();
      const valT* restrict d2u = cur_d2u.data();
      const RowContainer& displ = d_table->Displacements[iat];
      // (D-1) factor from the radial Laplacian: u'' + (D-1)/r u'
      constexpr valT lapfac=OHMMS_DIM-RealType(1);
      #pragma omp simd reduction(+:lap) aligned(du,d2u)
      for(int jat=0; jat<iat; ++jat)
        lap+=d2u[jat]+lapfac*du[jat];
      // gradient of iat from the row, one Cartesian component at a time
      for(int idim=0; idim<OHMMS_DIM; ++idim)
      {
        const valT* restrict dX=displ.data(idim);
        valT s=valT();
        #pragma omp simd reduction(+:s) aligned(du,dX)
        for(int jat=0; jat<iat; ++jat) s+=du[jat]*dX[jat];
        grad[idim]=s;
      }
      dUat(iat)=grad;
      d2Uat[iat]=-lap;
      // add the contribution from the upper triangle: scatter this row's
      // value/Laplacian terms onto the partner particles jat < iat
      #pragma omp simd aligned(u,du,d2u)
      for(int jat=0; jat<iat; jat++)
      {
        Uat[jat] += u[jat];
        d2Uat[jat] -= d2u[jat]+lapfac*du[jat];
      }
      // partner gradients pick up the opposite sign of the displacement
      for(int idim=0; idim<OHMMS_DIM; ++idim)
      {
        valT* restrict save_g=dUat.data(idim);
        const valT* restrict dX=displ.data(idim);
        #pragma omp simd aligned(save_g,du,dX)
        for(int jat=0; jat<iat; jat++)
          save_g[jat]-=du[jat]*dX[jat];
      }
    }
  }
}
template<typename FT>
typename J2OrbitalSoA<FT>::RealType
J2OrbitalSoA<FT>::evaluateLog(ParticleSet& P,
                              ParticleSet::ParticleGradient_t& G,
                              ParticleSet::ParticleLaplacian_t& L)
{
  // Full evaluation: delegate to evaluateGL with a from-scratch recompute,
  // then report the log value it accumulated.
  evaluateGL(P, G, L, true);
  return LogValue;
}
template<typename FT>
void
J2OrbitalSoA<FT>::evaluateGL(ParticleSet& P,
                             ParticleSet::ParticleGradient_t& G,
                             ParticleSet::ParticleLaplacian_t& L, bool fromscratch)
{
  if (fromscratch)
    recompute(P);

  // Accumulate the cached per-particle quantities into the caller's
  // gradient/Laplacian arrays and into LogValue.
  LogValue = valT(0);
  for (int i = 0; i < N; ++i)
  {
    LogValue += Uat[i];
    G[i] += dUat[i];
    L[i] += d2Uat[i];
  }

  // J2 convention: log(psi_J) = -1/2 * sum_i Uat[i] (each pair counted twice).
  constexpr valT mhalf(-0.5);
  LogValue = mhalf * LogValue;
}
}
#endif
|
resample.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS AAA M M PPPP L EEEEE %
% R R E SS A A MM MM P P L E %
% RRRR EEE SSS AAAAA M M M PPPP L EEE %
% R R E SS A A M M P L E %
% R R EEEEE SSSSS A A M M P LLLLL EEEEE %
% %
% %
% MagickCore Pixel Resampling Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% August 2007 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/color-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/resample.h"
#include "MagickCore/resize.h"
#include "MagickCore/resize-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/option.h"
/*
  EWA Resampling Options
*/
/* select ONE resampling method */
#define EWA 1 /* Normal EWA handling - raw or clamped */
/* if 0 then use "High Quality EWA" */
#define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */
#define FILTER_LUT 1 /* Use a LUT rather than direct filter calls */
/* output debugging information */
#define DEBUG_ELLIPSE 0 /* output ellipse info for debug */
#define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */
#define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */
/* Guard keyed to FILTER_LUT: the rest of this file (the filter_lut[]
   member and the inner resampling loop) tests FILTER_LUT, not the
   never-defined FILTER_DIRECT, so the guard must match. */
#if FILTER_LUT
#define WLUT_WIDTH 1024 /* size of the filter cache */
#endif
/*
  Typedef declarations.
*/
/* Per-resampling-session state shared between ScaleResampleFilter() and
   ResamplePixelColor(); created by AcquireResampleFilter(). */
struct _ResampleFilter
{
  CacheView
    *view;            /* virtual-pixel view of the image being sampled */

  Image
    *image;           /* referenced source image (owned; released on destroy) */

  ExceptionInfo
    *exception;       /* sink for errors raised during pixel lookups */

  MagickBooleanType
    debug;

  /* Information about image being resampled */
  ssize_t
    image_area;       /* columns*rows, used as a scaling sanity limit */

  PixelInterpolateMethod
    interpolate;      /* fallback point-interpolation method */

  VirtualPixelMethod
    virtual_pixel;

  FilterType
    filter;

  /* processing settings needed */
  MagickBooleanType
    limit_reached,    /* ellipse grew beyond practical size; use averages */
    do_interpolate,   /* skip EWA entirely; plain interpolated lookup */
    average_defined;  /* average_pixel has been computed (lazy) */

  PixelInfo
    average_pixel;    /* cached whole-image average color */

  /* current elliptical area being resampled around center point */
  double
    A, B, C,          /* ellipse coefficients: A*u^2 + B*u*v + C*v^2 = F */
    Vlimit, Ulimit, Uwidth, slope; /* bounding parallelogram of the ellipse */

#if FILTER_LUT
  /* LUT of weights for filtered average in elliptical area */
  double
    filter_lut[WLUT_WIDTH];
#else
  /* Use a Direct call to the filter functions */
  ResizeFilter
    *filter_def;

  double
    F;                /* ellipse inclusion bound (support^2) */
#endif

  /* the practical working support of the filter */
  double
    support;

  size_t
    signature;        /* MagickCoreSignature while the structure is valid */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResampleFilter() initializes the information resample needs do to a
% scaled lookup of a color from an image, using area sampling.
%
% The algorithm is based on a Elliptical Weighted Average, where the pixels
% found in a large elliptical area is averaged together according to a
% weighting (filter) function. For more details see "Fundamentals of Texture
% Mapping and Image Warping" a master's thesis by Paul.S.Heckbert, June 17,
% 1989. Available for free from, http://www.cs.cmu.edu/~ph/
%
% As EWA resampling (or any sort of resampling) can require a lot of
% calculations to produce a distorted scaling of the source image for each
% output pixel, the ResampleFilter structure generated holds that information
% between individual image resampling.
%
% This function will make the appropriate AcquireCacheView() calls
% to view the image, calling functions do not need to open a cache view.
%
% Usage Example...
% resample_filter=AcquireResampleFilter(image,exception);
% SetResampleFilter(resample_filter, GaussianFilter);
% for (y=0; y < (ssize_t) image->rows; y++) {
% for (x=0; x < (ssize_t) image->columns; x++) {
% u= ....; v= ....;
% ScaleResampleFilter(resample_filter, ... scaling vectors ...);
% (void) ResamplePixelColor(resample_filter,u,v,&pixel);
% ... assign resampled pixel value ...
% }
% }
% DestroyResampleFilter(resample_filter);
%
% The format of the AcquireResampleFilter method is:
%
% ResampleFilter *AcquireResampleFilter(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResampleFilter *AcquireResampleFilter(const Image *image,
  ExceptionInfo *exception)
{
  register ResampleFilter
    *resample_filter;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  /* Zero-initialized so all lazy fields (average_pixel, ...) start clean. */
  resample_filter=(ResampleFilter *) AcquireCriticalMemory(sizeof(
    *resample_filter));
  (void) memset(resample_filter,0,sizeof(*resample_filter));

  resample_filter->exception=exception;
  /* Hold our own reference to the image and open a virtual-pixel view;
     both are released in DestroyResampleFilter(). */
  resample_filter->image=ReferenceImage((Image *) image);
  resample_filter->view=AcquireVirtualCacheView(resample_filter->image,
    exception);

  resample_filter->debug=IsEventLogging();
  resample_filter->image_area=(ssize_t) (image->columns*image->rows);
  resample_filter->average_defined=MagickFalse;
  resample_filter->signature=MagickCoreSignature;

  /* Seed filter/interpolation/virtual-pixel settings from the image. */
  SetResampleFilter(resample_filter,image->filter);
  (void) SetResampleFilterInterpolateMethod(resample_filter,image->interpolate);
  (void) SetResampleFilterVirtualPixelMethod(resample_filter,
    GetImageVirtualPixelMethod(image));

  return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResampleFilter() finalizes and cleans up the resampling
% resample_filter as returned by AcquireResampleFilter(), freeing any memory
% or other information as needed.
%
% The format of the DestroyResampleFilter method is:
%
% ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter)
%
% A description of each parameter follows:
%
% o resample_filter: resampling information structure
%
*/
MagickExport ResampleFilter *DestroyResampleFilter(
  ResampleFilter *resample_filter)
{
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);

  /* Release the view and the image reference taken in Acquire. */
  resample_filter->view=DestroyCacheView(resample_filter->view);
  resample_filter->image=DestroyImage(resample_filter->image);
#if ! FILTER_LUT
  /* Direct-call mode owns a ResizeFilter instead of a LUT. */
  resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def);
#endif

  /* Invalidate the signature so stale pointers trip the asserts above. */
  resample_filter->signature=(~MagickCoreSignature);
  resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter);
  return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e P i x e l C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResamplePixelColor() samples the pixel values surrounding the location
% given using an elliptical weighted average, at the scale previously
% calculated, and in the most efficent manner possible for the
% VirtualPixelMethod setting.
%
% The format of the ResamplePixelColor method is:
%
% MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter,
% const double u0,const double v0,PixelInfo *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o u0,v0: A double representing the center of the area to resample,
% The distortion transformed transformed x,y coordinate.
%
% o pixel: the resampled pixel is returned here.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResamplePixelColor(
  ResampleFilter *resample_filter,const double u0,const double v0,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t u,v, v1, v2, uw, hit;
  double u1;
  double U,V,Q,DQ,DDQ;
  double divisor_c,divisor_m;
  register double weight;
  register const Quantum *pixels;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);

  status=MagickTrue;
  /* GetPixelInfo(resample_filter->image,pixel); */

  /* Interpolated lookup was requested (or EWA disabled): no area average. */
  if ( resample_filter->do_interpolate ) {
    status=InterpolatePixelInfo(resample_filter->image,resample_filter->view,
      resample_filter->interpolate,u0,v0,pixel,resample_filter->exception);
    return(status);
  }

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "u0=%lf; v0=%lf;\n", u0, v0);
#endif

  /*
    Does the resample area Miss the image Proper?
    If so, and that area is a simple solid color - then simply return
    that color!  This saves a lot of calculation when resampling outside
    the bounds of the source image.

    However it probably should be expanded to image bounds plus the filters
    scaled support size.
  */
  hit = 0;
  switch ( resample_filter->virtual_pixel ) {
    case BackgroundVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case MaskVirtualPixelMethod:
      /* Whole ellipse outside the image => constant virtual color. */
      if ( resample_filter->limit_reached
           || u0 + resample_filter->Ulimit < 0.0
           || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
           || v0 + resample_filter->Vlimit < 0.0
           || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
           )
        hit++;
      break;

    case UndefinedVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
      /* Edge replication is only constant beyond a CORNER of the image. */
      if ( ( u0 + resample_filter->Ulimit < 0.0 && v0 + resample_filter->Vlimit < 0.0 )
           || ( u0 + resample_filter->Ulimit < 0.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
                && v0 + resample_filter->Vlimit < 0.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
           )
        hit++;
      break;
    case HorizontalTileVirtualPixelMethod:
      if ( v0 + resample_filter->Vlimit < 0.0
           || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
           )
        hit++;  /* outside the horizontally tiled images. */
      break;
    case VerticalTileVirtualPixelMethod:
      if ( u0 + resample_filter->Ulimit < 0.0
           || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
           )
        hit++;  /* outside the vertically tiled images. */
      break;
    case DitherVirtualPixelMethod:
      /* Dither has a 32-pixel fuzz band around the image proper. */
      if ( ( u0 + resample_filter->Ulimit < -32.0 && v0 + resample_filter->Vlimit < -32.0 )
           || ( u0 + resample_filter->Ulimit < -32.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
                && v0 + resample_filter->Vlimit < -32.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
           )
        hit++;
      break;
    case TileVirtualPixelMethod:
    case MirrorVirtualPixelMethod:
    case RandomVirtualPixelMethod:
    case HorizontalTileEdgeVirtualPixelMethod:
    case VerticalTileEdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
      /* resampling of area is always needed - no VP limits */
      break;
  }
  if ( hit ) {
    /* The area being resampled is simply a solid color
     * just return a single lookup color.
     *
     * Should this return the users requested interpolated color?
     */
    status=InterpolatePixelInfo(resample_filter->image,resample_filter->view,
      IntegerInterpolatePixel,u0,v0,pixel,resample_filter->exception);
    return(status);
  }

  /*
    When Scaling limits reached, return an 'averaged' result.
  */
  if ( resample_filter->limit_reached ) {
    switch ( resample_filter->virtual_pixel ) {
      /*  This is always handled by the above, so no need.
        case BackgroundVirtualPixelMethod:
        case ConstantVirtualPixelMethod:
        case TransparentVirtualPixelMethod:
        case GrayVirtualPixelMethod,
        case WhiteVirtualPixelMethod
        case MaskVirtualPixelMethod:
      */
      case UndefinedVirtualPixelMethod:
      case EdgeVirtualPixelMethod:
      case DitherVirtualPixelMethod:
      case HorizontalTileEdgeVirtualPixelMethod:
      case VerticalTileEdgeVirtualPixelMethod:
        /* We need an average edge pixel, from the correct edge!
           How should I calculate an average edge color?
           Just returning an averaged neighbourhood,
           works well in general, but falls down for TileEdge methods.
           This needs to be done properly!!!!!!
        */
        status=InterpolatePixelInfo(resample_filter->image,
          resample_filter->view,AverageInterpolatePixel,u0,v0,pixel,
          resample_filter->exception);
        break;
      case HorizontalTileVirtualPixelMethod:
      case VerticalTileVirtualPixelMethod:
        /* just return the background pixel - Is there more direct way? */
        status=InterpolatePixelInfo(resample_filter->image,
          resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel,
          resample_filter->exception);
        break;
      case TileVirtualPixelMethod:
      case MirrorVirtualPixelMethod:
      case RandomVirtualPixelMethod:
      case CheckerTileVirtualPixelMethod:
      default:
        /* generate a average color of the WHOLE image (computed lazily,
           cached in resample_filter->average_pixel) */
        if ( resample_filter->average_defined == MagickFalse ) {
          Image
            *average_image;

          CacheView
            *average_view;

          GetPixelInfo(resample_filter->image,(PixelInfo *)
            &resample_filter->average_pixel);
          resample_filter->average_defined=MagickTrue;

          /* Try to get an averaged pixel color of whole image */
          average_image=ResizeImage(resample_filter->image,1,1,BoxFilter,
            resample_filter->exception);
          if (average_image == (Image *) NULL)
            {
              *pixel=resample_filter->average_pixel; /* FAILED */
              break;
            }
          average_view=AcquireVirtualCacheView(average_image,exception);
          pixels=GetCacheViewVirtualPixels(average_view,0,0,1,1,
            resample_filter->exception);
          if (pixels == (const Quantum *) NULL) {
            average_view=DestroyCacheView(average_view);
            average_image=DestroyImage(average_image);
            *pixel=resample_filter->average_pixel; /* FAILED */
            break;
          }
          GetPixelInfoPixel(resample_filter->image,pixels,
            &(resample_filter->average_pixel));
          average_view=DestroyCacheView(average_view);
          average_image=DestroyImage(average_image);

          if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod )
            {
              /* CheckerTile is a alpha blend of the image's average pixel
                 color and the current background color */

              /* image's average pixel color */
              weight = QuantumScale*((double)
                resample_filter->average_pixel.alpha);
              resample_filter->average_pixel.red *= weight;
              resample_filter->average_pixel.green *= weight;
              resample_filter->average_pixel.blue *= weight;
              divisor_c = weight;

              /* background color */
              weight = QuantumScale*((double)
                resample_filter->image->background_color.alpha);
              resample_filter->average_pixel.red +=
                weight*resample_filter->image->background_color.red;
              resample_filter->average_pixel.green +=
                weight*resample_filter->image->background_color.green;
              resample_filter->average_pixel.blue +=
                weight*resample_filter->image->background_color.blue;
              /* NOTE(review): alpha is accumulated unweighted and halved
                 below ("50% blend"), unlike the color channels — confirm
                 this asymmetry is intended. */
              resample_filter->average_pixel.alpha +=
                resample_filter->image->background_color.alpha;
              divisor_c += weight;

              /* alpha blend */
              resample_filter->average_pixel.red /= divisor_c;
              resample_filter->average_pixel.green /= divisor_c;
              resample_filter->average_pixel.blue /= divisor_c;
              resample_filter->average_pixel.alpha /= 2; /* 50% blend */
            }
        }
        *pixel=resample_filter->average_pixel;
        break;
    }
    return(status);
  }

  /*
    Initialize weighted average data collection
  */
  hit = 0;
  divisor_c = 0.0;
  divisor_m = 0.0;
  pixel->red = pixel->green = pixel->blue = 0.0;
  if (pixel->colorspace == CMYKColorspace)
    pixel->black = 0.0;
  if (pixel->alpha_trait != UndefinedPixelTrait)
    pixel->alpha = 0.0;

  /*
    Determine the parallelogram bounding box fitted to the ellipse
    centered at u0,v0.  This area is bounded by the lines...
  */
  v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit);  /* range of scan lines */
  v2 = (ssize_t)floor(v0 + resample_filter->Vlimit);

  /* scan line start and width across the parallelogram */
  u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth;
  uw = (ssize_t)(2.0*resample_filter->Uwidth)+1;

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2);
  (void) FormatLocaleFile(stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw);
#else
# define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */
#endif

  /*
    Do weighted resampling of all pixels, within the scaled ellipse,
    bound by a Parallelogram fitted to the ellipse.
    Q is evaluated incrementally: DQ and DDQ are its first and (constant)
    second finite differences along a scan line.
  */
  DDQ = 2*resample_filter->A;
  for( v=v1; v<=v2; v++ ) {
#if DEBUG_HIT_MISS
    long uu = ceil(u1);   /* actual pixel location (for debug only) */
    (void) FormatLocaleFile(stderr, "# scan line from pixel %ld, %ld\n", (long)uu, (long)v);
#endif
    u = (ssize_t)ceil(u1);        /* first pixel in scanline */
    u1 += resample_filter->slope; /* start of next scan line */

    /* location of this first pixel, relative to u0,v0 */
    U = (double)u-u0;
    V = (double)v-v0;

    /* Q = ellipse quotient ( if Q<F then pixel is inside ellipse) */
    Q = (resample_filter->A*U + resample_filter->B*V)*U + resample_filter->C*V*V;
    DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V;

    /* get the scanline of pixels for this v */
    pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw,
      1,resample_filter->exception);
    if (pixels == (const Quantum *) NULL)
      return(MagickFalse);

    /* count up the weighted pixel colors */
    for( u=0; u<uw; u++ ) {
#if FILTER_LUT
      /* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */
      if ( Q < (double)WLUT_WIDTH ) {
        weight = resample_filter->filter_lut[(int)Q];
#else
      /* Note that the ellipse has been pre-scaled so F = support^2 */
      if ( Q < (double)resample_filter->F ) {
        weight = GetResizeFilterWeight(resample_filter->filter_def,
          sqrt(Q)); /* a SquareRoot!  Arrggghhhhh... */
#endif

        /* alpha channel is averaged with its own divisor (divisor_m);
           color channels are alpha-weighted (associated alpha) */
        pixel->alpha += weight*GetPixelAlpha(resample_filter->image,pixels);
        divisor_m += weight;

        if (pixel->alpha_trait != UndefinedPixelTrait)
          weight *= QuantumScale*((double) GetPixelAlpha(resample_filter->image,pixels));
        pixel->red += weight*GetPixelRed(resample_filter->image,pixels);
        pixel->green += weight*GetPixelGreen(resample_filter->image,pixels);
        pixel->blue += weight*GetPixelBlue(resample_filter->image,pixels);
        if (pixel->colorspace == CMYKColorspace)
          pixel->black += weight*GetPixelBlack(resample_filter->image,pixels);
        divisor_c += weight;

        hit++;
#if DEBUG_HIT_MISS
        /* mark the pixel according to hit/miss of the ellipse */
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
          (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
          (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
      } else {
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
          (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
          (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
      }
      uu++;
#else
      }
#endif
      pixels+=GetPixelChannels(resample_filter->image);
      Q += DQ;
      DQ += DDQ;
    }
  }
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "Hit=%ld; Total=%ld;\n", (long)hit, (long)uw*(v2-v1) );
#endif

  /*
    Result sanity check -- this should NOT happen
  */
  if ( hit == 0 || divisor_m <= MagickEpsilon || divisor_c <= MagickEpsilon ) {
    /* not enough pixels, or bad weighting in resampling,
       resort to direct interpolation */
#if DEBUG_NO_PIXEL_HIT
    pixel->alpha = pixel->red = pixel->green = pixel->blue = 0;
    pixel->red = QuantumRange; /* show pixels for which EWA fails */
#else
    status=InterpolatePixelInfo(resample_filter->image,
      resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
      resample_filter->exception);
#endif
    return status;
  }

  /*
    Finalize results of resampling
  */
  divisor_m = 1.0/divisor_m;
  if (pixel->alpha_trait != UndefinedPixelTrait)
    pixel->alpha = (double) ClampToQuantum(divisor_m*pixel->alpha);
  divisor_c = 1.0/divisor_c;
  pixel->red = (double) ClampToQuantum(divisor_c*pixel->red);
  pixel->green = (double) ClampToQuantum(divisor_c*pixel->green);
  pixel->blue = (double) ClampToQuantum(divisor_c*pixel->blue);
  if (pixel->colorspace == CMYKColorspace)
    pixel->black = (double) ClampToQuantum(divisor_c*pixel->black);
  return(MagickTrue);
}
#if EWA && EWA_CLAMP
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
- C l a m p U p A x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampUpAxes() function converts the input vectors into a major and
% minor axis unit vectors, and their magnitude. This allows us to
% ensure that the ellipse generated is never smaller than the unit
% circle and thus never too small for use in EWA resampling.
%
% This purely mathematical 'magic' was provided by Professor Nicolas
% Robidoux and his Masters student Chantal Racette.
%
% Reference: "We Recommend Singular Value Decomposition", David Austin
% http://www.ams.org/samplings/feature-column/fcarc-svd
%
% By generating major and minor axis vectors, we can actually use the
% ellipse in its "canonical form", by remapping the dx,dy of the
% sampled point into distances along the major and minor axis unit
% vectors.
%
% Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form
*/
static inline void ClampUpAxes(const double dux,
const double dvx,
const double duy,
const double dvy,
double *major_mag,
double *minor_mag,
double *major_unit_x,
double *major_unit_y,
double *minor_unit_x,
double *minor_unit_y)
{
/*
* ClampUpAxes takes an input 2x2 matrix
*
* [ a b ] = [ dux duy ]
* [ c d ] = [ dvx dvy ]
*
* and computes from it the major and minor axis vectors [major_x,
* major_y] and [minor_x,minor_y] of the smallest ellipse containing
* both the unit disk and the ellipse which is the image of the unit
* disk by the linear transformation
*
* [ dux duy ] [S] = [s]
* [ dvx dvy ] [T] = [t]
*
* (The vector [S,T] is the difference between a position in output
* space and [X,Y]; the vector [s,t] is the difference between a
* position in input space and [x,y].)
*/
/*
* Output:
*
* major_mag is the half-length of the major axis of the "new"
* ellipse.
*
* minor_mag is the half-length of the minor axis of the "new"
* ellipse.
*
* major_unit_x is the x-coordinate of the major axis direction vector
* of both the "old" and "new" ellipses.
*
* major_unit_y is the y-coordinate of the major axis direction vector.
*
* minor_unit_x is the x-coordinate of the minor axis direction vector.
*
* minor_unit_y is the y-coordinate of the minor axis direction vector.
*
* Unit vectors are useful for computing projections, in particular,
* to compute the distance between a point in output space and the
* center of a unit disk in output space, using the position of the
* corresponding point [s,t] in input space. Following the clamping,
* the square of this distance is
*
* ( ( s * major_unit_x + t * major_unit_y ) / major_mag )^2
* +
* ( ( s * minor_unit_x + t * minor_unit_y ) / minor_mag )^2
*
* If such distances will be computed for many [s,t]'s, it makes
* sense to actually compute the reciprocal of major_mag and
* minor_mag and multiply them by the above unit lengths.
*
* Now, if you want to modify the input pair of tangent vectors so
* that it defines the modified ellipse, all you have to do is set
*
* newdux = major_mag * major_unit_x
* newdvx = major_mag * major_unit_y
* newduy = minor_mag * minor_unit_x = minor_mag * -major_unit_y
* newdvy = minor_mag * minor_unit_y = minor_mag * major_unit_x
*
* and use these tangent vectors as if they were the original ones.
* Usually, this is a drastic change in the tangent vectors even if
* the singular values are not clamped; for example, the minor axis
* vector always points in a direction which is 90 degrees
* counterclockwise from the direction of the major axis vector.
*/
/*
* Discussion:
*
* GOAL: Fix things so that the pullback, in input space, of a disk
* of radius r in output space is an ellipse which contains, at
* least, a disc of radius r. (Make this hold for any r>0.)
*
* ESSENCE OF THE METHOD: Compute the product of the first two
* factors of an SVD of the linear transformation defining the
* ellipse and make sure that both its columns have norm at least 1.
* Because rotations and reflexions map disks to themselves, it is
* not necessary to compute the third (rightmost) factor of the SVD.
*
* DETAILS: Find the singular values and (unit) left singular
* vectors of Jinv, clampling up the singular values to 1, and
* multiply the unit left singular vectors by the new singular
* values in order to get the minor and major ellipse axis vectors.
*
* Image resampling context:
*
* The Jacobian matrix of the transformation at the output point
* under consideration is defined as follows:
*
* Consider the transformation (x,y) -> (X,Y) from input locations
* to output locations. (Anthony Thyssen, elsewhere in resample.c,
* uses the notation (u,v) -> (x,y).)
*
* The Jacobian matrix of the transformation at (x,y) is equal to
*
* J = [ A, B ] = [ dX/dx, dX/dy ]
* [ C, D ] [ dY/dx, dY/dy ]
*
* that is, the vector [A,C] is the tangent vector corresponding to
* input changes in the horizontal direction, and the vector [B,D]
* is the tangent vector corresponding to input changes in the
* vertical direction.
*
* In the context of resampling, it is natural to use the inverse
* Jacobian matrix Jinv because resampling is generally performed by
* pulling pixel locations in the output image back to locations in
* the input image. Jinv is
*
* Jinv = [ a, b ] = [ dx/dX, dx/dY ]
* [ c, d ] [ dy/dX, dy/dY ]
*
* Note: Jinv can be computed from J with the following matrix
* formula:
*
* Jinv = 1/(A*D-B*C) [ D, -B ]
* [ -C, A ]
*
* What we do is modify Jinv so that it generates an ellipse which
* is as close as possible to the original but which contains the
* unit disk. This can be accomplished as follows:
*
* Let
*
* Jinv = U Sigma V^T
*
* be an SVD decomposition of Jinv. (The SVD is not unique, but the
* final ellipse does not depend on the particular SVD.)
*
* We could clamp up the entries of the diagonal matrix Sigma so
* that they are at least 1, and then set
*
* Jinv = U newSigma V^T.
*
* However, we do not need to compute V for the following reason:
* V^T is an orthogonal matrix (that is, it represents a combination
* of rotations and reflexions) so that it maps the unit circle to
* itself. For this reason, the exact value of V does not affect the
* final ellipse, and we can choose V to be the identity
* matrix. This gives
*
* Jinv = U newSigma.
*
* In the end, we return the two diagonal entries of newSigma
* together with the two columns of U.
*/
/*
* ClampUpAxes was written by Nicolas Robidoux and Chantal Racette
* of Laurentian University with insightful suggestions from Anthony
* Thyssen and funding from the National Science and Engineering
* Research Council of Canada. It is distinguished from its
* predecessors by its efficient handling of degenerate cases.
*
* The idea of clamping up the EWA ellipse's major and minor axes so
* that the result contains the reconstruction kernel filter support
* is taken from Andreas Gustaffson's Masters thesis "Interactive
* Image Warping", Helsinki University of Technology, Faculty of
* Information Technology, 59 pages, 1993 (see Section 3.6).
*
* The use of the SVD to clamp up the singular values of the
* Jacobian matrix of the pullback transformation for EWA resampling
* is taken from the astrophysicist Craig DeForest. It is
* implemented in his PDL::Transform code (PDL = Perl Data
* Language).
*/
const double a = dux;
const double b = duy;
const double c = dvx;
const double d = dvy;
/*
* n is the matrix Jinv * transpose(Jinv). Eigenvalues of n are the
* squares of the singular values of Jinv.
*/
const double aa = a*a;
const double bb = b*b;
const double cc = c*c;
const double dd = d*d;
/*
* Eigenvectors of n are left singular vectors of Jinv.
*/
const double n11 = aa+bb;
const double n12 = a*c+b*d;
const double n21 = n12;
const double n22 = cc+dd;
const double det = a*d-b*c;
const double twice_det = det+det;
const double frobenius_squared = n11+n22;
const double discriminant =
(frobenius_squared+twice_det)*(frobenius_squared-twice_det);
/*
* In exact arithmetic, discriminant can't be negative. In floating
* point, it can, because of the bad conditioning of SVD
* decompositions done through the associated normal matrix.
*/
const double sqrt_discriminant =
sqrt(discriminant > 0.0 ? discriminant : 0.0);
/*
* s1 is the largest singular value of the inverse Jacobian
* matrix. In other words, its reciprocal is the smallest singular
* value of the Jacobian matrix itself.
* If s1 = 0, both singular values are 0, and any orthogonal pair of
* left and right factors produces a singular decomposition of Jinv.
*/
/*
* Initially, we only compute the squares of the singular values.
*/
const double s1s1 = 0.5*(frobenius_squared+sqrt_discriminant);
/*
* s2 the smallest singular value of the inverse Jacobian
* matrix. Its reciprocal is the largest singular value of the
* Jacobian matrix itself.
*/
const double s2s2 = 0.5*(frobenius_squared-sqrt_discriminant);
const double s1s1minusn11 = s1s1-n11;
const double s1s1minusn22 = s1s1-n22;
/*
* u1, the first column of the U factor of a singular decomposition
* of Jinv, is a (non-normalized) left singular vector corresponding
* to s1. It has entries u11 and u21. We compute u1 from the fact
* that it is an eigenvector of n corresponding to the eigenvalue
* s1^2.
*/
const double s1s1minusn11_squared = s1s1minusn11*s1s1minusn11;
const double s1s1minusn22_squared = s1s1minusn22*s1s1minusn22;
/*
* The following selects the largest row of n-s1^2 I as the one
* which is used to find the eigenvector. If both s1^2-n11 and
* s1^2-n22 are zero, n-s1^2 I is the zero matrix. In that case,
* any vector is an eigenvector; in addition, norm below is equal to
* zero, and, in exact arithmetic, this is the only case in which
* norm = 0. So, setting u1 to the simple but arbitrary vector [1,0]
* if norm = 0 safely takes care of all cases.
*/
const double temp_u11 =
( (s1s1minusn11_squared>=s1s1minusn22_squared) ? n12 : s1s1minusn22 );
const double temp_u21 =
( (s1s1minusn11_squared>=s1s1minusn22_squared) ? s1s1minusn11 : n21 );
const double norm = sqrt(temp_u11*temp_u11+temp_u21*temp_u21);
/*
* Finalize the entries of first left singular vector (associated
* with the largest singular value).
*/
const double u11 = ( (norm>0.0) ? temp_u11/norm : 1.0 );
const double u21 = ( (norm>0.0) ? temp_u21/norm : 0.0 );
/*
* Clamp the singular values up to 1.
*/
*major_mag = ( (s1s1<=1.0) ? 1.0 : sqrt(s1s1) );
*minor_mag = ( (s2s2<=1.0) ? 1.0 : sqrt(s2s2) );
/*
* Return the unit major and minor axis direction vectors.
*/
*major_unit_x = u11;
*major_unit_y = u21;
*minor_unit_x = -u21;
*minor_unit_y = u11;
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleResampleFilter() does all the calculations needed to resample an image
% at a specific scale, defined by two scaling vectors.  This is not an
% orthogonal scaling, but two distorted scaling vectors, to allow the
% generation of an angled ellipse.
%
% As only two derivative scaling vectors are used the center of the ellipse
% must be the center of the lookup.  That is, any curvature that the
% distortion may produce is discounted.
%
% The input vectors are produced by either finding the derivatives of the
% distortion function, or the partial derivatives from a distortion mapping.
% They do not need to be the orthogonal dx,dy scaling vectors, but can be
% calculated from other derivatives.  For example you could use dr,da/r
% polar coordinate vector scaling vectors.
%
% If u,v = DistortEquation(x,y)   OR   u = Fu(x,y); v = Fv(x,y)
% Then the scaling vectors are determined from the derivatives...
%     du/dx, dv/dx     and     du/dy, dv/dy
% If the resulting scaling vectors are orthogonally aligned then...
%     dv/dx = 0 and du/dy = 0
% Producing an orthogonally aligned ellipse in source space for the area to
% be resampled.
%
% Note that scaling vectors are different to argument order.  Argument order
% is the general order the derivatives are extracted from the distortion
% equations, and not the scaling vectors.  As such the middle two values
% may be swapped from what you expect.  Caution is advised.
%
% WARNING: It is assumed that any SetResampleFilter() method call will
% always be performed before the ScaleResampleFilter() method, so that the
% size of the ellipse will match the support for the resampling filter being
% used.
%
% The format of the ScaleResampleFilter method is:
%
% void ScaleResampleFilter(const ResampleFilter *resample_filter,
% const double dux,const double duy,const double dvx,const double dvy)
%
% A description of each parameter follows:
%
% o resample_filter: the resampling information defining the
% image being resampled
%
% o dux,duy,dvx,dvy:
% The derivatives or scaling vectors defining the EWA ellipse.
% NOTE: watch the order, which is based on the order derivatives
% are usually determined from distortion equations (see above).
% The middle two values may need to be swapped if you are thinking
% in terms of scaling vectors.
%
*/
MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter,
  const double dux,const double duy,const double dvx,const double dvy)
{
  double A,B,C,F;
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  resample_filter->limit_reached = MagickFalse;
  /* A 'point' filter forces use of interpolation instead of area sampling */
  if ( resample_filter->filter == PointFilter )
    return; /* EWA turned off - nothing to do */
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "# -----\n" );
  (void) FormatLocaleFile(stderr, "dux=%lf; dvx=%lf; duy=%lf; dvy=%lf;\n",
    dux, dvx, duy, dvy);
#endif
  /* Find Ellipse Coefficients such that
       A*u^2 + B*u*v + C*v^2 = F
     With u,v relative to point around which we are resampling.
     And the given scaling dx,dy vectors in u,v space
       du/dx,dv/dx and du/dy,dv/dy
  */
#if EWA
  /* Direct conversion of derivatives into elliptical coefficients
     However when magnifying images, the scaling vectors will be small
     resulting in an ellipse that is too small to sample properly.
     As such we need to clamp the major/minor axis to a minimum of 1.0
     to prevent it getting too small.
  */
#if EWA_CLAMP
  { double major_mag,
      minor_mag,
      major_x,
      major_y,
      minor_x,
      minor_y;
  /* Clamp the ellipse axes (rather than the raw coefficients) so the
     sampled area always covers at least a unit disk in source space. */
  ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag,
              &major_x, &major_y, &minor_x, &minor_y);
  major_x *= major_mag;  major_y *= major_mag;
  minor_x *= minor_mag;  minor_y *= minor_mag;
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "major_x=%lf; major_y=%lf; minor_x=%lf; minor_y=%lf;\n",
    major_x, major_y, minor_x, minor_y);
#endif
  /* Rebuild the implicit ellipse coefficients from the (clamped) axes */
  A = major_y*major_y+minor_y*minor_y;
  B = -2.0*(major_x*major_y+minor_x*minor_y);
  C = major_x*major_x+minor_x*minor_x;
  F = major_mag*minor_mag;
  F *= F; /* square it */
  }
#else /* raw unclamped EWA */
  A = dvx*dvx+dvy*dvy;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy;
  F = dux*dvy-duy*dvx;
  F *= F; /* square it */
#endif /* EWA_CLAMP */
#else /* HQ_EWA */
  /*
    This Paul Heckbert's "Higher Quality EWA" formula, from page 60 in his
    thesis, which adds a unit circle to the elliptical area so as to do both
    Reconstruction and Prefiltering of the pixels in the resampling. It also
    means it is always likely to have at least 4 pixels within the area of the
    ellipse, for weighted averaging. No scaling will result with F == 4.0 and
    a circle of radius 2.0, and F smaller than this means magnification is
    being used.
    NOTE: This method produces a very blurry result at near unity scale while
    producing perfect results for strong minification and magnifications.
    However filter support is fixed to 2.0 (no good for Windowed Sinc filters)
  */
  A = dvx*dvx+dvy*dvy+1;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy+1;
  F = A*C - B*B/4;
#endif
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F);
  /* Figure out the various information directly about the ellipse.
     This information currently not needed at this time, but may be
     needed later for better limit determination.
     It is also good to have as a record for future debugging
  */
  { double alpha, beta, gamma, Major, Minor;
    double Eccentricity, Ellipse_Area, Ellipse_Angle;
    alpha = A+C;
    beta  = A-C;
    gamma = sqrt(beta*beta + B*B );
    if ( alpha - gamma <= MagickEpsilon )
      Major=MagickMaximumValue;
    else
      Major=sqrt(2*F/(alpha - gamma));
    Minor = sqrt(2*F/(alpha + gamma));
    (void) FormatLocaleFile(stderr, "# Major=%lf; Minor=%lf\n", Major, Minor );
    /* other information about ellipse include... */
    Eccentricity = Major/Minor;
    Ellipse_Area = MagickPI*Major*Minor;
    Ellipse_Angle = atan2(B, A-C);
    (void) FormatLocaleFile(stderr, "# Angle=%lf Area=%lf\n",
      (double) RadiansToDegrees(Ellipse_Angle), Ellipse_Area);
  }
#endif
  /* If one or both of the scaling vectors is impossibly large
     (producing a very large raw F value), we may as well not bother
     doing any form of resampling since resampled area is very large.
     In this case some alternative means of pixel sampling, such as
     the average of the whole image is needed to get a reasonable
     result. Calculate only as needed.
  */
  if ( (4*A*C - B*B) > MagickMaximumValue ) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }
  /* Scale ellipse to match the filters support
     (that is, multiply F by the square of the support)
     Simpler to just multiply it by the support twice!
  */
  F *= resample_filter->support;
  F *= resample_filter->support;
  /* Orthogonal bounds of the ellipse */
  resample_filter->Ulimit = sqrt(C*F/(A*C-0.25*B*B));
  resample_filter->Vlimit = sqrt(A*F/(A*C-0.25*B*B));
  /* Horizontally aligned parallelogram fitted to Ellipse */
  resample_filter->Uwidth = sqrt(F/A); /* Half of the parallelogram width */
  resample_filter->slope = -B/(2.0*A); /* Reciprocal slope of the parallelogram */
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n",
    resample_filter->Ulimit, resample_filter->Vlimit,
    resample_filter->Uwidth, resample_filter->slope );
#endif
  /* Check the absolute area of the parallelogram involved.
   * This limit needs more work, as it is too slow for larger images
   * with tiled views of the horizon.
  */
  if ( (resample_filter->Uwidth * resample_filter->Vlimit)
        > (4.0*resample_filter->image_area)) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }
  /* Scale ellipse formula to directly index the Filter Lookup Table */
  { register double scale;
#if FILTER_LUT
    /* scale so that F = WLUT_WIDTH; -- hardcoded */
    scale = (double)WLUT_WIDTH/F;
#else
    /* scale so that F = resample_filter->F (support^2) */
    scale = resample_filter->F/F;
#endif
    resample_filter->A = A*scale;
    resample_filter->B = B*scale;
    resample_filter->C = C*scale;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilter() sets the resampling filter lookup table based on a
% specific filter.  Note that the filter is used as a radial filter, not as a
% two-pass orthogonally aligned resampling filter.
%
% The format of the SetResampleFilter method is:
%
%      void SetResampleFilter(ResampleFilter *resample_filter,
%        const FilterType filter)
%
% A description of each parameter follows:
%
%   o resample_filter: the resampling information structure
%
%   o filter: the resize filter for elliptical weighting LUT
%
*/
MagickExport void SetResampleFilter(ResampleFilter *resample_filter,
  const FilterType filter)
{
  ResizeFilter
    *resize_filter;
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  resample_filter->do_interpolate = MagickFalse;
  resample_filter->filter = filter;
  /* Default cylindrical filter is a Cubic Keys filter */
  if ( filter == UndefinedFilter )
    resample_filter->filter = RobidouxFilter;
  if ( resample_filter->filter == PointFilter ) {
    resample_filter->do_interpolate = MagickTrue;
    return; /* EWA turned off - nothing more to do */
  }
  /* Acquire the radial filter whose weights will be sampled below */
  resize_filter = AcquireResizeFilter(resample_filter->image,
    resample_filter->filter,MagickTrue,resample_filter->exception);
  if (resize_filter == (ResizeFilter *) NULL) {
    (void) ThrowMagickException(resample_filter->exception,GetMagickModule(),
      ModuleError, "UnableToSetFilteringValue",
      "Fall back to Interpolated 'Point' filter");
    resample_filter->filter = PointFilter;
    resample_filter->do_interpolate = MagickTrue;
    return; /* EWA turned off - nothing more to do */
  }
  /* Get the practical working support for the filter,
   * after any API call blur factors have been accounted for.
  */
#if EWA
  resample_filter->support = GetResizeFilterSupport(resize_filter);
#else
  resample_filter->support = 2.0; /* fixed support size for HQ-EWA */
#endif
#if FILTER_LUT
  /* Fill the LUT with the weights from the selected filter function */
  { register int
      Q;
    double
      r_scale;
    /* Scale radius so the filter LUT covers the full support range */
    r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
    for(Q=0; Q<WLUT_WIDTH; Q++)
      resample_filter->filter_lut[Q] = (double)
        GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale);
    /* finished with the resize filter */
    resize_filter = DestroyResizeFilter(resize_filter);
  }
#else
  /* save the filter and the scaled ellipse bounds needed for filter */
  resample_filter->filter_def = resize_filter;
  resample_filter->F = resample_filter->support*resample_filter->support;
#endif
  /*
    Adjust the scaling of the default unit circle
    This assumes that any real scaling changes will always
    take place AFTER the filter method has been initialized.
  */
  ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0);
#if 0
  /*
    This is old code kept as a reference only. Basically it generates
    a Gaussian bell curve, with sigma = 0.5 if the support is 2.0
    Create Normal Gaussian 2D Filter Weighted Lookup Table.
    A normal EWA gaussian lookup would use exp(Q*ALPHA)
    where Q = distance squared from 0.0 (center) to 1.0 (edge)
    and ALPHA = -4.0*ln(2.0) ==> -2.77258872223978123767
    The table is of length 1024, and equates to support radius of 2.0
    thus needs to be scaled by ALPHA*4/1024 and any blur factor squared
    It comes from reference code provided by Fred Weinhaus.
  */
  r_scale = -2.77258872223978123767/(WLUT_WIDTH*blur*blur);
  for(Q=0; Q<WLUT_WIDTH; Q++)
    resample_filter->filter_lut[Q] = exp((double)Q*r_scale);
  resample_filter->support = WLUT_WIDTH;
#endif
#if FILTER_LUT
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp single
#endif
  {
    if (IsStringTrue(GetImageArtifact(resample_filter->image,
        "resample:verbose")) != MagickFalse)
      {
        register int
          Q;
        double
          r_scale;
        /* Debug output of the filter weighting LUT
          Gnuplot the LUT data, the x scale index has been adjusted
            plot [0:2][-.2:1] "lut.dat" with lines
          The filter values should be normalized for comparison
        */
        printf("#\n");
        printf("# Resampling Filter LUT (%d values) for '%s' filter\n",
            WLUT_WIDTH, CommandOptionToMnemonic(MagickFilterOptions,
            resample_filter->filter) );
        printf("#\n");
        printf("# Note: values in table are using a squared radius lookup.\n");
        printf("# As such its distribution is not uniform.\n");
        printf("#\n");
        printf("# The X value is the support distance for the Y weight\n");
        printf("# so you can use gnuplot to plot this cylindrical filter\n");
        printf("#    plot [0:2][-.2:1] \"lut.dat\" with lines\n");
        printf("#\n");
        /* Scale radius so the filter LUT covers the full support range */
        r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
        for(Q=0; Q<WLUT_WIDTH; Q++)
          printf("%8.*g %.*g\n",
              GetMagickPrecision(),sqrt((double)Q)*r_scale,
              GetMagickPrecision(),resample_filter->filter_lut[Q] );
        printf("\n\n"); /* generate a 'break' in gnuplot if multiple outputs */
      }
    /* Output the above once only for each image, and each setting
    (void) DeleteImageArtifact(resample_filter->image,"resample:verbose");
    */
  }
#endif /* FILTER_LUT */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r I n t e r p o l a t e M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterInterpolateMethod() sets the resample filter interpolation
% method.
%
% The format of the SetResampleFilterInterpolateMethod method is:
%
% MagickBooleanType SetResampleFilterInterpolateMethod(
% ResampleFilter *resample_filter,const InterpolateMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the interpolation method.
%
*/
MagickExport MagickBooleanType SetResampleFilterInterpolateMethod(
  ResampleFilter *resample_filter,const PixelInterpolateMethod method)
{
  /* Record the pixel interpolation method on the resample filter.
     Always succeeds and returns MagickTrue. */
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  /* trace the call when debugging is enabled on this filter */
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  resample_filter->interpolate=method;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterVirtualPixelMethod() changes the virtual pixel method
% associated with the specified resample filter.
%
% The format of the SetResampleFilterVirtualPixelMethod method is:
%
% MagickBooleanType SetResampleFilterVirtualPixelMethod(
% ResampleFilter *resample_filter,const VirtualPixelMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the virtual pixel method.
%
*/
MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod(
  ResampleFilter *resample_filter,const VirtualPixelMethod method)
{
  /* Record the virtual pixel method on the resample filter and, when a
     concrete (non-Undefined) method is given, propagate it to the
     underlying cache view.  Always returns MagickTrue. */
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  /* trace the call when debugging is enabled on this filter */
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  resample_filter->virtual_pixel=method;
  if (method != UndefinedVirtualPixelMethod)
    (void) SetCacheViewVirtualPixelMethod(resample_filter->view,method);
  return(MagickTrue);
}
|
middle6r.c | /*
* Date: 11 December 2015
* Contact: Thomas Peyrin - thomas.peyrin@gmail.com
*/
/*
* Simulation of boomerang analysis for Skinny
* Date: March 21, 2020
* Author: Hosein Hadipour
* Contact: hsn.hadipour@gmail.com
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#include <stdbool.h>
// #define DEBUG 1
#define Nthreads 16
// Table that encodes the parameters of the various Skinny versions:
// (block size, key size, number of rounds)
//Skinny-64-64: 32 rounds
//Skinny-64-128: 36 rounds
//Skinny-64-192: 40 rounds
//Skinny-128-128: 40 rounds
//Skinny-128-256: 48 rounds
//Skinny-128-384: 56 rounds
// Per-version parameters: {block size (bits), tweakey size (bits), rounds}.
int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40}, {128, 128, 40}, {128, 256, 48}, {128, 384, 56}};
// Packing of data is done as follows (state[i][j] stands for row i and column j):
// 0 1 2 3
// 4 5 6 7
// 8 9 10 11
//12 13 14 15
// 4-bit Sbox
const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15};
const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15};
// 8-bit Sbox
const unsigned char sbox_8[256] = {0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b, 0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b, 0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9, 0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9, 0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d, 0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d, 0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 0x0d, 0xbd, 0xe1, 0xc8, 0xec, 0xc5, 0xcd, 0xe4, 0xc4, 0xed, 0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd, 0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 0x93, 0x20, 0x9b, 0x29, 0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79, 0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb, 0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb, 0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f, 0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f, 0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf, 0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff};
const unsigned char sbox_8_inv[256] = {0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee, 0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4, 0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf, 0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7, 0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4, 0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde, 0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7, 0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf, 0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4, 0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce, 0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7, 0x3d, 0x69, 0xe9, 0xad, 0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 0xaf, 0x6b, 0x6f, 0xeb, 0xef, 0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4, 0x2c, 0x78, 0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe, 0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7, 0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff};
// ShiftAndSwitchRows permutation
const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12};
const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14};
// Tweakey permutation
const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7};
const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1};
// round constants
const unsigned char RC[62] = {
    0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F,
    0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, 0x1D, 0x3A, 0x35, 0x2B,
    0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E,
    0x1C, 0x38, 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A,
    0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04, 0x09, 0x13,
    0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28,
    0x10, 0x20};
// Output stream used by the display/debug helpers below.
FILE *fic;
// Seed the C library PRNG from the wall clock, perturbed by the caller's
// offset so that concurrent callers receive distinct seeds.
void init_prng(int offset) {
    const unsigned int initial_seed = 10*time(NULL) + 11*offset;
    srand(initial_seed);
    printf("[+] PRNG initialized to 0x%08X\n", initial_seed);
}
// Write the 4x4 state to the global output file `fic` as hex:
// 64-bit versions pack two 4-bit cells per printed byte, 128-bit
// versions print one byte per cell (row-major order).
void display_matrix(unsigned char state[4][4], int ver)
{
    unsigned char packed[16];
    int idx, count = 0;
    if (versions[ver][0] == 64)
    {
        count = 8;
        for (idx = 0; idx < count; idx++)
        {
            int hi = 2 * idx;       /* cell supplying the high nibble */
            int lo = 2 * idx + 1;   /* cell supplying the low nibble */
            packed[idx] = (unsigned char)(((state[hi >> 2][hi & 0x3] & 0xF) << 4)
                                        | (state[lo >> 2][lo & 0x3] & 0xF));
        }
    }
    else if (versions[ver][0] == 128)
    {
        count = 16;
        for (idx = 0; idx < count; idx++)
            packed[idx] = state[idx >> 2][idx & 0x3] & 0xFF;
    }
    for (idx = 0; idx < count; idx++)
        fprintf(fic, "%02x", packed[idx]);
}
// Dump the cipher state followed by each active tweakey array (TK1..TK3)
// to the global output file `fic`.
void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    const int tk_count = (int)(versions[ver][1] / versions[ver][0]);
    int tk;
    fprintf(fic, "S = ");
    display_matrix(state, ver);
    for (tk = 0; tk < tk_count; tk++)
    {
        fprintf(fic, " - TK%i = ", tk + 1);
        display_matrix(keyCells[tk], ver);
    }
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state
void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    // ver indexes versions[]; versions[ver][1]/versions[ver][0] gives the
    // number of active tweakey arrays (1 = TK1 only, 2 = TK1+TK2, 3 = TK1+TK2+TK3).
    int i, j, k;
    unsigned char pos;
    unsigned char keyCells_tmp[3][4][4];
    // apply the subtweakey to the internal state
    // (only the top two rows of the state absorb key material)
    for (i = 0; i <= 1; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] ^= keyCells[0][i][j];
            if (2 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j];
            else if (3 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
        }
    }
    // update the subtweakey states with the permutation
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of the TWEAKEY permutation
                pos = TWEAKEY_P[j + 4 * i];
                keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
            }
        }
    }
    // update the subtweakey states with the LFSRs
    // (applied to rows 0-1 only; TK1 (k == 0) is never LFSR-updated)
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i <= 1; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of LFSRs for TK updates
                if (k == 1)
                {
                    // TK2 LFSR: left shift with feedback into the low bit
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
                }
                else if (k == 2)
                {
                    // TK3 LFSR: right shift with feedback into the high bit
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j]) & 0x8) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
                }
            }
        }
    }
    // copy the updated tweakey arrays back into keyCells
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                keyCells[k][i][j] = keyCells_tmp[k][i][j];
            }
        }
    }
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state (inverse function}
void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    // Inverse of AddKey: first rewind the tweakey schedule (inverse
    // permutation + inverse LFSRs), then XOR the resulting subtweakey
    // off the top two rows of the state.
    int i, j, k;
    unsigned char pos;
    unsigned char keyCells_tmp[3][4][4];
    // update the subtweakey states with the permutation
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of the inverse TWEAKEY permutation
                pos = TWEAKEY_P_inv[j + 4 * i];
                keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
            }
        }
    }
    // update the subtweakey states with the LFSRs
    // (TWEAKEY_P_inv maps cells 0..7 into rows 2-3, so the cells that were
    // LFSR-updated in the forward direction are un-updated there;
    // TK1 (k == 0) is never LFSR-updated)
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 2; i <= 3; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of inverse LFSRs for TK updates
                if (k == 1)
                {
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8) ^ ((keyCells_tmp[k][i][j]) & 0x8);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
                }
                else if (k == 2)
                {
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
                }
            }
        }
    }
    // copy the rewound tweakey arrays back into keyCells
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                keyCells[k][i][j] = keyCells_tmp[k][i][j];
            }
        }
    }
    // apply the subtweakey to the internal state
    // (only the top two rows of the state absorb key material)
    for (i = 0; i <= 1; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] ^= keyCells[0][i][j];
            if (2 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j];
            else if (3 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
        }
    }
}
// Apply the constants: using a LFSR counter on 6 bits, we XOR the 6 bits to the first 6 bits of the internal state
void AddConstants(unsigned char state[4][4], int r)
{
state[0][0] ^= (RC[r] & 0xf);
state[1][0] ^= ((RC[r] >> 4) & 0x3);
state[2][0] ^= 0x2;
}
// apply the 4-bit Sbox
// Substitute every cell of the state through the 4-bit Sbox.
void SubCell4(unsigned char state[4][4])
{
    unsigned char *cell = &state[0][0];
    int idx;
    for (idx = 0; idx < 16; idx++)
        cell[idx] = sbox_4[cell[idx]];
}
// apply the 4-bit inverse Sbox
// Substitute every cell of the state through the inverse 4-bit Sbox.
void SubCell4_inv(unsigned char state[4][4])
{
    unsigned char *cell = &state[0][0];
    int idx;
    for (idx = 0; idx < 16; idx++)
        cell[idx] = sbox_4_inv[cell[idx]];
}
// apply the 8-bit Sbox
// Substitute every cell of the state through the 8-bit Sbox.
void SubCell8(unsigned char state[4][4])
{
    unsigned char *cell = &state[0][0];
    int idx;
    for (idx = 0; idx < 16; idx++)
        cell[idx] = sbox_8[cell[idx]];
}
// apply the 8-bit inverse Sbox
// Substitute every cell of the state through the inverse 8-bit Sbox.
void SubCell8_inv(unsigned char state[4][4])
{
    unsigned char *cell = &state[0][0];
    int idx;
    for (idx = 0; idx < 16; idx++)
        cell[idx] = sbox_8_inv[cell[idx]];
}
// Apply the ShiftRows function
// Permute the state cells according to the ShiftRows permutation P
// (each output cell pulls from the source cell P indicates).
void ShiftRows(unsigned char state[4][4])
{
    unsigned char rotated[4][4];
    int row, col;
    for (row = 0; row < 4; row++)
    {
        for (col = 0; col < 4; col++)
        {
            const int src = P[4 * row + col];
            rotated[row][col] = state[src >> 2][src & 0x3];
        }
    }
    memcpy(state, rotated, sizeof(rotated));
}
// Apply the inverse ShiftRows function
// Permute the state cells according to the inverse ShiftRows
// permutation P_inv, undoing ShiftRows.
void ShiftRows_inv(unsigned char state[4][4])
{
    unsigned char rotated[4][4];
    int row, col;
    for (row = 0; row < 4; row++)
    {
        for (col = 0; col < 4; col++)
        {
            const int src = P_inv[4 * row + col];
            rotated[row][col] = state[src >> 2][src & 0x3];
        }
    }
    memcpy(state, rotated, sizeof(rotated));
}
// Apply the linear diffusion matrix
//M =
//1 0 1 1
//1 0 0 0
//0 1 1 0
//1 0 1 0
// Multiply each state column by the binary matrix M (see comment above):
// out0 = s0^s2^s3, out1 = s0, out2 = s1^s2, out3 = s0^s2 — computed
// directly instead of via the in-place XOR/rotate sequence.
void MixColumn(unsigned char state[4][4])
{
    int col;
    for (col = 0; col < 4; col++)
    {
        const unsigned char r0 = state[0][col];
        const unsigned char r1 = state[1][col];
        const unsigned char r2 = state[2][col];
        const unsigned char r3 = state[3][col];
        state[0][col] = (unsigned char)(r0 ^ r2 ^ r3);
        state[1][col] = r0;
        state[2][col] = (unsigned char)(r1 ^ r2);
        state[3][col] = (unsigned char)(r0 ^ r2);
    }
}
// Apply the inverse linear diffusion matrix
// Multiply each state column by the inverse of the diffusion matrix:
// out0 = s1, out1 = s1^s2^s3, out2 = s1^s3, out3 = s0^s3 — computed
// directly instead of via the in-place rotate/XOR sequence.
void MixColumn_inv(unsigned char state[4][4])
{
    int col;
    for (col = 0; col < 4; col++)
    {
        const unsigned char r0 = state[0][col];
        const unsigned char r1 = state[1][col];
        const unsigned char r2 = state[2][col];
        const unsigned char r3 = state[3][col];
        state[0][col] = r1;
        state[1][col] = (unsigned char)(r1 ^ r2 ^ r3);
        state[2][col] = (unsigned char)(r1 ^ r3);
        state[3][col] = (unsigned char)(r0 ^ r3);
    }
}
// decryption function of Skinny
// Decrypts `input` in place under `userkey` for `r` rounds of the
// version selected by `ver` (versions[ver] = {block bits, tweakey bits}).
void dec(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
    unsigned char state[4][4];
    unsigned char dummy[4][4] = {{0}};          // throwaway state used only to advance the key schedule
    unsigned char keyCells[3][4][4];            // up to three tweakey arrays TK1..TK3
    int i;
    memset(keyCells, 0, 48);
    // Unpack input/userkey into 4x4 cell arrays. For 64-bit blocks a
    // cell is a nibble (even i = high nibble, odd i = low nibble); for
    // 128-bit blocks a cell is a byte. Extra tweakey words are loaded
    // when the tweakey size allows them.
    for (i = 0; i < 16; i++)
    {
        if (versions[ver][0] == 64)
        {
            if (i & 1)
            {
                state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
                keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
            }
            else
            {
                state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
                keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
            }
        }
        else if (versions[ver][0] == 128)
        {
            state[i >> 2][i & 0x3] = input[i] & 0xFF;
            keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
            if (versions[ver][1] >= 256)
                keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
            if (versions[ver][1] >= 384)
                keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
        }
    }
    // Fast-forward the key schedule: each AddKey call on the dummy state
    // presumably also steps keyCells one round forward, so after r calls
    // keyCells holds the last-round subtweakey (dummy itself is
    // discarded). NOTE(review): confirm against AddKey's definition that
    // it advances the tweakey arrays.
    for (i = r - 1; i >= 0; i--)
    {
        AddKey(dummy, keyCells, ver);
    }
#ifdef DEBUG
    fprintf(fic, "DEC - initial state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    // Undo the rounds in reverse order; AddKey_inv is expected to step
    // the key schedule backwards. The forward AddConstants is reused
    // because constants are combined by XOR (self-inverting) — see the
    // encryption loop, which calls the same function.
    for (i = r - 1; i >= 0; i--)
    {
        MixColumn_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        ShiftRows_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddKey_inv(state, keyCells, ver);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddConstants(state, i);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        if (versions[ver][0] == 64)
            SubCell4_inv(state);
        else
            SubCell8_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
    }
#ifdef DEBUG
    fprintf(fic, "DEC - final state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    // Repack the 4x4 cell state into the caller's byte buffer.
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
    }
}
// encryption function of Skinny
// Encrypts `input` in place under `userkey` for `r` rounds of the
// version selected by `ver` (versions[ver] = {block bits, tweakey bits}).
void enc(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
    unsigned char state[4][4];
    unsigned char keyCells[3][4][4];            // up to three tweakey arrays TK1..TK3
    int i;
    memset(keyCells, 0, 48);
    // Unpack input/userkey into 4x4 cell arrays (nibbles for 64-bit
    // blocks, bytes for 128-bit) — same layout as dec().
    for (i = 0; i < 16; i++)
    {
        if (versions[ver][0] == 64)
        {
            if (i & 1)
            {
                state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
                keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
            }
            else
            {
                state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
                keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
            }
        }
        else if (versions[ver][0] == 128)
        {
            state[i >> 2][i & 0x3] = input[i] & 0xFF;
            keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
            if (versions[ver][1] >= 256)
                keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
            if (versions[ver][1] >= 384)
                keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
        }
    }
#ifdef DEBUG
    fprintf(fic, "ENC - initial state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    // Round function: SubCells, AddConstants, AddRoundTweakey,
    // ShiftRows, MixColumns.
    for (i = 0; i < r; i++)
    {
        if (versions[ver][0] == 64)
            SubCell4(state);
        else
            SubCell8(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after SubCell: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddConstants(state, i);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after AddConstants: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddKey(state, keyCells, ver);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after AddKey: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        ShiftRows(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        MixColumn(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after MixColumn: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
    } //The last subtweakey should not be added
#ifdef DEBUG
    fprintf(fic, "ENC - final state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    // Repack the 4x4 cell state into the caller's byte buffer.
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
    }
}
// generate test vectors for all the versions of Skinny
// Writes nine random plaintext/tweakey/ciphertext triples (n runs 1..9)
// for version `ver` to the global file `fic` (must already be open),
// then decrypts and prints P' so the reader can check P' == P.
void TestVectors(int ver)
{
    unsigned char p[16];
    unsigned char c[16];
    unsigned char k[48];
    int n;
    for (n = 1; n < 10; n++)
    {
        int i;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c[i] = p[i] = rand() & 0xff;
        // NOTE(review): this echo of the plaintext goes to stdout rather
        // than fic — looks like leftover debugging output.
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            printf("%02x", p[i]);
        printf("\n");
        for (i = 0; i < (versions[ver][1] >> 3); i++)
            k[i] = rand() & 0xff;
        fprintf(fic, "TK = ");
        for (i = 0; i < (versions[ver][1] >> 3); i++)
            fprintf(fic, "%02x", k[i]);
        fprintf(fic, "\n");
        fprintf(fic, "P = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", p[i]);
        fprintf(fic, "\n");
        // NOTE(review): enc/dec run a fixed 10 rounds here, not the full
        // round count of the selected Skinny version — confirm intended.
        enc(c, k, ver, 10);
        fprintf(fic, "C = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", c[i]);
        fprintf(fic, "\n");
        dec(c, k, ver, 10);
        fprintf(fic, "P' = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", c[i]);
        fprintf(fic, "\n\n");
    }
}
// Run one bunch of N3 boomerang queries on r-round Skinny version `ver`
// with plaintext difference dp, ciphertext difference dc and key
// differences dk1/dk2. Returns the number of returning boomerangs.
int boomerang(int r, int ver, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    unsigned char p1[16], p2[16];
    unsigned char c3[16], c4[16];
    unsigned char k1[48], k2[48], k3[48], k4[48];
    const int state_bytes = versions[ver][0] >> 3;
    const int key_bytes = versions[ver][1] >> 3;
    // Draw k1 at random, then derive the three related keys from the
    // key differences (k4 = k1 ^ dk1 ^ dk2).
    for (int i = 0; i < key_bytes; i++)
        k1[i] = rand() & 0xff;
    for (int i = 0; i < key_bytes; i++)
    {
        k2[i] = k1[i] ^ dk1[i];
        k3[i] = k1[i] ^ dk2[i];
        k4[i] = k2[i] ^ dk2[i];
    }
    int hits = 0;
    for (int t = 0; t < N3; t++)
    {
        // Random p1 and its dp-related partner p2.
        for (int i = 0; i < state_bytes; i++)
        {
            p1[i] = rand() & 0xff;
            p2[i] = p1[i] ^ dp[i];
        }
        enc(p1, k1, ver, r);        // p1, p2 now hold c1, c2
        enc(p2, k2, ver, r);
        // Shift both ciphertexts by dc and decrypt under the shifted keys.
        for (int i = 0; i < state_bytes; i++)
        {
            c3[i] = p1[i] ^ dc[i];
            c4[i] = p2[i] ^ dc[i];
        }
        dec(c3, k3, ver, r);
        dec(c4, k4, ver, r);
        // The boomerang returns when the decrypted pair differs by dp.
        int match = 1;
        for (int i = 0; i < state_bytes; i++)
        {
            if ((c3[i] ^ c4[i]) != dp[i])
            {
                match = 0;
                break;
            }
        }
        hits += match;
    }
    return hits;
}
// Launch N1 OpenMP threads, each running N2 bunches of N3 boomerang
// queries (N1*N2*N3 total), print timing and the empirical probability
// 2^(-x), and return the total number of returning boomerangs.
double send_boomerangs(int R, int ver, int N1, int N2, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    // Parallel execution
    int NUM[N1];    // per-thread hit counters (variable-length array)
    int counter;
    printf("#Rounds: %d rounds\n", R);
    // NOTE(review): N1*N2*N3 is evaluated in int arithmetic and can
    // overflow for large exponents — confirm the intended ranges.
    printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %d * %d = 2^(%f)\n", N1, N2, N3, log(N1 * N2 * N3) / log(2));
    clock_t clock_timer;
    double wall_timer;
    clock_timer = clock();
    wall_timer = omp_get_wtime();
    omp_set_num_threads(N1);
#pragma omp parallel for
    // NOTE(review): results are stored by thread id, not loop index, so
    // this assumes the runtime really provides N1 threads and each
    // executes exactly one iteration; otherwise entries of NUM would be
    // overwritten or left uninitialized.
    for (counter = 0; counter < N1; counter++)
    {
        int num = 0;
        int ID = omp_get_thread_num();
        init_prng(ID);          // per-thread PRNG seed so streams differ
        for (int j = 0; j < N2; j++)
        {
            num += boomerang(R, ver, N3, dp, dc, dk1, dk2);
        }
        NUM[ID] = num;
    }
    printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC);
    printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer);
    double sum = 0;
    double sum_temp = 1;
    for (int i = 0; i < N1; i++)
        sum += NUM[i];
    printf("sum = %f\n", sum);
    // NOTE(review): division by zero (prints inf) when no boomerang
    // returned in this batch.
    sum_temp = (double)(N1 * N2 * N3) / sum;
    printf("2^(-%f)\n\n", log(sum_temp) / log(2));
    printf("##########################\n");
    return sum;
}
// Parse a hex string (two characters per byte) into the state-sized
// byte array dx; versions[ver][0] is the block size in bits, so
// versions[ver][0] >> 3 bytes are consumed from hex_str.
void convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16])
{
    for (int i = 0; i < (versions[ver][0] >> 3); i++)
    {
        // strtol requires a NUL-terminated string; the previous 2-byte
        // buffer was not terminated, so strtol read past it (undefined
        // behavior). Terminate explicitly.
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0';
        dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}
// Parse a hex string (two characters per byte) into the tweakey-sized
// byte array dt; versions[ver][1] is the tweakey size in bits, so
// versions[ver][1] >> 3 bytes are consumed from hex_str.
void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48])
{
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
    {
        // strtol requires a NUL-terminated string; the previous 2-byte
        // buffer was not terminated, so strtol read past it (undefined
        // behavior). Terminate explicitly.
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0';
        dt[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}
// Driver: configures one boomerang experiment (version, rounds,
// differences) and runs it n times, printing the averaged probability.
int main()
{
    // srand((unsigned)time(NULL)); // Initialization, should only be called once. int r = rand();
    // init_prng(1);
    // //test all versions of Skinny
    // for (i = 0; i < (sizeof(versions) / sizeof(*versions)); i++)
    // {
    // sprintf(name, "test_vectors_%i_%i.txt", versions[i][0], versions[i][1]);
    // fic = fopen(name, "w");
    // fprintf(fic, "\n\nSkinny-%i/%i: \n", versions[i][0], versions[i][1]);
    // TestVectors(i);
    // fclose(fic);
    // printf("Generating test vectors for Skinny-%i/%i - saved in file test_vectors_%i_%i.txt \n", versions[i][0], versions[i][1], versions[i][0], versions[i][1]);
    // }
    // NOTE(review): seeding is commented out here; the worker threads
    // seed via init_prng(ID) inside send_boomerangs, so runs are
    // reproducible across invocations — confirm that is intended.
    unsigned char dp[16];       // plaintext difference
    unsigned char dc[16];       // ciphertext difference
    unsigned char dk1[48];      // upper-trail key difference
    unsigned char dk2[48];      // lower-trail key difference
    // #######################################################################################################
    // #######################################################################################################
    // ############################## User must change only the following lines ##############################
    int n = 1000; // Number of independent experiments
    int R = 6; // Number of rounds
    int ver = 4; // Determine the version:
    // [0 = Skinny-64-64]
    // [1 = Skinny-64-128]
    // [2 = Skinny-64-192]
    // [3 = Skinny-128-128]
    // [4 = Skinny-128-256]
    // [5 = Skinny-128-384]
    char dp_str[] = "00000000000000000000001000000000";
    char dc_str[] = "00000000000000000000000000000000";
    char dk1_str[] = "0000000000000000000000000000f0000000000000000000000000000000e000";
    char dk2_str[] = "fc000000000000000000000000000000fe000000000000000000000000000000";
    // #######################################################################################################
    // #######################################################################################################
    convert_hexstr_to_statearray(ver, dp_str, dp);
    convert_hexstr_to_statearray(ver, dc_str, dc);
    convert_hexstr_to_tweakarray(ver, dk1_str, dk1);
    convert_hexstr_to_tweakarray(ver, dk2_str, dk2);
    //########################## Number of queries #########################
    int N1 = Nthreads; // Number of parallel threads : N1
    int deg1 = 11;
    int deg2 = 11;
    int N2 = 1 << deg1; // Number of bunches per thread: N2 = 2^(deg1)
    int N3 = 1 << deg2; // Number of queries per bunch: N3 = 2^(deg2)
    //################### Number of total queries : N1*N2*N3 ###############
    double sum = 0;
    for (int i = 0; i < n; i++)
    {
        sum += send_boomerangs(R, ver, N1, N2, N3, dp, dc, dk1, dk2);
    }
    // Average probability over all n*N1*N2*N3 queries, reported as 2^(-x).
    printf("\nAverage = 2^(-%0.4f)\n", (log(n) + log(N1) + log(N2) + log(N3) - log(sum))/log(2));
    // sum = (double)(n * N1 * N2 * N3) / sum;
    // printf("\nAverage = 2^(-%0.2f)\n", log(sum) / log(2));
    return 0;
}
|
neuron.h | /*
* Architektury výpočetních systémů (AVS 2019)
* Projekt c. 1 (ANN)
* Login: xstupi00
*/
/**
 * @brief Returns the output of one neuron of a fully-connected layer
 *        (presumably the weighted sum of the inputs plus the bias —
 *        confirm against the definition in the .cpp file).
 * @param inputSize - number of inputs of the neuron
 * @param neuronCount - number of neurons in the layer
 * @param input - pointer to neuron input array (identical for all neurons in the layer)
 * @param weight - pointer to weights for all neurons in the layer
 * @param bias - bias value of the neuron
 * @param neuronId - ID of the neuron in layer (position)
 * @return Output of the neuron
 *
 * The declare-simd pragma asks the compiler to emit a vectorized
 * variant: all arguments except neuronId are uniform across a SIMD
 * batch of 8 lanes, and neuronId advances linearly lane by lane.
 */
#pragma omp declare simd uniform(inputSize, neuronCount, input, weight) linear(neuronId) simdlen(8) notinbranch
float evalNeuron(
    size_t inputSize,
    size_t neuronCount,
    const float* input,
    const float* weight,
    float bias,
    size_t neuronId
);
|
DRB102-copyprivate-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "omprace.h"
#include <omp.h>
/*
* threadprivate+copyprivate: no data races
*/
#include <stdio.h>
float x=0.0;
int y=0;
#pragma omp threadprivate(x,y)
int main (int argc, char * argv[])
{
    omprace_init();
    // x and y are threadprivate, so every thread owns a private copy.
    // Exactly one thread executes the single block; the copyprivate
    // clause then broadcasts that thread's x and y to the copies of all
    // other threads in the team — hence no data race.
#pragma omp parallel
    {
#pragma omp single copyprivate(x,y)
        {
            x=1.0;
            y=1;
        }
    }
    // The initial thread's copies were updated by the broadcast (or it
    // executed the single block itself), so this prints x=1.000000 y=1.
    printf ("x=%f y=%d\n", x, y);
    omprace_fini();
    return 0;
}
|
kode.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int T;
// Write the n x n matrix `arr` to file `fname`, one row per line with
// 12 decimal places per entry. On fopen failure it reports the error
// and returns without writing (previously this dereferenced NULL).
void write_output(char fname[], double** arr, int n ){
    FILE *f = fopen(fname, "w");
    if (f == NULL) {
        fprintf(stderr, "write_output: cannot open %s\n", fname);
        return;
    }
    for( int i = 0; i < n; i++){
        for(int j = 0; j < n; j++){
            fprintf(f, "%0.12f ", arr[i][j]);
        }
        fprintf(f, "\n");
    }
    fclose(f);
}
// Sequential Crout LU decomposition: A = L * U with unit-diagonal U.
// Terminates the program (exit(0)) if a zero pivot L[col][col] occurs.
void crout(double **A, double **L, double **U, int n) {
    // Unit diagonal of U.
    for (int d = 0; d < n; d++) {
        U[d][d] = 1;
    }
    for (int col = 0; col < n; col++) {
        // Column `col` of L: subtract the already-known partial products.
        for (int row = col; row < n; row++) {
            double acc = 0;
            for (int k = 0; k < col; k++) {
                acc += L[row][k] * U[k][col];
            }
            L[row][col] = A[row][col] - acc;
        }
        // Row `col` of U: divide by the freshly computed pivot.
        for (int row = col; row < n; row++) {
            double acc = 0;
            for (int k = 0; k < col; k++) {
                acc += L[col][k] * U[k][row];
            }
            if (L[col][col] == 0) {
                exit(0);
            }
            U[col][row] = (A[col][row] - acc) / L[col][col];
        }
    }
}
// Parallel Crout variant 1: each inner loop nest is parallelized with
// "parallel for". Iterations are independent — the first loop writes
// only column j of L, the second only row j of U, and both read values
// finalized in earlier iterations of the sequential j loop.
void crout1(double **A, double **L, double **U, int n) {
    #pragma omp parallel for num_threads(T)
    for (int i = 0; i < n; i++) {
        U[i][i] = 1;
    }
    for (int j = 0; j < n; j++) {
        // NOTE(review): unlike the loop above, the two regions below do
        // not pass num_threads(T) and therefore use the runtime default
        // team size — confirm this is intended.
        #pragma omp parallel for
        for (int i = j; i < n; i++) {
            double sum = 0;
            for (int k = 0; k < j; k++) {
                sum = sum + L[i][k] * U[k][j];
            }
            L[i][j] = A[i][j] - sum;
        }
        #pragma omp parallel for
        for (int i = j; i < n; i++) {
            double sum = 0;
            for(int k = 0; k < j; k++) {
                sum = sum + L[j][k] * U[k][i];
            }
            // Zero pivot: abort the whole program (matches the
            // sequential version's behavior).
            if (L[j][j] == 0) {
                exit(0);
            }
            U[j][i] = (A[j][i] - sum) / L[j][j];
        }
    }
}
// Parallel Crout variant 2: manual work splitting with OpenMP sections.
// The unit diagonal of U is initialized by 16 hand-unrolled sections;
// inside the column loop the rows below the diagonal are split into 8
// sections for the L column and 8 for the U row. The chunk bounds
// telescope, so [0,n) resp. [j+1,n) are covered exactly (note that
// 8*(n-j-1)/8 == n-j-1 because the multiplication binds first).
void crout2(double **A, double **L, double **U, int n) {
    #pragma omp parallel sections num_threads(T)
    {
        #pragma omp section
        for (int i = 0; i < n/16; i++) {
            U[i][i] = 1;
        }
        #pragma omp section
        for (int i = n/16; i < 2*n/16; i++) {
            U[i][i] = 1;
        }
        #pragma omp section
        for (int i = 2*n/16; i < 3*n/16; i++) {
            U[i][i] = 1;
        }
        #pragma omp section
        for (int i = 3*n/16; i < 4*n/16; i++) {
            U[i][i] = 1;
        }
        #pragma omp section
        for (int i = 4*n/16; i < 5*n/16; i++) {
            U[i][i] = 1;
        }
        #pragma omp section
        for (int i = 5*n/16; i < 6*n/16; i++) {
            U[i][i] = 1;
        }
        #pragma omp section
        for (int i = 6*n/16; i < 7*n/16; i++) {
            U[i][i] = 1;
        }
        #pragma omp section
        for (int i = 7*n/16; i < 8*n/16; i++) {
            U[i][i] = 1;
        }
        #pragma omp section
        for (int i = 8*n/16; i < 9*n/16; i++) {
            U[i][i] = 1;
        }
        #pragma omp section
        for (int i = 9*n/16; i < 10*n/16; i++) {
            U[i][i] = 1;
        }
        #pragma omp section
        for (int i = 10*n/16; i < 11*n/16; i++) {
            U[i][i] = 1;
        }
        #pragma omp section
        for (int i = 11*n/16; i < 12*n/16; i++) {
            U[i][i] = 1;
        }
        #pragma omp section
        for (int i = 12*n/16; i < 13*n/16; i++) {
            U[i][i] = 1;
        }
        #pragma omp section
        for (int i = 13*n/16; i < 14*n/16; i++) {
            U[i][i] = 1;
        }
        #pragma omp section
        for (int i = 14*n/16; i < 15*n/16; i++) {
            U[i][i] = 1;
        }
        #pragma omp section
        for (int i = 15*n/16; i < n; i++) {
            U[i][i] = 1;
        }
    }
    for (int j = 0; j < n; j++) {
        // Diagonal entries first (sequential): L[j][j], then U[j][j].
        double sum1 = 0;
        for (int k = 0; k < j; k++) {
            sum1 = sum1 + L[j][k] * U[k][j];
        }
        L[j][j] = A[j][j] - sum1;
        // NOTE(review): this division has no zero-pivot guard (unlike
        // the section loops below, which exit(0)); U[j][j] is always 1
        // whenever L[j][j] != 0.
        U[j][j] = (A[j][j] - sum1) / L[j][j];
        #pragma omp parallel sections
        {
            // Sections 1-8: column j of L, rows j+1..n in 8 chunks.
            #pragma omp section
            for (int i = j+1 + 0*(n-j-1)/8; i < j+1 + 1*(n-j-1)/8; i++) {
                double sum = 0;
                for (int k = 0; k < j; k++) {
                    sum = sum + L[i][k] * U[k][j];
                }
                L[i][j] = A[i][j] - sum;
            }
            #pragma omp section
            for (int i = j+1 + 1*(n-j-1)/8; i < j+1 + 2*(n-j-1)/8; i++) {
                double sum = 0;
                for (int k = 0; k < j; k++) {
                    sum = sum + L[i][k] * U[k][j];
                }
                L[i][j] = A[i][j] - sum;
            }
            #pragma omp section
            for (int i = j+1 + 2*(n-j-1)/8; i < j+1 + 3*(n-j-1)/8; i++) {
                double sum = 0;
                for (int k = 0; k < j; k++) {
                    sum = sum + L[i][k] * U[k][j];
                }
                L[i][j] = A[i][j] - sum;
            }
            #pragma omp section
            for (int i = j+1 + 3*(n-j-1)/8; i < j+1 + 4*(n-j-1)/8; i++) {
                double sum = 0;
                for (int k = 0; k < j; k++) {
                    sum = sum + L[i][k] * U[k][j];
                }
                L[i][j] = A[i][j] - sum;
            }
            #pragma omp section
            for (int i = j+1 + 4*(n-j-1)/8; i < j+1 + 5*(n-j-1)/8; i++) {
                double sum = 0;
                for (int k = 0; k < j; k++) {
                    sum = sum + L[i][k] * U[k][j];
                }
                L[i][j] = A[i][j] - sum;
            }
            #pragma omp section
            for (int i = j+1 + 5*(n-j-1)/8; i < j+1 + 6*(n-j-1)/8; i++) {
                double sum = 0;
                for (int k = 0; k < j; k++) {
                    sum = sum + L[i][k] * U[k][j];
                }
                L[i][j] = A[i][j] - sum;
            }
            #pragma omp section
            for (int i = j+1 + 6*(n-j-1)/8; i < j+1 + 7*(n-j-1)/8; i++) {
                double sum = 0;
                for (int k = 0; k < j; k++) {
                    sum = sum + L[i][k] * U[k][j];
                }
                L[i][j] = A[i][j] - sum;
            }
            #pragma omp section
            for (int i = j+1 + 7*(n-j-1)/8; i < j+1 + 8*(n-j-1)/8; i++) {
                double sum = 0;
                for (int k = 0; k < j; k++) {
                    sum = sum + L[i][k] * U[k][j];
                }
                L[i][j] = A[i][j] - sum;
            }
            // Sections 9-16: row j of U, same index chunks.
            #pragma omp section
            for (int i = j+1 + 0*(n-j-1)/8; i < j+1 + 1*(n-j-1)/8; i++) {
                double sum = 0;
                for(int k = 0; k < j; k++) {
                    sum = sum + L[j][k] * U[k][i];
                }
                if (L[j][j] == 0) {
                    exit(0);
                }
                U[j][i] = (A[j][i] - sum) / L[j][j];
            }
            #pragma omp section
            for (int i = j+1 + 1*(n-j-1)/8; i < j+1 + 2*(n-j-1)/8; i++) {
                double sum = 0;
                for(int k = 0; k < j; k++) {
                    sum = sum + L[j][k] * U[k][i];
                }
                if (L[j][j] == 0) {
                    exit(0);
                }
                U[j][i] = (A[j][i] - sum) / L[j][j];
            }
            #pragma omp section
            for (int i = j+1 + 2*(n-j-1)/8; i < j+1 + 3*(n-j-1)/8; i++) {
                double sum = 0;
                for(int k = 0; k < j; k++) {
                    sum = sum + L[j][k] * U[k][i];
                }
                if (L[j][j] == 0) {
                    exit(0);
                }
                U[j][i] = (A[j][i] - sum) / L[j][j];
            }
            #pragma omp section
            for (int i = j+1 + 3*(n-j-1)/8; i < j+1 + 4*(n-j-1)/8; i++) {
                double sum = 0;
                for(int k = 0; k < j; k++) {
                    sum = sum + L[j][k] * U[k][i];
                }
                if (L[j][j] == 0) {
                    exit(0);
                }
                U[j][i] = (A[j][i] - sum) / L[j][j];
            }
            #pragma omp section
            for (int i = j+1 + 4*(n-j-1)/8; i < j+1 + 5*(n-j-1)/8; i++) {
                double sum = 0;
                for(int k = 0; k < j; k++) {
                    sum = sum + L[j][k] * U[k][i];
                }
                if (L[j][j] == 0) {
                    exit(0);
                }
                U[j][i] = (A[j][i] - sum) / L[j][j];
            }
            #pragma omp section
            for (int i = j+1 + 5*(n-j-1)/8; i < j+1 + 6*(n-j-1)/8; i++) {
                double sum = 0;
                for(int k = 0; k < j; k++) {
                    sum = sum + L[j][k] * U[k][i];
                }
                if (L[j][j] == 0) {
                    exit(0);
                }
                U[j][i] = (A[j][i] - sum) / L[j][j];
            }
            #pragma omp section
            for (int i = j+1 + 6*(n-j-1)/8; i < j+1 + 7*(n-j-1)/8; i++) {
                double sum = 0;
                for(int k = 0; k < j; k++) {
                    sum = sum + L[j][k] * U[k][i];
                }
                if (L[j][j] == 0) {
                    exit(0);
                }
                U[j][i] = (A[j][i] - sum) / L[j][j];
            }
            #pragma omp section
            for (int i = j+1 + 7*(n-j-1)/8; i < j+1 + 8*(n-j-1)/8; i++) {
                double sum = 0;
                for(int k = 0; k < j; k++) {
                    sum = sum + L[j][k] * U[k][i];
                }
                if (L[j][j] == 0) {
                    exit(0);
                }
                U[j][i] = (A[j][i] - sum) / L[j][j];
            }
        }
    }
}
// Parallel Crout variant 3: two sections (L column / U row) that each
// spawn a nested "parallel for". Relies on omp_set_nested(1) being
// called in main; without nested parallelism the inner regions run on
// one thread each. The two sections touch disjoint entries, so they
// can safely run concurrently.
void crout3(double **A, double **L, double **U, int n) {
    #pragma omp parallel for num_threads(T)
    for (int i = 0; i < n; i++) {
        U[i][i] = 1;
    }
    for (int j = 0; j < n; j++) {
        // Diagonal entries first (sequential).
        double sum1 = 0;
        for (int k = 0; k < j; k++) {
            sum1 = sum1 + L[j][k] * U[k][j];
        }
        L[j][j] = A[j][j] - sum1;
        // NOTE(review): no zero-pivot guard on this division, unlike
        // the U section below.
        U[j][j] = (A[j][j] - sum1) / L[j][j];
        #pragma omp parallel sections
        {
            #pragma omp section
            {
                // Column j of L (rows below the diagonal).
                #pragma omp parallel for
                for (int i = j+1; i < n; i++) {
                    double sum = 0;
                    for (int k = 0; k < j; k++) {
                        sum = sum + L[i][k] * U[k][j];
                    }
                    L[i][j] = A[i][j] - sum;
                }
            }
            #pragma omp section
            {
                // Row j of U (columns right of the diagonal).
                #pragma omp parallel for
                for (int i = j+1; i < n; i++) {
                    double sum = 0;
                    for(int k = 0; k < j; k++) {
                        sum = sum + L[j][k] * U[k][i];
                    }
                    if (L[j][j] == 0) {
                        exit(0);
                    }
                    U[j][i] = (A[j][i] - sum) / L[j][j];
                }
            }
        }
    }
}
// Driver. Usage: ./crout <n> <matrixfile> <T> <strat>
//   n          - matrix dimension
//   matrixfile - whitespace-separated n*n doubles
//   T          - thread count for the parallel variants
//   strat      - 0..3 selects crout/crout1/crout2/crout3
// Writes L and U to output_L_<strat>_<T>.txt / output_U_<strat>_<T>.txt.
int main(int argc, char* argv[]){
    // argv[4] (strat) is read below, so four arguments are required.
    // The previous check (argc < 4) allowed running with only three
    // arguments and reading past the end of argv.
    if (argc < 5){
        printf("ERROR\n");
        return 0;
    }
    int n = atoi(argv[1]);
    char *filename = argv[2];
    T = atoi(argv[3]);
    int strat = atoi(argv[4]);
    // omp_set_num_threads(T);
    omp_set_nested(1);  // crout3 uses nested parallel regions
    FILE *fptr = fopen(filename,"r");
    if (fptr == NULL){
        // input file missing/unreadable: previously a NULL dereference
        printf("ERROR\n");
        return 0;
    }
    double **A = (double **)malloc(n*sizeof(double *));
    for(int i=0;i<n;i++){
        A[i] = (double *)malloc(n*sizeof(double));
    }
    double **L = (double **)malloc(n*sizeof(double *));
    for(int i=0;i<n;i++){
        L[i] = (double *)malloc(n*sizeof(double));
    }
    double **U = (double **)malloc(n*sizeof(double *));
    for(int i=0;i<n;i++){
        U[i] = (double *)malloc(n*sizeof(double));
    }
    for(int i=0;i<n;i++){
        for(int j=0;j<n;j++){
            // short or malformed input would previously leave A with
            // uninitialized entries and continue silently
            if (fscanf(fptr,"%lf",&A[i][j]) != 1){
                printf("ERROR\n");
                fclose(fptr);
                return 0;
            }
        }
    }
    fclose(fptr);
    if(strat==0){
        crout(A,L,U,n);
    }
    else if(strat==1){
        crout1(A,L,U,n);
    }
    else if(strat==2){
        crout2(A,L,U,n);
    }
    else if(strat==3){
        crout3(A,L,U,n);
    }
    // Build the output file names: output_<L|U>_<strat>_<T>.txt
    char linit[] = "output_L_";
    char uinit[] = "output_U_";
    char score[] = "_";
    char ext[] = ".txt";
    char* lfile = (char *)malloc(300);
    lfile[0] = '\0';
    strcat(lfile,linit); strcat(lfile,argv[4]); strcat(lfile,score); strcat(lfile,argv[3]); strcat(lfile,ext);
    char* ufile = (char *)malloc(300);
    ufile[0] = '\0';
    strcat(ufile,uinit); strcat(ufile,argv[4]); strcat(ufile,score); strcat(ufile,argv[3]); strcat(ufile,ext);
    write_output(lfile,L,n);
    write_output(ufile,U,n);
    // release heap memory (previously leaked)
    free(lfile);
    free(ufile);
    for(int i=0;i<n;i++){
        free(A[i]); free(L[i]); free(U[i]);
    }
    free(A); free(L); free(U);
    return 0;
}
yolo_secondlast.back.h | #pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose_avx512.h"
#include "ukr10x2vCnnb1f1024x17y17c512r1s1.h"
#include "ukr10x2vGemmb1f1024x17y17c512r1s1AS.h"
#include "ukr9x2vGemmb1f1024x17y17c512r1s1.h"
// Runs one 1x1 convolution layer (1024 filters, 17x17 spatial, 512
// channels) with 18 OpenMP threads: first repacks the weights oriB into
// the blocked layout B via 16x16 AVX-512 transposes, then executes an
// auto-generated ("push button") loop-tiling schedule that dispatches
// to hand-written micro-kernels. A = input, C = output.
void testrun(float* A ,float*B, float*C, float*oriB ){
    #pragma omp parallel num_threads(18)
    {
        int tid = omp_get_thread_num();
        int Nx = 17;
        int Ny = 17;
        int Nh = 1;
        // Per-row source offsets handed to the micro-kernels; the AS
        // variant may adjust them for rows that wrap a spatial edge.
        int Astrides[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
        int b1 = 0;
        // Weight repack: each thread transposes a disjoint slice of
        // oriB into the 16-wide packed layout expected by the kernels.
        for (int fpck = (tid%18)*16; fpck < uNf; fpck+=18*16){
            for(int cwh = (tid/18)*16; cwh < uNc*uNw*uNh/16*16; cwh+=16*1){
                transpose16x16_avx512(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
            }
        }
        // All weights must be packed before any thread starts computing.
        #pragma omp barrier// begin push button generated block
        // Generated tiling nest: most levels are degenerate (single
        // iteration); real blocking happens at c3 (channel tiles of
        // 144), f3 (filter tiles of 32 spread over 18 threads) and
        // xy2/xy1 (rows of 10 output pixels).
        for(int c5=0;c5<512+0;c5+=512)
        {
            for(int xy5=0;xy5<289+0;xy5+=289)
            {
                for(int f5=0;f5<1024+0;f5+=1024)
                {
                    for(int c4=c5;c4<min(512, 512+c5);c4+=512)
                    {
                        for(int xy4=xy5;xy4<min(289, 289+xy5);xy4+=289)
                        {
                            for(int f4=f5;f4<min(1024, 1024+f5);f4+=1024)
                            {
                                for(int c3=c4;c3<min(512, 512+c4);c3+=144)
                                {
                                    for(int f3=f4+tid%18*32;f3<min(1024, 1024+f4);f3+=32*18)
                                    {
                                        for(int xy3=xy4;xy3<min(289, 289+xy4);xy3+=270)
                                        {
                                            for(int xy2=xy3;xy2<min(289, 270+xy3);xy2+=10)
                                            {
                                                for(int f2=f3;f2<min(1024, 32+f3);f2+=32)
                                                {
                                                    for(int c2=c3;c2<min(512, 144+c3);c2+=144)
                                                    {
                                                        for(int c1=c2;c1<min(512, 144+c2);c1+=144)
                                                        {
                                                            for(int xy1=xy2;xy1<min(289, 10+xy2);xy1+=10)
                                                            {
                                                                for(int f1=f2;f1<min(1024, 32+f2);f1+=32)
                                                                {
                                                                    // Decompose linear indices into x/y and packed
                                                                    // filter coordinates, then compute flat offsets
                                                                    // into A, B and C.
                                                                    int ctile=min(144, 512-c1);
                                                                    int x1=xy1/17;
                                                                    int y1=xy1%17/1;
                                                                    int c1_1=c1/1;
                                                                    int c1_2=c1%1/1;
                                                                    int kf1_1=f1/16;
                                                                    int kf1_2=f1%16/1;
                                                                    int of1_1=f1/1;
                                                                    int of1_2=f1%1/1;
                                                                    int offsetA=0+b1*147968+c1_1*289+1*x1*17+1*y1*1+c1_2*1;
                                                                    int offsetB=0+kf1_1*8192+c1*16+0*16+0*16+kf1_2*1;
                                                                    int offsetC=0+b1*295936+of1_1*289+x1*17+y1*1+of1_2*1;
                                                                    // Kernel selection by how many rows remain in
                                                                    // this 17-row column.
                                                                    if(17-y1>=10){
                                                                        // Full 10-row tile.
                                                                        ukr10x2vCnnb1f1024x17y17c512r1s1(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                                                                    }
                                                                    else if(17*17-xy1>=10){
                                                                        // Tile wraps to the next column: patch the
                                                                        // stride table, run the adjusted-stride
                                                                        // kernel, then restore it.
                                                                        // NOTE(review): the +=0/-=0 adjustments are
                                                                        // no-ops here (r1s1, stride 1) — presumably
                                                                        // nonzero for other layer shapes; confirm
                                                                        // against the generator.
                                                                        for(int sti=17-y1;sti<10;sti+=1)
                                                                        {
                                                                            Astrides[sti]+=0;
                                                                        }
                                                                        ukr10x2vGemmb1f1024x17y17c512r1s1AS(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                                                                        for(int sti=17-y1;sti<10;sti+=1)
                                                                        {
                                                                            Astrides[sti]-=0;
                                                                        }
                                                                    }
                                                                    else{
                                                                        // Final partial tile (9 remaining pixels).
                                                                        ukr9x2vGemmb1f1024x17y17c512r1s1(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        // end push button generated block
    }}
GB_binop__rminus_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__rminus_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__rminus_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_uint8)
// A*D function (colscale): GB (_AxD__rminus_uint8)
// D*A function (rowscale): GB (_DxB__rminus_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_uint8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_uint8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_uint8)
// C=scalar+B GB (_bind1st__rminus_uint8)
// C=scalar+B' GB (_bind1st_tran__rminus_uint8)
// C=A+scalar GB (_bind2nd__rminus_uint8)
// C=A'+scalar GB (_bind2nd_tran__rminus_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (y - x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_UINT8 || GxB_NO_RMINUS_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the actual loop lives in
// the included template. Generated code — regenerate rather than edit.
void GB (_Cdense_ewise3_accum__rminus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulator).
// Returns GrB_NO_VALUE when this operator/type combination is compiled
// out via GB_DISABLE, telling the caller to use the generic path.
GrB_Info GB (_Cdense_ewise3_noaccum__rminus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, using the
// pre-computed slicing of B across B_ntasks tasks / B_nthreads threads.
GrB_Info GB (_Cdense_accumB__rminus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into a
// dense matrix.
GrB_Info GB (_Cdense_accumb__rminus_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above already returned (generated code)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, writing the
// uint8_t results into Cx via the colscale template.
GrB_Info GB (_AxD__rminus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, writing the
// uint8_t results into Cx via the rowscale template.
GrB_Info GB (_DxB__rminus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the rminus operator
// (cij = bij - aij on the intersection; copies elsewhere). The workspace
// declared here is used and freed by the included template.
GrB_Info GB (_AaddB__rminus_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B with the rminus operator
// on uint8_t values; pattern of C is the set intersection of A and B.
GrB_Info GB (_AemultB_01__rminus_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full, with the rminus operator on uint8_t values.  flipxy selects
// whether the operator arguments must be swapped.
GrB_Info GB (_AemultB_02__rminus_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full, with the rminus operator on uint8_t values.
GrB_Info GB (_AemultB_03__rminus_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
// compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult, bitmap case: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// bitmap, with the rminus operator on uint8_t values.
GrB_Info GB (_AemultB_bitmap__rminus_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op(x, Bx): apply rminus with the scalar bound as the FIRST argument.
// Since rminus(x,y) = y - x, this computes Cx[p] = Bx[p] - x for every entry
// present in B's pattern.
GrB_Info GB (_bind1st__rminus_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
// compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
// the bound scalar x (first operand of rminus)
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// Bb is B's bitmap; skip positions with no entry
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
// rminus: second operand minus first
Cx [p] = (bij - x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op(Ax, y): apply rminus with the scalar bound as the SECOND argument.
// Since rminus(x,y) = y - x, this computes Cx[p] = y - Ax[p] for every entry
// present in A's pattern.
GrB_Info GB (_bind2nd__rminus_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
// the bound scalar y (second operand of rminus)
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Ab is A's bitmap; skip positions with no entry
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
// rminus: second operand minus first
Cx [p] = (y - aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: with x bound first,
// rminus gives cij = aij - x.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - x) ; \
}
// C = op(x, A'): transpose A and apply rminus with the scalar bound first.
GrB_Info GB (_bind1st_tran__rminus_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
// compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
// the bound scalar x, captured by GB_CAST_OP above
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE (auto-generated; kept identical to the value above)
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: with y bound second,
// rminus gives cij = y - aij.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y - aij) ; \
}
// C = op(A', y): transpose A and apply rminus with the scalar bound second.
GrB_Info GB (_bind2nd_tran__rminus_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
// the bound scalar y, captured by GB_CAST_OP above
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
openmp_utils.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ \.
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
#ifndef KRATOS_OPENMP_UTILS_H
#define KRATOS_OPENMP_UTILS_H
#include <stdio.h>
#include <vector>
#include <iostream>
#ifdef _OPENMP
#include <omp.h>
#else
#include <ctime>
#endif
namespace Kratos
{
///@addtogroup KratosCore
///@{
///@name Kratos Classes
///@{
/// Implements basic tasks for OpenMP parallelism and suitable scalar alternatives
/**
This class defines utility functions that implement some basic OpenMP
capabilities and an equivalent scalar alternative to use in compilations
where OpenMP is not enabled. The idea is to allow Kratos developers to
design their code in parallel, knowing that it will work in scalar runs
as well.
*/
class OpenMPUtils
{
public:
///@name Type definitions
///@{
/// Vector type for the output of DivideInPartitions method
/**
* @see OpenMPUtils::DivideInPartitions
*/
typedef std::vector<int> PartitionVector;
///@}
///@name Operations
///@{
/// Wrapper for omp_get_max_threads().
/**
@return Maximum number of OpenMP threads that will be used in
parallel regions.
*/
static inline int GetNumThreads()
{
#ifdef _OPENMP
return omp_get_max_threads();
#else
return 1;
#endif
}
/// Wrapper for omp_get_thread_num().
/**
@return The thread number for this thread, 0 if scalar run.
*/
static inline int ThisThread()
{
#ifdef _OPENMP
return omp_get_thread_num();
#else
return 0;
#endif
}
/// Wrapper for omp_in_parallel().
/**
@return Maximum number of OpenMP threads that will be used in
parallel regions.
*/
static inline int IsInParallel()
{
#ifdef _OPENMP
return omp_in_parallel();
#else
return 0;
#endif
}
/// Timing routine.
/**
Determine the current time by calling an appropiate
(scalar or parallel) timer class.
@return Current time
*/
static double GetCurrentTime()
{
#ifndef _OPENMP
return std::clock()/static_cast<double>(CLOCKS_PER_SEC);
#else
return omp_get_wtime();
#endif
}
/// Divide an array of length NumTerms between NumThreads threads.
/**
Creates a std::vector containing NumThreads + 1 terms, where term k
is the first and position of the array that corresponds to thread k.
The k+1 term is the end of the array, so that the vector can be used
to iterate the array between 'k' and 'k+1' in each thread.
@param NumTerms Number of objects to be divided between the threads.
@param NumThreads The number of parallel threads that will be used.
@param Partitions This object will contain the begin and end positions
for each thread.
*/
static inline void DivideInPartitions(
const int NumTerms,
const int NumThreads,
PartitionVector& Partitions)
{
Partitions.resize(NumThreads + 1);
int PartitionSize = NumTerms / NumThreads;
Partitions[0] = 0;
Partitions[NumThreads] = NumTerms;
for(int i = 1; i < NumThreads; i++)
Partitions[i] = Partitions[i-1] + PartitionSize ;
}
/// Generate a partition for an std::vector-like array, providing iterators to the begin and end positions for each thread.
/** This function assumes that the vector class will have an iterator type and implement begin(), end() and size() methods.
* @param rVector An arary containing the elements to be distributed between the threads.
* @param rBegin Iterator pointing to the first element in rVector to be used in the current thread.
* @param rEnd Iterator pointing to the end position for the current thread in rVector.
*/
template< class TVector >
static void PartitionedIterators(TVector& rVector,
typename TVector::iterator& rBegin,
typename TVector::iterator& rEnd)
{
#ifdef _OPENMP
int NumTerms = rVector.size();
int ThreadNum = omp_get_thread_num();
int NumThreads = omp_get_max_threads();
int PartitionSize = NumTerms / NumThreads;
// Set Partition start
rBegin = rVector.begin() + ThreadNum * PartitionSize;
// Partition ends after 'PartitionSize' terms, except if this is the last partition
if ( (ThreadNum + 1) != NumThreads )
rEnd = rBegin + PartitionSize;
else
rEnd = rVector.end();
#else
rBegin = rVector.begin();
rEnd = rVector.end();
#endif
}
/// A function to set the number of threads from Python.
/**
This is an auxiliary mainly intended for test purposes, to help with the
detection of race conditions.
@param NumThreads Number of threads to use in parallel regions. Note
that values greater than the environment variable OMP_NUM_THREADS
will be ignored.
*/
static inline void SetNumThreads(int NumThreads = 1)
{
#ifdef _OPENMP
int procs = omp_get_num_procs();
if( procs < NumThreads ){
std::cout<<" WARNING: Maximimun number of threads is EXCEEDED "<<std::endl;
/* Set thread number */
omp_set_num_threads(procs);
std::cout<<" Number of Threads Set To : "<<procs<<std::endl;
}
else{
/* Set thread number */
omp_set_num_threads(NumThreads);
}
#endif
}
/**
A method to print the OMP information
*/
static inline void PrintOMPInfo()
{
#ifdef _OPENMP
int nthreads,tid, procs, maxt, inpar, dynamic, nested;
/* Start parallel region */
#pragma omp parallel private(nthreads, tid)
{
/* Obtain thread number */
tid = omp_get_thread_num();
/* Only master thread does this */
if (tid == 0)
{
printf(" Thread %d getting environment info...\n", tid);
/* Get environment information */
procs = omp_get_num_procs();
nthreads = omp_get_num_threads();
maxt = omp_get_max_threads();
inpar = omp_in_parallel();
//omp_set_dynamic(true);
dynamic = omp_get_dynamic();
//omp_set_nested(true);
nested = omp_get_nested();
/* Print environment information */
printf( " | ------------ OMP IN USE --------- |\n");
printf( " | Machine number of processors = %d |\n", procs);
printf( " | Number of threads set = %d |\n", nthreads);
printf( " | Max threads in use = %d |\n", maxt);
printf( " | In parallel? = %d |\n", inpar);
printf( " | Dynamic threads enabled? = %d |\n", dynamic);
printf( " | Nested parallelism supported? = %d |\n", nested);
printf( " | --------------------------------- |\n");
if( procs < nthreads )
std::cout<<" ( WARNING: Maximimun number of threads is EXCEEDED )"<<std::endl;
}
}
#endif
}
template<class T>
static inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, T& partitions)
{
partitions.resize(number_of_threads+1);
int partition_size = number_of_rows / number_of_threads;
partitions[0] = 0;
partitions[number_of_threads] = number_of_rows;
for(unsigned int i = 1; i<number_of_threads; i++)
partitions[i] = partitions[i-1] + partition_size ;
}
///@} //Operations
};
///@} //Kratos classes
///@} addtogroup block
}
#endif /* KRATOS_OPENMP_UTILS_H */
|
GB_unaryop__abs_int16_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int16_fp32
// op(A') function: GB_tran__abs_int16_fp32
// C type: int16_t
// A type: float
// cast: int16_t cij ; GB_CAST_SIGNED(cij,aij,16)
// unaryop: cij = GB_IABS (aij)
// A's value type (the operator input, before casting)
#define GB_ATYPE \
float
// C's value type (the operator output)
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int16_t z ; GB_CAST_SIGNED(z,x,16) ;
// cij = op (cast (aij)): note the cast (float -> int16_t) happens BEFORE
// the integer absolute value is taken
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): for each of the anz entries, cast the float value to
// int16_t (GB_CAST_SIGNED) and take the integer absolute value (GB_IABS).
GrB_Info GB_unop__abs_int16_fp32
(
int16_t *restrict Cx,
const float *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
// dense 1:1 mapping, statically scheduled across nthreads
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast float -> int16_t, and apply the
// abs operator.  The loop body is generated from GB_unaryop_transpose.c
// using the macros defined above.
GrB_Info GB_tran__abs_int16_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
// run only the second phase of the two-phase transpose
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_sgemm_packn_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// GEMM stage of im2col convolution, fp16 storage + fp16 arithmetic, RISC-V
// Vector "packn" layout (packn = number of 16-bit lanes in one e16m1
// vector register, derived from VLEN below).
//
// bottom_im2col : input already unrolled by im2col, shape (size, maxk, inch)
// top_blob      : output feature map, one channel per output channel p
// kernel        : weights, pre-packed so kptr0 advances by packn per step
// _bias         : optional per-output-channel bias (may be empty)
//
// The function first permutes the im2col matrix into tiles of 8/4/2/1
// columns ("tmp") for locality, then accumulates the matrix product with
// vfmacc over packn-wide vectors.
static void im2col_sgemm_packn_fp16sa_rvv(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
// packn: fp16 lanes per vector register (vlenb bytes / 2 bytes per lane)
const int packn = csrr_vlenb() / 2;
const word_type vl = vsetvl_e16m1(packn);
// Mat bottom_im2col(size, maxk, inch, 2u * packn, packn, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const __fp16* bias = _bias;
// permute: repack columns into tiles of 8, then 4, then 2, then 1, so the
// GEMM loops below can stream contiguously.  tmp channel layout mirrors
// the i/8 + (i%8)/4 + (i%4)/2 + i%2 indexing used throughout.
Mat tmp;
if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 2u * packn, packn, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 2u * packn, packn, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 2u * packn, packn, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 2u * packn, packn, opt.workspace_allocator);
{
int remain_size_start = 0;
int nn_size = size >> 3;
// ---- tiles of 8 columns ----
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
__fp16* tmpptr = tmp.channel(i / 8);
for (int q = 0; q < inch; q++)
{
const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn;
for (int k = 0; k < maxk; k++)
{
#if RVV_SPEC_0_7
// scalar transpose fallback for the 0.7 vector spec (no vsseg)
for (int l = 0; l < packn; l++)
{
tmpptr[0] = img0[l];
tmpptr[1] = img0[l + packn];
tmpptr[2] = img0[l + packn * 2];
tmpptr[3] = img0[l + packn * 3];
tmpptr[4] = img0[l + packn * 4];
tmpptr[5] = img0[l + packn * 5];
tmpptr[6] = img0[l + packn * 6];
tmpptr[7] = img0[l + packn * 7];
tmpptr += 8;
}
img0 += size * packn;
#else
// segment store interleaves the 8 column vectors in one instruction
vfloat16m1_t _val0 = vle16_v_f16m1(img0, vl);
vfloat16m1_t _val1 = vle16_v_f16m1(img0 + packn, vl);
vfloat16m1_t _val2 = vle16_v_f16m1(img0 + packn * 2, vl);
vfloat16m1_t _val3 = vle16_v_f16m1(img0 + packn * 3, vl);
vfloat16m1_t _val4 = vle16_v_f16m1(img0 + packn * 4, vl);
vfloat16m1_t _val5 = vle16_v_f16m1(img0 + packn * 5, vl);
vfloat16m1_t _val6 = vle16_v_f16m1(img0 + packn * 6, vl);
vfloat16m1_t _val7 = vle16_v_f16m1(img0 + packn * 7, vl);
vsseg8e16_v_f16m1x8(tmpptr, vcreate_f16m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl);
img0 += size * packn;
tmpptr += packn * 8;
#endif
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
// ---- tiles of 4 columns ----
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
__fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
for (int q = 0; q < inch; q++)
{
const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn;
for (int k = 0; k < maxk; k++)
{
#if RVV_SPEC_0_7
for (int l = 0; l < packn; l++)
{
tmpptr[0] = img0[l];
tmpptr[1] = img0[l + packn];
tmpptr[2] = img0[l + packn * 2];
tmpptr[3] = img0[l + packn * 3];
tmpptr += 4;
}
img0 += size * packn;
#else
vfloat16m1_t _val0 = vle16_v_f16m1(img0, vl);
vfloat16m1_t _val1 = vle16_v_f16m1(img0 + packn, vl);
vfloat16m1_t _val2 = vle16_v_f16m1(img0 + packn * 2, vl);
vfloat16m1_t _val3 = vle16_v_f16m1(img0 + packn * 3, vl);
vsseg4e16_v_f16m1x4(tmpptr, vcreate_f16m1x4(_val0, _val1, _val2, _val3), vl);
img0 += size * packn;
tmpptr += packn * 4;
#endif
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
// ---- tiles of 2 columns ----
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
__fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
for (int q = 0; q < inch; q++)
{
const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn;
for (int k = 0; k < maxk; k++)
{
#if RVV_SPEC_0_7
for (int l = 0; l < packn; l++)
{
tmpptr[0] = img0[l];
tmpptr[1] = img0[l + packn];
tmpptr += 2;
}
img0 += size * packn;
#else
vfloat16m1_t _val0 = vle16_v_f16m1(img0, vl);
vfloat16m1_t _val1 = vle16_v_f16m1(img0 + packn, vl);
vsseg2e16_v_f16m1x2(tmpptr, vcreate_f16m1x2(_val0, _val1), vl);
img0 += size * packn;
tmpptr += packn * 2;
#endif
}
}
}
remain_size_start += nn_size << 1;
// ---- remaining single columns ----
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
__fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
for (int q = 0; q < inch; q++)
{
const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn;
for (int k = 0; k < maxk; k++)
{
vfloat16m1_t _val = vle16_v_f16m1(img0, vl);
vse16_v_f16m1(tmpptr, _val, vl);
img0 += size * packn;
tmpptr += packn;
}
}
}
}
// matrix product: for each output channel, walk the permuted tiles and
// accumulate nn = inch*maxk*packn fused multiply-adds per output column.
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
__fp16* outptr0 = top_blob.channel(p);
int i = 0;
// 8 output columns at a time
for (; i + 7 < size; i += 8)
{
const __fp16* tmpptr = tmp.channel(i / 8);
const __fp16* kptr0 = kernel.channel(p);
int nn = inch * maxk * packn; // inch always > 0
// accumulators start at the bias (broadcast per channel) or zero
vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);
vfloat16m1_t _sum4 = vfmv_v_f_f16m1(0.f, vl);
vfloat16m1_t _sum5 = vfmv_v_f_f16m1(0.f, vl);
vfloat16m1_t _sum6 = vfmv_v_f_f16m1(0.f, vl);
vfloat16m1_t _sum7 = vfmv_v_f_f16m1(0.f, vl);
if (bias)
{
_sum0 = vle16_v_f16m1(bias + p * packn, vl);
_sum1 = vle16_v_f16m1(bias + p * packn, vl);
_sum2 = vle16_v_f16m1(bias + p * packn, vl);
_sum3 = vle16_v_f16m1(bias + p * packn, vl);
_sum4 = vle16_v_f16m1(bias + p * packn, vl);
_sum5 = vle16_v_f16m1(bias + p * packn, vl);
_sum6 = vle16_v_f16m1(bias + p * packn, vl);
_sum7 = vle16_v_f16m1(bias + p * packn, vl);
}
for (int j = 0; j < nn; j++)
{
// one scalar per output column, broadcast-multiplied into the
// packn-wide weight vector
__fp16 val0 = *tmpptr++;
__fp16 val1 = *tmpptr++;
__fp16 val2 = *tmpptr++;
__fp16 val3 = *tmpptr++;
__fp16 val4 = *tmpptr++;
__fp16 val5 = *tmpptr++;
__fp16 val6 = *tmpptr++;
__fp16 val7 = *tmpptr++;
vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, val4, _w0, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, val5, _w0, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, val6, _w0, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, val7, _w0, vl);
kptr0 += packn;
}
vse16_v_f16m1(outptr0, _sum0, vl);
vse16_v_f16m1(outptr0 + packn, _sum1, vl);
vse16_v_f16m1(outptr0 + packn * 2, _sum2, vl);
vse16_v_f16m1(outptr0 + packn * 3, _sum3, vl);
vse16_v_f16m1(outptr0 + packn * 4, _sum4, vl);
vse16_v_f16m1(outptr0 + packn * 5, _sum5, vl);
vse16_v_f16m1(outptr0 + packn * 6, _sum6, vl);
vse16_v_f16m1(outptr0 + packn * 7, _sum7, vl);
outptr0 += packn * 8;
}
// 4 output columns at a time
for (; i + 3 < size; i += 4)
{
const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const __fp16* kptr0 = kernel.channel(p);
int nn = inch * maxk * packn; // inch always > 0
vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);
if (bias)
{
_sum0 = vle16_v_f16m1(bias + p * packn, vl);
_sum1 = vle16_v_f16m1(bias + p * packn, vl);
_sum2 = vle16_v_f16m1(bias + p * packn, vl);
_sum3 = vle16_v_f16m1(bias + p * packn, vl);
}
for (int j = 0; j < nn; j++)
{
__fp16 val0 = *tmpptr++;
__fp16 val1 = *tmpptr++;
__fp16 val2 = *tmpptr++;
__fp16 val3 = *tmpptr++;
vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);
kptr0 += packn;
}
vse16_v_f16m1(outptr0, _sum0, vl);
vse16_v_f16m1(outptr0 + packn, _sum1, vl);
vse16_v_f16m1(outptr0 + packn * 2, _sum2, vl);
vse16_v_f16m1(outptr0 + packn * 3, _sum3, vl);
outptr0 += packn * 4;
}
// 2 output columns at a time
for (; i + 1 < size; i += 2)
{
const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const __fp16* kptr0 = kernel.channel(p);
int nn = inch * maxk * packn; // inch always > 0
vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
if (bias)
{
_sum0 = vle16_v_f16m1(bias + p * packn, vl);
_sum1 = vle16_v_f16m1(bias + p * packn, vl);
}
for (int j = 0; j < nn; j++)
{
__fp16 val0 = *tmpptr++;
__fp16 val1 = *tmpptr++;
vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
kptr0 += packn;
}
vse16_v_f16m1(outptr0, _sum0, vl);
vse16_v_f16m1(outptr0 + packn, _sum1, vl);
outptr0 += packn * 2;
}
// remaining single columns
for (; i < size; i++)
{
const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const __fp16* kptr0 = kernel.channel(p);
int nn = inch * maxk * packn; // inch always > 0
vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);
if (bias)
{
_sum = vle16_v_f16m1(bias + p * packn, vl);
}
for (int j = 0; j < nn; j++)
{
__fp16 val = *tmpptr++;
vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl);
_sum = vfmacc_vf_f16m1(_sum, val, _w0, vl);
kptr0 += packn;
}
vse16_v_f16m1(outptr0, _sum, vl);
outptr0 += packn;
}
}
}
// im2col + GEMM convolution driver, fp16 storage + fp16 arithmetic, RVV
// packn layout.  Unrolls the input into a (size, maxk, inch) matrix, then
// delegates the matrix product to im2col_sgemm_packn_fp16sa_rvv above.
static void convolution_im2col_sgemm_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
// packn: fp16 lanes per vector register (vlenb bytes / 2 bytes per lane)
const int packn = csrr_vlenb() / 2;
const word_type vl = vsetvl_e16m1(packn);
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
const int maxk = kernel_w * kernel_h;
// im2col
Mat bottom_im2col(size, maxk, inch, 2u * packn, packn, opt.workspace_allocator);
{
// gap: elements to skip to advance stride_h input rows after one
// output row has been consumed
const int gap = (w * stride_h - outw * stride_w) * packn;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
__fp16* ptr = bottom_im2col.channel(p);
// one pass per kernel tap (u,v); each pass copies a strided window
// of the input channel into a contiguous row of bottom_im2col
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
const __fp16* sptr = img.row<const __fp16>(dilation_h * u) + dilation_w * v * packn;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
vfloat16m1_t _val = vle16_v_f16m1(sptr, vl);
vse16_v_f16m1(ptr, _val, vl);
sptr += stride_w * packn;
ptr += packn;
}
sptr += gap;
}
}
}
}
}
im2col_sgemm_packn_fp16sa_rvv(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
simd.c | #include <stdio.h>
#define N 100

/*
 * OpenMP offload correctness test: fill a[] on the target device with a
 * SIMD loop, compute the reference values on the host, and compare.
 * Returns the number of mismatches (0 on success).
 */
int main()
{
    int a[N], aa[N];
    int i, error = 0;

    /* initialize both arrays to a sentinel value */
    for (i = 0; i < N; i++)
        aa[i] = a[i] = -1;

    /* offload: the device writes a[k] = k */
#pragma omp target map(tofrom: a[0:100])
    {
        int k;
#pragma omp simd
        for (k = 0; k < N; k++)
            a[k] = k;
    }

    /* host reference */
    for (i = 0; i < N; i++)
        aa[i] = i;

    /* check: count mismatches, remembering the first and last index */
    int first = -1;
    int last = -1;
    for (i = 0; i < N; i++) {
        if (a[i] != aa[i]) {
            if (first == -1) first = i;
            last = i;
            ++error;
        }
    }
    if (error) {
        if (error == 1)
            printf("one mismatch: [index:%d]: a %d != %d\n", first, a[first], aa[first]);
        else {
            printf("first mismatch: [index:%d]: a %d != %d (total errors: %d)\n", first, a[first], aa[first], error);
            printf("last mismatch: [index:%d]: a %d != %d (total errors %d)\n", last, a[last], aa[last], error);
        }
        /* BUG FIX: this path previously returned 0, reporting success to the
           caller even when mismatches were found.  Propagate the error count
           so a test harness can detect the failure (consistent with the
           success path below, which already returns `error`). */
        return error;
    }

    /* report */
    printf("Done with %d errors\n", error);
    return error;
}
|
threadpool.h | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/* Modifications Copyright (c) Microsoft. */
#pragma once
#include <string>
#include <vector>
#include <functional>
#include <memory>
#include "core/common/common.h"
#include "core/platform/env.h"
#include <functional>
#include <memory>
// ORT thread pool overview
// ------------------------
//
// The ORT thread pool implementation is split into two layers. This
// file provides the high-level component. See the accompanying
// comments in EigenNonBlockingThreadPool.h for the low-level
// component.
//
// threadpool.h defines the user-facing functions for use in
// operators. The main abstraction are parallel loops
// (ThreadPool::TryParallelFor*), although we also support scheduling
// of asynchronous tasks (ThreadPool::Schedule), and the construction
// of multi-loop parallel sections (ThreadPool::ParallelSection).
//
// This high level API is accessed via static methods on the
// ThreadPool class. These methods map the operations onto one of
// three low-level implementations: (#1) direct execution of the
// operations if there is no thread pool configured, (#2) execution of
// the operations using the modified Eigen threadpool, (#3) execution
// of the operations using OpenMP. Option #1 enables execution in
// simple settings without needing threads. Option #2 is the
// preferred approach for use in settings with parallelism.
//
// The high-level part of the thread pool is responsible for:
//
// - Exposing the desired degree of parallelism to user code, and to
// libraries such as MLAS. This lets the libraries tailor the
// extent to which they parallelize work.
//
// - Handling trivial cases (such as directly running parallel loops
// with only a single iteration, or with no iterations at all).
//
// - Deciding how to divide work efficiently between the threads
// available.
//
// The ThreadPool::TryParallelFor methods do this based on cost
// estimates supplied by the caller, and are designed to support
// loops with small amounts of work per iteration. The loop body is
// supplied as a function taking a [start,end) range of iterations
// to execute (avoiding the need for per-iteration std::function
// calls, or a reliance upon inlining to avoid those calls).
//
// ThreadPool::TrySimpleParallelFor uses a simpler single-iteration
// API based on the assumption that the caller has divided work to
// an appropriate granularity.
//
// - When used with the Eigen-based thread pool, the implementation of
// all of the loops maps down onto
// ThreadPool::ParallelForFixedBlockSizeScheduling. This method
// takes the degree of parallelism (d_of_p) and work distribution
// block size (from the cost-based heuristics), and creates a set of
// tasks in the underlying thread pool (via
// ThreadPool::RunInParallel).
//
// These tasks then run a loop which picks off batches of iterations
// from the user's code. The distribution of these batches is
// handled dynamically via LoopCounter::ClaimIterations. This
// dynamic balancing behavior helps make performance robust to any
// variability in the execution time across iterations, and to
// situations such as multiple loops running concurrently on the
// same thread pool.
//
// - When running a series of loops inside a parallel section, the
// LoopCounter also helps obtain affinity between these loops (i.e.,
// iteration X of one loop will tend to run on the same thread that
// ran iteration X of prior loops). This locality helps improve hit
// rates in per-core caches across the series of short loops used in
// operators like GRU.
//
// There are some known areas for exploration here:
//
// - The cost-based heuristics were developed prior to recent changes
// to the thread pool. The heuristics seem to work well, but we
// should revisit the tuning periodically.
//
// - Can we unify the APIs for the different kinds of parallel loop?
//
// In particular, we may be able to replace the current use of
// TryBatchParallelFor with appropriate costs for each call site,
// and then use TryParallelFor. This would allow for more dynamic
// re-balancing of work between threads than the current
// ThreadPool::PartitionWork function provides.
//
// - Given the extensive modifications to original Eigen code, should
// we separate that out as a new class and remove the dependence on
// other Eigen components.
// This file use PIMPL to avoid having eigen headers here
namespace Eigen {
class Allocator;
class ThreadPoolInterface;
} // namespace Eigen
namespace onnxruntime {
// Per-unit-of-work cost estimate used by TryParallelFor's scheduling
// heuristics (see the cost-based sharding comments above).
struct TensorOpCost {
  // Estimated bytes read per unit of work.
  double bytes_loaded;
  // Estimated bytes written per unit of work.
  double bytes_stored;
  // Estimated CPU cycles (or nanoseconds, if not CPU-bound) per unit of work.
  double compute_cycles;
};
namespace concurrency {
template <typename Environment>
class ThreadPoolTempl;
class ExtendedThreadPoolInterface;
class LoopCounter;
class ThreadPoolParallelSection;
class ThreadPool {
 public:
#ifdef _WIN32
  using NAME_CHAR_TYPE = wchar_t;
#else
  using NAME_CHAR_TYPE = char;
#endif
  // Constructs a pool for running with "degree_of_parallelism" threads with
  // specified "name". env->StartThread() is used to create individual threads
  // with the given ThreadOptions. If "low_latency_hint" is true the thread pool
  // implementation may use it as a hint that lower latency is preferred at the
  // cost of higher CPU usage, e.g. by letting one or more idle threads spin
  // wait. Conversely, if the threadpool is used to schedule high-latency
  // operations like I/O the hint should be set to false.
  //
  // REQUIRES: degree_of_parallelism > 0
  ThreadPool(Env* env,
             const ThreadOptions& thread_options,
             const NAME_CHAR_TYPE* name,
             int degree_of_parallelism,
             bool low_latency_hint);

  // Waits until all scheduled work has finished and then destroys the
  // set of threads.
  ~ThreadPool();

  // Start and end a multi-loop parallel section. Parallel loops can
  // be executed directly (without using this API), but entering a
  // parallel section allows the runtime system to amortize loop
  // entry/exit costs over multiple loops, and allows it to promote
  // affinity between corresponding iterations of different loops.
  //
  // Multi-loop sections would typically be used in cases where a
  // series of loops executes without much code in between them, and
  // where it is impractical to refactor code into a single loop. For
  // instance:
  //
  // {
  //   onnxruntime::concurrency::ThreadPool::ParallelSection ps(tp);
  //   for (int x = 0; x < seq_len; x++) {
  //     TrySimpleParallelFor(tp, 16, [&](std::ptrdiff_t idx) { ... });
  //   }
  // }
  //
  // The parallel section is entered via the constructor of
  // ThreadPool::ParallelSection, and exited via the destructor.
  // Currently, thread-local state is used to track whether or not the
  // current thread is inside a parallel section. In contrast to
  // handling parallel section objects explicitly in user code, this
  // approach allows code such as MLAS to operate with/without the use
  // of parallel sections.
  //
  // Parallel sections are only implemented with the Eigen threadpool.
  // They have no effect when using OpenMP.
  //
  // Parallel sections may not be nested, and may not be used inside
  // parallel loops.
  class ParallelSection {
   public:
    explicit ParallelSection(ThreadPool *tp);
    ~ParallelSection();

   private:
    friend class ThreadPool;

    // Owning reference for the underlying ThreadPoolParallelSection
    // which implements the thread management. We use an explicit
    // deleter here so that the definition of
    // ThreadPoolParallelSection does not need to be available at this
    // point to avoid a dependence on the Eigen headers.
    std::unique_ptr<ThreadPoolParallelSection, void(*)(ThreadPoolParallelSection*)>
        ps_{nullptr, [](ThreadPoolParallelSection*){}};

#ifndef _OPENMP
    ThreadPool *tp_;
#endif
    ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ParallelSection);

    // Non-owning reference to the current thread's parallel section
    // (or nullptr outside parallel sections).
    static thread_local ParallelSection *current_parallel_section;
    static_assert(std::is_trivially_destructible<decltype(current_parallel_section)>::value,
                  "Per-thread state should be trivially destructible");
  };

  // Schedules fn() for execution in the pool of threads. The function may run
  // synchronously if it cannot be enqueued. This will occur if the thread pool's
  // degree-of-parallelism is 1, but it may also occur for implementation-dependent
  // reasons such as if queues used for buffering work are full.
  static void Schedule(ThreadPool* tp,
                       std::function<void()> fn) {
    if (tp) {
      tp->Schedule(fn);
    } else {
      fn();
    }
  }

  // ParallelFor shards the "total" units of work assuming each unit of work
  // having roughly "cost_per_unit" cost, in cycles. Each unit of work is
  // indexed 0, 1, ..., total - 1. Each shard contains 1 or more units of work
  // and the total cost of each shard is roughly the same.
  //
  // "cost_per_unit" is an estimate of the number of CPU cycles (or nanoseconds
  // if not CPU-bound) to complete a unit of work. Overestimating creates too
  // many shards and CPU time will be dominated by per-shard overhead, such as
  // Context creation. Underestimating may not fully make use of the specified
  // parallelism, and may also cause inefficiencies due to load balancing
  // issues and stragglers.
  static void TryParallelFor(ThreadPool* tp, std::ptrdiff_t total, double cost_per_unit,
                             const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn) {
    TryParallelFor(tp, total, TensorOpCost{0, 0, static_cast<double>(cost_per_unit)}, fn);
  }

  static void TryParallelFor(ThreadPool* tp, std::ptrdiff_t total, const TensorOpCost& cost_per_unit,
                             const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn);

  // Directly schedule the 'total' tasks to the underlying threadpool, without
  // cutting them by halves
  inline static void TrySimpleParallelFor(ThreadPool* tp, std::ptrdiff_t total,
                                          const std::function<void(std::ptrdiff_t)>& fn) {
#ifdef _OPENMP
    ORT_UNUSED_PARAMETER(tp);
#pragma omp parallel for
    for (std::ptrdiff_t i = 0; i < total; ++i) {
      fn(i);
    }
#else
    if (tp != nullptr) {
      tp->SimpleParallelFor(total, fn);
    } else {
      for (std::ptrdiff_t i = 0; i < total; ++i) {
        // In many cases, fn can be inlined here.
        fn(i);
      }
    }
#endif
  }

  /**
   * Tries to call the given function in parallel, with calls split into (num_batches) batches.
   *\param num_batches If it is zero, it will be replaced with the value of DegreeOfParallelism().
   *\param fn A std::function or STL style functor with signature of "void f(std::ptrdiff_t);"
   * Pitfall: Caller should cap `num_batches` to a reasonable value based on the cost of `fn` and the value of `total`.
   *For example, if fn is as simple as: int sum=0; fn = [&](int i){sum +=i;} and `total` is 100, then num_batches should
   *be just 1.
   **/
  template <typename F>
  inline static void TryBatchParallelFor(ThreadPool* tp, std::ptrdiff_t total, F&& fn, std::ptrdiff_t num_batches) {
#ifdef _OPENMP
    ORT_UNUSED_PARAMETER(tp);
    ORT_UNUSED_PARAMETER(num_batches);
#pragma omp parallel for
    for (std::ptrdiff_t i = 0; i < total; ++i) {
      fn(i);
    }
#else
    if (tp == nullptr) {
      for (std::ptrdiff_t i = 0; i < total; ++i) {
        // In many cases, fn can be inlined here.
        fn(i);
      }
      return;
    }
    if (total <= 0)
      return;

    if (total == 1) {
      fn(0);
      return;
    }

    if (num_batches <= 0) {
      num_batches = std::min<std::ptrdiff_t>(total, DegreeOfParallelism(tp));
    }

    if (num_batches <= 1) {
      // Use std::ptrdiff_t for the index: `total` is a std::ptrdiff_t and may
      // exceed the range of int.
      for (std::ptrdiff_t i = 0; i < total; i++) {
        fn(i);
      }
      return;
    }

    tp->SimpleParallelFor(num_batches, [&](std::ptrdiff_t batch_index) {
      auto work = PartitionWork(batch_index, num_batches, total);
      for (std::ptrdiff_t i = work.start; i < work.end; i++) {
        fn(i);
      }
    });
#endif
  }

  // Half-open range [start, end) of work assigned to one batch.
  struct WorkInfo {
    std::ptrdiff_t start;
    std::ptrdiff_t end;
  };

  /** Calculate the start and end offsets for a batch.
      @remarks Based on MlasPartitionWork
  */
  static WorkInfo PartitionWork(std::ptrdiff_t batch_idx, std::ptrdiff_t num_batches, std::ptrdiff_t total_work) {
    const std::ptrdiff_t work_per_batch = total_work / num_batches;
    const std::ptrdiff_t work_per_batch_extra = total_work % num_batches;

    WorkInfo info;
    if (batch_idx < work_per_batch_extra) {
      // The first work_per_batch_extra batches take one extra unit each,
      // so all of total_work is covered.
      info.start = (work_per_batch + 1) * batch_idx;
      info.end = info.start + work_per_batch + 1;
    } else {
      info.start = work_per_batch * batch_idx + work_per_batch_extra;
      info.end = info.start + work_per_batch;
    }

    return info;
  }

  //......................................................................
  //
  // The following static methods take into account whether OpenMP is
  // enabled/disabled, and if the thread pool pointer is nullptr
  // during sequential execution.

  // Provide a hint to the caller for whether or not to parallelize
  // work. This lets a caller switch to a sequential version of an
  // algorithm rather than using calls via the ParallelFor functions.
  static bool ShouldParallelize(const ThreadPool* tp);

  // Return the degree of parallelism that code should assume when using the thread pool.
  // It decouples the degree of parallelism for use with the thread pool from
  // the implementation choice of whether this matches the number of threads created in
  // the pool.
  //
  // Currently, a loop with degree-of-parallelism N is supported by a pool of N-1 threads
  // working in combination with the thread initiating the loop.
  static int DegreeOfParallelism(const ThreadPool* tp);

  ORT_DISALLOW_COPY_AND_ASSIGNMENT(ThreadPool);

  // StartProfiling and StopProfiling are not to be consumed as public-facing API
  static void StartProfiling(concurrency::ThreadPool* tp);
  static std::string StopProfiling(concurrency::ThreadPool* tp);

 private:
  friend class LoopCounter;

  // Returns the number of threads created in the pool. This may be different from the
  // value returned by DegreeOfParallelism to code using the pool.
  int NumThreads() const;

  // Returns current thread id between 0 and NumThreads() - 1, if called from a
  // thread in the pool. Returns -1 otherwise.
  int CurrentThreadId() const;

  // Run fn with up to n degree-of-parallelism enlisting the thread pool for
  // help. The degree-of-parallelism includes the caller, and so if n==1
  // then the function will run directly in the caller. The fork-join
  // synchronization is handled in the thread pool, and so any state captured
  // by fn() is safe from concurrent access once RunInParallel returns.
  void RunInParallel(std::function<void(unsigned idx)> fn, unsigned n, std::ptrdiff_t block_size);

  // Divides the work represented by the range [0, total) into k shards.
  // Calls fn(i*block_size, (i+1)*block_size) from the ith shard (0 <= i < k).
  // Each shard may be executed on a different thread in parallel, depending on
  // the number of threads available in the pool.
  // When (i+1)*block_size > total, fn(i*block_size, total) is called instead.
  // Requires 0 < block_size <= total.
  void ParallelForFixedBlockSizeScheduling(std::ptrdiff_t total, std::ptrdiff_t block_size,
                                           const std::function<void(std::ptrdiff_t, std::ptrdiff_t)>& fn);

  // Return whether or not the calling thread should run a loop of
  // num_iterations divided in chunks of block_size in parallel. If not,
  // the caller should run the loop sequentially.
  bool ShouldParallelizeLoop(const std::ptrdiff_t num_iterations,
                             const std::ptrdiff_t block_size = 1) const;

  // Internal (non-static) parallel loop methods. Unlike the public static methods,
  // these will not handle the cases of OpenMP builds, or builds without a threadpool.
  void ParallelFor(std::ptrdiff_t total, double cost_per_unit,
                   const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn);
  void ParallelFor(std::ptrdiff_t total, const TensorOpCost& cost_per_unit,
                   const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn);
  void SimpleParallelFor(std::ptrdiff_t total, const std::function<void(std::ptrdiff_t)>& fn);
  void Schedule(std::function<void()> fn);

  void StartProfiling();
  std::string StopProfiling();

  ThreadOptions thread_options_;

  // If a thread pool is created with degree_of_parallelism != 1 then an underlying
  // EigenThreadPool is used to create OS threads and handle work distribution to them.
  // If degree_of_parallelism == 1 then underlying_threadpool_ is left as nullptr
  // and parallel work is run directly by the caller.
  ExtendedThreadPoolInterface* underlying_threadpool_ = nullptr;

  // If used, underlying_threadpool_ is instantiated and owned by the ThreadPool.
  std::unique_ptr<ThreadPoolTempl<Env> > extended_eigen_threadpool_;
};
} // namespace concurrency
} // namespace onnxruntime
|
GB_unop__identity_uint8_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint8_int16)
// op(A') function: GB (_unop_tran__identity_uint8_int16)
// C type: uint8_t
// A type: int16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
// type of A's entries (input of the unary op)
#define GB_ATYPE \
    int16_t

// type of C's entries (output of the unary op)
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int16_t aij = Ax [pA]

// reference to the p-th entry in C's value array
#define GB_CX(p) Cx [p]

// unary operator (identity: z = x)
#define GB_OP(z, x) \
    z = x ;

// casting (int16_t -> uint8_t)
#define GB_CAST(z, aij) \
    uint8_t z = (uint8_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    int16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = (uint8_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (uint8_t) Ax [p] for all anz entries, using nthreads OpenMP
// threads.  Two cases: Ab == NULL (sparse/hyper/full: every position holds
// an entry) and Ab != NULL (bitmap: only positions with Ab [p] != 0).
GrB_Info GB (_unop_apply__identity_uint8_int16)
(
    uint8_t *Cx,            // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // p declared outside the loop: the OpenMP loop index must be visible to
    // the pragma below
    int64_t p ;
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int16_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A into C, casting int16_t -> uint8_t.
// All of the transpose logic lives in GB_unop_transpose.c, which is
// specialized through the GB_ATYPE / GB_CTYPE / GB_CAST_OP macros defined
// at the top of this file.
GrB_Info GB (_unop_tran__identity_uint8_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__bshift_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__bshift_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__bshift_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_uint32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_uint32)
// C=scalar+B GB (_bind1st__bshift_uint32)
// C=scalar+B' GB (_bind1st_tran__bshift_uint32)
// C=A+scalar GB (_bind2nd__bshift_uint32)
// C=A'+scalar GB (_bind2nd_tran__bshift_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_bitshift_uint32 (aij, bij)
// type of A's entries (x input of the binop)
#define GB_ATYPE \
    uint32_t

// type of B's entries (y input of the binop)
#define GB_BTYPE \
    int8_t

// type of C's entries (binop result)
#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    0

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// NOTE(review): the trailing backslash after the "0" splices the following
// source line into this macro.  Harmless today (the spliced line is a
// comment, removed after line splicing), but fragile — confirm against the
// generator template.
#define GB_A_IS_PATTERN \
    0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
// NOTE(review): same trailing-backslash splice as GB_A_IS_PATTERN above.
#define GB_B_IS_PATTERN \
    0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

// reference to the p-th entry in C's value array
#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_bitshift_uint32 (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSHIFT || GxB_NO_UINT32 || GxB_NO_BSHIFT_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled: this dense C += A+B kernel is only generated for the ops listed
// below, which do not include bitshift (hence the "(none)" name).
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the loop itself lives in
// GB_dense_ewise3_noaccum_template.c, specialized via the macros above.
// (Unlike the kernels below, this one has no GB_DISABLE guard.)
void GB (_Cdense_ewise3_noaccum__bshift_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse matrix B into a dense matrix C, using the
// slicing of B provided by the caller (B_ek_slicing / B_ntasks / B_nthreads).
GrB_Info GB (_Cdense_accumB__bshift_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (of type int8_t, passed via p_bwork) into
// the dense matrix C.  The loop lives in GB_dense_subassign_22_template.c.
GrB_Info GB (_Cdense_accumb__bshift_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    // single return, matching the structure of GB (_Cdense_accumB) above;
    // the original had a second, unreachable return inside the block
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: the C = A*D colscale kernel is not generated for the bitshift
// operator (see the "(none)" entries in the function table above).
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: the C = D*B rowscale kernel is not generated for the bitshift
// operator (see the "(none)" entries in the function table above).
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C = A+B, C<M> = A+B, or C<!M> = A+B, specialized for
// z = bitshift(x,y) with uint32_t A/C and int8_t B.  The numeric work is in
// GB_add_template.c; this wrapper unpacks the optional alpha/beta scalars.
GrB_Info GB (_AaddB__bshift_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint32_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion only: fill values used for entries present in just one
        // of A or B
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where the
// result C is sparse or hypersparse.  The work is in GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__bshift_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  Since bitshift is not commutative (GB_BINOP_FLIP is 1),
// the flipxy flag selects between fmult(x,y) and fmult(y,x) by setting
// GB_FLIPPED before including the template.
GrB_Info GB (_AemultB_02__bshift_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full.  The work is in GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__bshift_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where the result C is
// bitmap.  The work is in GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__bshift_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = bitshift (x, Bx [k]) for every entry present in B: the scalar is
// bound to the first (x) argument of the operator.
GrB_Info GB (_bind1st__bshift_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *cx = (uint32_t *) Cx_output ;
    uint32_t scalar_x = (*((uint32_t *) x_input)) ;
    int8_t *bx = (int8_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // GBB is true when the entry is present (Bb == NULL: all present)
        if (GBB (Bb, k))
        {
            int8_t bij = GBX (bx, k, false) ;
            cx [k] = GB_bitshift_uint32 (scalar_x, bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = bitshift (Ax [k], y) for every entry present in A: the scalar is
// bound to the second (y) argument of the operator.
GrB_Info GB (_bind2nd__bshift_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *cx = (uint32_t *) Cx_output ;
    uint32_t *ax = (uint32_t *) Ax_input ;
    int8_t scalar_y = (*((int8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // GBB is true when the entry is present (Ab == NULL: all present)
        if (GBB (Ab, k))
        {
            uint32_t aij = GBX (ax, k, false) ;
            cx [k] = GB_bitshift_uint32 (aij, scalar_y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_bitshift_uint32 (x, aij) ; \
}

// C = op (x, A'): transpose A while applying the operator with the scalar x
// bound as the first argument.  GB_ATYPE is temporarily redefined to B's
// type (int8_t) because A here feeds the second operand of z = f(x,y), and
// restored to uint32_t afterwards for the rest of the file.
GrB_Info GB (_bind1st_tran__bshift_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent kernels
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_bitshift_uint32 (aij, y) ; \
}

// C = op (A', y): transpose A while applying the operator with the scalar y
// bound as the second argument.  No GB_ATYPE redefinition is needed here:
// A already feeds the first operand, whose type matches GB_ATYPE (uint32_t).
GrB_Info GB (_bind2nd_tran__bshift_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
mkl_util.h | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#define TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#ifdef INTEL_MKL
#include <string>
#include <vector>
#include <unordered_map>
#include <utility>
#include "mkl_dnn.h"
#include "mkl_dnn_types.h"
#include "mkl_service.h"
#include "mkl_trans.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"
#ifndef INTEL_MKL_ML
#include "mkldnn.hpp"
using mkldnn::engine;
using mkldnn::memory;
using mkldnn::padding_kind;
using mkldnn::primitive;
using mkldnn::reorder;
#endif
#ifdef _WIN32
typedef unsigned int uint;
#endif
// The file contains a number of utility classes and functions used by MKL
// enabled kernels
namespace tensorflow {
// This class encapsulates all the meta data that is associated with an MKL
// tensor. A tensor is an MKL tensor if it was created as the result of an
// MKL operation, and did not go through a conversion to a standard
// Tensorflow tensor.
// Dimension indices used with the legacy MKL (dense) layout ordering.
typedef enum { W = 0, H = 1, C = 2, N = 3 } MklDims;
// MKL-DNN logical dimension indices.  Note Dim_O/Dim_I share the values of
// Dim_N/Dim_C — presumably so the same enum serves filter (output/input
// channel) tensors; confirm against the MKL-DNN kernels that use it.
typedef enum {
  Dim_N = 0,
  Dim_C = 1,
  Dim_H = 2,
  Dim_W = 3,
  Dim_O = 0,
  Dim_I = 1
} MklDnnDims;
class MklShape {
public:
  // Shape metadata starts empty; populate it via the Set* methods below.
  MklShape() {}
  TF_DISALLOW_COPY_AND_ASSIGN(MklShape);  // Cannot copy
  // Releases everything this shape owns: the size/stride arrays, both MKL
  // layout handles (via dnnLayoutDelete_F32, which must only run on layouts
  // that were actually created), and the TF->MKL dimension map.
  // NOTE(review): assumes all pointer members are null-initialized before
  // any setter runs — the member declarations are further down; confirm.
  ~MklShape() {
    if (sizes_) delete[] sizes_;
    if (strides_) delete[] strides_;
    if (mklLayout_) CHECK_EQ(dnnLayoutDelete_F32(mklLayout_), E_SUCCESS);
    if (tfLayout_) CHECK_EQ(dnnLayoutDelete_F32(tfLayout_), E_SUCCESS);
    if (tf_to_mkl_dim_map_) delete[] tf_to_mkl_dim_map_;
  }
  // True if the tensor carrying this shape was produced by an MKL op.
  const bool IsMklTensor() const { return isMklTensor_; }
  void SetMklTensor(const bool isMklTensor) { isMklTensor_ = isMklTensor; }
  void SetDimensions(const size_t dimension) { dimension_ = dimension; }
  // Adopts an existing MKL layout handle (freed in the destructor).
  void SetMklLayout(dnnLayout_t mklLayout) { mklLayout_ = mklLayout; }
  // Creates the MKL layout from a primitive's resource descriptor via
  // dnnLayoutCreateFromPrimitive_F32 (CHECK-fails on error).
  void SetMklLayout(const void* primitive, size_t resourceType) {
    CHECK_EQ(
        dnnLayoutCreateFromPrimitive_F32(&mklLayout_, (dnnPrimitive_t)primitive,
                                         (dnnResourceType_t)resourceType),
        E_SUCCESS);
  }
void SetTfLayout(const size_t dimension, const size_t* sizes,
const size_t* strides) {
dimension_ = dimension;
if (dimension > 0) { // MKl doesn't support zero dimension tensors
sizes_ = new size_t[dimension];
strides_ = new size_t[dimension];
for (int ii = 0; ii < dimension; ii++) {
sizes_[ii] = sizes[ii];
strides_[ii] = strides[ii];
}
CHECK_EQ(dnnLayoutCreate_F32(&tfLayout_, dimension, sizes, strides),
E_SUCCESS);
}
}
// Default case - MKL dim ordering is opposite of TF dim ordering
// MKL -> (DIMS-1)...0 where (DIMS-1) is outermost dim and 0 is innermost dim
// TF -> 0...(DIMS-1) where 0 is outermost dim and (DIMS-1) is innermost dim
// For layers that rely on data_format semantics (conv, pooling etc.)
// or operate only on certain dimensions (relu, concat, split etc.),
// Mkl APIs might require us to reorder these dimensions. In such cases,
// kernels should explicitly set this map
void SetTfDimOrder(const size_t dimension) {
CHECK(dimension == dimension_);
if (tf_to_mkl_dim_map_ == nullptr) {
tf_to_mkl_dim_map_ = new size_t[dimension];
}
for (size_t ii = 0; ii < dimension; ii++) {
tf_to_mkl_dim_map_[ii] = dimension - (ii + 1);
}
}
void SetTfDimOrder(const size_t dimension, const size_t* tf_to_mkl_dim_map) {
CHECK(dimension == dimension_);
if (tf_to_mkl_dim_map_ == nullptr) {
tf_to_mkl_dim_map_ = new size_t[dimension];
}
for (size_t ii = 0; ii < dimension; ii++) {
tf_to_mkl_dim_map_[ii] = tf_to_mkl_dim_map[ii];
}
}
void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
CHECK_EQ(dimension, 4);
CHECK(dimension == dimension_);
if (tf_to_mkl_dim_map_ == nullptr) {
tf_to_mkl_dim_map_ = new size_t[dimension];
}
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDims::W;
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDims::H;
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDims::C;
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDims::N;
}
const dnnLayout_t GetMklLayout() const { return mklLayout_; }
const dnnLayout_t GetTfLayout() const { return tfLayout_; }
const dnnLayout_t GetCurLayout() const {
return isMklTensor_ ? mklLayout_ : tfLayout_;
}
size_t GetDimension() const { return dimension_; }
const size_t* GetSizes() const { return sizes_; }
int64 dim_size(int index) const { return sizes_[index]; }
int64 tf_dim_size(int index) const {
return sizes_[tf_to_mkl_dim_map_[index]];
}
const size_t* GetStrides() const { return strides_; }
const size_t* GetTfToMklDimMap() const { return tf_to_mkl_dim_map_; }
size_t tf_dim_idx(int index) const { return tf_to_mkl_dim_map_[index]; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Channel dimension.
bool IsMklChannelDim(int d) const { return tf_dim_idx(d) == MklDims::C; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Batch dimension.
bool IsMklBatchDim(int d) const { return tf_dim_idx(d) == MklDims::N; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Width dimension.
bool IsMklWidthDim(int d) const { return tf_dim_idx(d) == MklDims::W; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Height dimension.
bool IsMklHeightDim(int d) const { return tf_dim_idx(d) == MklDims::H; }
// Check if the TF-Mkl dimension ordering map specifies if the input
// tensor is in NCHW format.
bool IsTensorInNCHWFormat() const {
TensorFormat data_format = FORMAT_NCHW;
return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
// Check if the TF-Mkl dimension ordering map specifies if the input
// tensor is in NHWC format.
bool IsTensorInNHWCFormat() const {
TensorFormat data_format = FORMAT_NHWC;
return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
void GetConvertedFlatData(dnnLayout_t targetLayout, void* input,
void* output) const {
dnnLayout_t curLayout;
if (isMklTensor_)
curLayout = mklLayout_;
else
curLayout = tfLayout_;
dnnPrimitive_t convert;
CHECK_EQ(dnnConversionCreate_F32(&convert, curLayout, targetLayout),
E_SUCCESS);
CHECK_EQ(dnnConversionExecute_F32(convert, input, output), E_SUCCESS);
CHECK_EQ(dnnDelete_F32(convert), E_SUCCESS);
}
// The following methods are used for serializing and de-serializing the
// contents of the mklshape object.
// The data is serialized in this order
// isMklTensor_
// dimension_
// sizes_
// strides_
// mklLayout_
// tfLayout_
// tf_to_mkl_dim_map_
#define SIZE_OF_MKL_DNN_BUF \
(dnnLayoutSerializationBufferSize_F32()) // Size of buffer needed to
// serialize dnn_layout pointer
// Size of buffer to hold the serialized object, the size is computed as
// follows sizeof(isMklTensor_) + sizeof(dimension_) + sizeof(sizes_) +
// sizeof(strides_)
// + sizeof(mklLayout_ buffer) + sizeof(tfLayout_ buffer)
// + sizeof(tf_to_mkl_dim_map_)
#define SIZE_OF_MKL_SERIAL_DATA(dims) \
(2 * sizeof(size_t) + 3 * dims * sizeof(size_t) + 2 * SIZE_OF_MKL_DNN_BUF)
// First we need to define some macro for offsets into the serial buffer where
// different elements of Mklshape is written/read from
#define IS_MKL_TENSOR_OFFSET 0
// Location from start of buffer where isMklTensor_ is serialized
#define DIMS_OFFSET \
(IS_MKL_TENSOR_OFFSET + sizeof(size_t)) // Location of dimension_
// Location of sizes. Note dim is not used here, left here
// to make macros consistent.
#define SIZES_OFFSET(dims) (DIMS_OFFSET + sizeof(size_t))
#define STRIDES_OFFSET(dims) \
(SIZES_OFFSET(dims) + dims * sizeof(size_t)) // Location of strides
#define MKL_LAYOUT_OFFSET(dims) \
(STRIDES_OFFSET(dims) + dims * sizeof(size_t)) // Location of mklLayout_
#define TF_LAYOUT_OFFSET(dims) \
(MKL_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF) // Location of tfLayout_
// Location of tf_to_mkl_dim_map_
#define TF_TO_MKL_DIM_MAP_OFFSET(dims) \
(TF_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)
// TODO(agramesh1) make sure to create a const to share with rewrite pass
// for min size of MKL metadata tensor.
void DeSerializeMklShape(const unsigned char* buf, size_t buf_size) {
CHECK(buf_size >= sizeof(size_t)) << "Bufsize too small in DeSerialize";
// Make sure buffer holds at least isMklTensor_
isMklTensor_ =
*reinterpret_cast<const size_t*>(buf + IS_MKL_TENSOR_OFFSET) != 0;
if (isMklTensor_) { // If it is an MKL Tensor then read the rest
dimension_ = *(reinterpret_cast<const size_t*>(buf + DIMS_OFFSET));
CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
<< "Bufsize too small in DeSerialize";
sizes_ = new size_t[dimension_];
strides_ = new size_t[dimension_];
tf_to_mkl_dim_map_ = new size_t[dimension_];
for (int i = 0; i < dimension_; i++) {
sizes_[i] =
reinterpret_cast<const size_t*>(buf + SIZES_OFFSET(dimension_))[i];
strides_[i] = reinterpret_cast<const size_t*>(
buf + STRIDES_OFFSET(dimension_))[i];
tf_to_mkl_dim_map_[i] = reinterpret_cast<const size_t*>(
buf + TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i];
}
CHECK_EQ(dnnLayoutDeserialize_F32(&mklLayout_,
buf + MKL_LAYOUT_OFFSET(dimension_)),
E_SUCCESS);
CHECK_EQ(dnnLayoutDeserialize_F32(&tfLayout_,
buf + TF_LAYOUT_OFFSET(dimension_)),
E_SUCCESS);
}
}
void SerializeMklShape(unsigned char* buf, size_t buf_size) const {
CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
<< "Bufsize too small to Serialize";
*reinterpret_cast<size_t*>(buf + IS_MKL_TENSOR_OFFSET) =
isMklTensor_ ? 1 : 0;
if (isMklTensor_) {
*(reinterpret_cast<size_t*>(buf + DIMS_OFFSET)) = dimension_;
for (int i = 0; i < dimension_; i++) {
reinterpret_cast<size_t*>(buf + SIZES_OFFSET(dimension_))[i] =
sizes_[i];
reinterpret_cast<size_t*>(buf + STRIDES_OFFSET(dimension_))[i] =
strides_[i];
reinterpret_cast<size_t*>(buf +
TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i] =
tf_to_mkl_dim_map_[i];
}
CHECK_EQ(dnnLayoutSerialize_F32(mklLayout_,
buf + MKL_LAYOUT_OFFSET(dimension_)),
E_SUCCESS);
CHECK_EQ(
dnnLayoutSerialize_F32(tfLayout_, buf + TF_LAYOUT_OFFSET(dimension_)),
E_SUCCESS);
}
}
private:
bool isMklTensor_ =
false; // Flag to indicate if the tensor is an MKL tensor or not
dnnLayout_t mklLayout_ = nullptr; // Pointer to the MKL layout
dnnLayout_t tfLayout_ = nullptr; // Pointer to layout of corresponding
// Tensorflow tensor, used when conversion from MKL to standard tensor
size_t dimension_ = 0;
size_t* sizes_ = nullptr; // Required by MKL for conversions
size_t* strides_ = nullptr; // Required by MKL for conversions
size_t* tf_to_mkl_dim_map_ =
nullptr; // TF dimension corresponding to this MKL dimension
};
#ifndef INTEL_MKL_ML
// Forward decl
TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format);
memory::dims CalculateTFStrides(const memory::dims& dims_tf_order);
memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
const memory::dims& strides,
memory::data_type dtype);
// MKL-DNN counterpart of MklShape: holds, in a single POD struct, whether the
// tensor is in MKL layout, the TF dimension count/sizes, the TF data format,
// the element type, the MKL-DNN memory descriptor, and the TF<->MKL dimension
// map. Serialization is a raw copy of that POD struct.
class MklDnnShape {
 private:
  typedef struct {
    /// Flag to indicate if the tensor is an MKL tensor or not
    bool is_mkl_tensor_ = false;
    /// Number of dimensions in Tensorflow format
    size_t dimension_ = 0;
    /// Required by MKLDNN for conversions
    mkldnn_dims_t sizes_;  // Required by MKL for conversions
    memory::format tf_data_format_ = memory::format::format_undef;
    memory::data_type T_ = memory::data_type::data_undef;
    // MKL layout
    mkldnn_memory_desc_t mkl_md_;
    /// TF dimension corresponding to this MKL dimension
    mkldnn_dims_t map_;
  } MklShapeData;
  MklShapeData data_;

  typedef std::remove_extent<mkldnn_dims_t>::type mkldnn_dim_t;

// Sentinel marking unused slots in the fixed-size sizes_/map_ arrays.
#define INVALID_DIM_SIZE -1

 public:
  // Initializes every sizes_/map_ slot to the invalid sentinel so that unused
  // dimensions can be distinguished from real ones.
  MklDnnShape() {
    for (size_t i = 0; i < sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
         ++i) {
      data_.sizes_[i] = -1;
    }
    for (size_t i = 0; i < sizeof(data_.map_) / sizeof(data_.map_[0]); ++i) {
      data_.map_[i] = -1;
    }
  }

  ~MklDnnShape() {}
  TF_DISALLOW_COPY_AND_ASSIGN(MklDnnShape);  // Cannot copy

  /// Helper function to compare memory::desc objects for MklDnn.
  /// May be this should go into MklDnn directly.
  // NOTE: compares the raw bytes of the underlying C structs, so any padding
  // bytes participate in the comparison.
  inline bool CompareMklDnnLayouts(const memory::desc& md1,
                                   const memory::desc& md2) const {
    mkldnn_memory_desc_t mdd1 = md1.data;
    mkldnn_memory_desc_t mdd2 = md2.data;
    const char* d1 = reinterpret_cast<const char*>(&mdd1);
    const char* d2 = reinterpret_cast<const char*>(&mdd2);

    size_t md_size = sizeof(mdd1);
    for (size_t i = 0; i < md_size; i++) {
      if (*d1++ != *d2++) {
        return false;
      }
    }
    return true;
  }

  /// Equality function for MklDnnShape objects
  /// @return true if both are equal; false otherwise.
  inline bool operator==(const MklDnnShape& input_shape) const {
    if (this->IsMklTensor() != input_shape.IsMklTensor()) {
      return false;
    }

    // If input tensors are in Mkl layout, then we check for dimensions and
    // sizes.
    if (this->IsMklTensor()) {
      return this->GetTfShape() == input_shape.GetTfShape() &&
             CompareMklDnnLayouts(this->GetMklLayout(),
                                  input_shape.GetMklLayout());
    }

    return true;
  }

  /// Equality operator for MklDnnShape and TFShape.
  /// Returns: true if TF shapes for both are the same, false otherwise
  inline bool operator==(const TensorShape& input_shape) const {
    if (!this->IsMklTensor()) {
      return false;
    }

    return this->GetTfShape() == input_shape;
  }

  inline const bool IsMklTensor() const { return data_.is_mkl_tensor_; }
  inline void SetMklTensor(bool is_mkl_tensor) {
    data_.is_mkl_tensor_ = is_mkl_tensor;
  }

  inline void SetDimensions(const size_t dimension) {
    data_.dimension_ = dimension;
  }

  // Size of the named dimension ('N'/'C'/'H'/'W'); aborts on invalid names
  // or out-of-range indices.
  inline size_t GetDimension(char dimension) const {
    int index = GetMklDnnTensorDimIndex(dimension);
    CHECK(index >= 0 && index < this->GetDimension())
        << "Invalid index from the dimension: " << index << ", " << dimension;
    return this->DimSize(index);
  }

  // Maps a dimension name to its MklDnnDims index; LOG(FATAL)s otherwise.
  inline int32 GetMklDnnTensorDimIndex(char dimension) const {
    switch (dimension) {
      case 'N':
        return MklDnnDims::Dim_N;
      case 'C':
        return MklDnnDims::Dim_C;
      case 'H':
        return MklDnnDims::Dim_H;
      case 'W':
        return MklDnnDims::Dim_W;
      default:
        LOG(FATAL) << "Invalid dimension: " << dimension;
        return -1;  // Avoid compiler warning about missing return value
    }
  }

  inline size_t GetDimension() const { return data_.dimension_; }
  inline const int* GetSizes() const {
    return reinterpret_cast<const int*>(&data_.sizes_[0]);
  }

  // Returns an mkldnn::memory::dims object that contains the sizes of this
  // MklDnnShape object.
  inline memory::dims GetSizesAsMklDnnDims() const {
    memory::dims retVal;
    if (data_.is_mkl_tensor_) {
      size_t dimensions = sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
      for (size_t i = 0; i < dimensions; i++) {
        if (data_.sizes_[i] != INVALID_DIM_SIZE)
          retVal.push_back(data_.sizes_[i]);
      }
    } else {
      CHECK_EQ(data_.is_mkl_tensor_, true);
    }
    return retVal;
  }

  inline int64 DimSize(int index) const {
    CHECK_LT(index, sizeof(data_.sizes_) / sizeof(data_.sizes_[0]));
    return data_.sizes_[index];
  }

  /// Return TensorShape that describes the Tensorflow shape of the tensor
  /// represented by this MklShape.
  inline TensorShape GetTfShape() const {
    CHECK_EQ(data_.is_mkl_tensor_, true);

    std::vector<int32> shape(data_.dimension_, -1);
    if (data_.tf_data_format_ != memory::format::blocked) {
      for (size_t idx = 0; idx < data_.dimension_; ++idx) {
        shape[idx] = data_.sizes_[TfDimIdx(idx)];
      }
    } else {
      // If Tensorflow shape is in Blocked format, then we don't have dimension
      // map for it. So we just create Tensorflow shape from sizes in the
      // specified order.
      for (size_t idx = 0; idx < data_.dimension_; ++idx) {
        shape[idx] = data_.sizes_[idx];
      }
    }

    TensorShape ts;
    bool ret = TensorShapeUtils::MakeShape(shape, &ts).ok();
    CHECK_EQ(ret, true);
    return ts;
  }

  inline void SetElemType(memory::data_type dt) { data_.T_ = dt; }
  inline const memory::data_type GetElemType() { return data_.T_; }

  // Stores a copy of the layout's C descriptor; the pd/md argument itself is
  // not retained.
  inline void SetMklLayout(memory::primitive_desc* pd) {
    CHECK_NOTNULL(pd);
    data_.mkl_md_ = pd->desc().data;
  }

  inline void SetMklLayout(memory::desc* md) {
    CHECK_NOTNULL(md);
    data_.mkl_md_ = md->data;
  }

  inline const memory::desc GetMklLayout() const {
    return memory::desc(data_.mkl_md_);
  }

  inline memory::format GetTfDataFormat() const {
    return data_.tf_data_format_;
  }

  /// We don't create primitive_descriptor for TensorFlow layout now.
  /// We use lazy evaluation and create it only when needed. Input format can
  /// also be Blocked format.
  inline void SetTfLayout(size_t dims, const memory::dims& sizes,
                          memory::format format) {
    CHECK_EQ(dims, sizes.size());
    data_.dimension_ = dims;
    for (size_t ii = 0; ii < dims; ii++) {
      data_.sizes_[ii] = sizes[ii];
    }
    data_.tf_data_format_ = format;
    if (format != memory::format::blocked) {
      SetTfDimOrder(dims, format);
    }
  }

  inline const memory::desc GetTfLayout() const {
    memory::dims dims;
    for (size_t ii = 0; ii < data_.dimension_; ii++) {
      dims.push_back(data_.sizes_[ii]);
    }

    // Create Blocked memory desc if input TF format was set like that.
    if (data_.tf_data_format_ == memory::format::blocked) {
      auto strides = CalculateTFStrides(dims);
      return CreateBlockedMemDescHelper(dims, strides, data_.T_);
    } else {
      return memory::desc(dims, data_.T_, data_.tf_data_format_);
    }
  }

  inline const memory::desc GetCurLayout() const {
    return IsMklTensor() ? GetMklLayout() : GetTfLayout();
  }

  // nhasabni - I've removed SetTfDimOrder that was setting default order in
  // case of MKL-ML. We don't need a case of default dimension order because
  // when an operator that does not get data_format attribute gets all inputs
  // in Tensorflow format, it will produce output in Tensorflow format.
  inline void SetTfDimOrder(const size_t dimension, const mkldnn_dims_t map) {
    CHECK(dimension == data_.dimension_);
    for (size_t ii = 0; ii < dimension; ii++) {
      data_.map_[ii] = map[ii];
    }
  }

  inline void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
    // TODO(nhasabni): Why do we restrict this to 4D?
    CHECK_EQ(dimension, 4);
    CHECK(dimension == data_.dimension_);
    data_.map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDnnDims::Dim_W;
    data_.map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDnnDims::Dim_H;
    data_.map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDnnDims::Dim_C;
    data_.map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDnnDims::Dim_N;
  }

  inline void SetTfDimOrder(const size_t dimension, memory::format format) {
    TensorFormat data_format = MklDnnDataFormatToTFDataFormat(format);
    SetTfDimOrder(dimension, data_format);
  }

  inline const mkldnn_dim_t* GetTfToMklDimMap() const { return &data_.map_[0]; }
  inline size_t TfDimIdx(int index) const { return data_.map_[index]; }
  inline int64 TfDimSize(int index) const {
    return data_.sizes_[TfDimIdx(index)];
  }

  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Channel dimension.
  inline bool IsMklChannelDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_C;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Batch dimension.
  inline bool IsMklBatchDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_N;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Width dimension.
  inline bool IsMklWidthDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_W;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Height dimension.
  inline bool IsMklHeightDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_H;
  }

  /// Check if the TF-Mkl dimension ordering map specifies if the input
  /// tensor is in NCHW format.
  inline bool IsTensorInNCHWFormat() const {
    TensorFormat data_format = FORMAT_NCHW;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  /// Check if the TF-Mkl dimension ordering map specifies if the input
  /// tensor is in NHWC format.
  inline bool IsTensorInNHWCFormat() const {
    TensorFormat data_format = FORMAT_NHWC;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  /// The following methods are used for serializing and de-serializing the
  /// contents of the mklshape object.
  /// The data is serialized in this order
  /// is_mkl_tensor_ : dimension_ : sizes_ : map_: format_ : T_ : mkl_pd_;

  /// Size of buffer to hold the serialized object, the size is computed by
  /// following above mentioned order
  inline size_t GetSerializeBufferSize() const { return sizeof(MklShapeData); }

  // Raw-copies the POD state into 'buf'; 'buf' must hold at least
  // GetSerializeBufferSize() bytes.
  void SerializeMklDnnShape(unsigned char* buf, size_t buf_size) const {
    CHECK(buf_size >= GetSerializeBufferSize())
        << "Buffer size is too small to SerializeMklDnnShape";
    *reinterpret_cast<MklShapeData*>(buf) = data_;
  }

  // Inverse of SerializeMklDnnShape; only copies the full struct when the
  // serialized is_mkl_tensor_ flag is set.
  void DeSerializeMklDnnShape(const unsigned char* buf, size_t buf_size) {
    // Make sure buffer holds at least is_mkl_tensor_.
    CHECK(buf_size >= sizeof(data_.is_mkl_tensor_))
        << "Buffer size is too small in DeSerializeMklDnnShape";

    const bool is_mkl_tensor = *reinterpret_cast<const bool*>(buf);
    if (is_mkl_tensor) {  // If it is an MKL Tensor then read the rest
      CHECK(buf_size >= GetSerializeBufferSize())
          << "Buffer size is too small in DeSerializeMklDnnShape";
      data_ = *reinterpret_cast<const MklShapeData*>(buf);
    }
  }
};
#endif
// List of MklShape objects. Used in Concat/Split layers.
typedef std::vector<MklShape> MklShapeList;
#ifndef INTEL_MKL_ML
typedef std::vector<MklDnnShape> MklDnnShapeList;
#endif
// Check if all tensors specified by MklShapes are MKL tensors.
// Returns true iff every shape in 'shapes' describes an MKL tensor.
inline bool AreAllMklTensors(const MklShapeList& shapes) {
  for (const auto& shape : shapes) {
    if (!shape.IsMklTensor()) return false;  // One plain TF tensor is enough.
  }
  return true;
}
#ifdef INTEL_MKL_ML
// Converts 'mkl_tensor' (in MKL layout, described by 'mkl_shape') into a
// freshly allocated temp tensor in standard TF layout and returns it.
// Dimensions are emitted outermost-to-innermost via the TF->MKL dim map.
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklShape& mkl_shape) {
  Tensor output_tensor;
  TensorShape output_shape;

  for (size_t j = 0; j < mkl_shape.GetDimension(); j++) {
    // Outermost to innermost dimension
    output_shape.AddDim(mkl_shape.GetSizes()[mkl_shape.tf_dim_idx(j)]);
  }

  // Allocate output tensor. The returned Status was previously discarded,
  // which would silently yield an empty tensor on allocation failure; fail
  // loudly instead (same style as the mkldnn variant of this function).
  TF_CHECK_OK(context->allocate_temp(DataTypeToEnum<T>::v(), output_shape,
                                     &output_tensor));

  dnnLayout_t output_layout = static_cast<dnnLayout_t>(mkl_shape.GetTfLayout());
  void* input_buffer = const_cast<T*>(mkl_tensor.flat<T>().data());
  void* output_buffer = const_cast<T*>(output_tensor.flat<T>().data());

  if (mkl_tensor.NumElements() != 0) {
    // Nothing to convert for an empty tensor; MKL conversion needs real data.
    mkl_shape.GetConvertedFlatData(output_layout, input_buffer, output_buffer);
  }

  return output_tensor;
}
#else
// MKL-DNN variant of ConvertMklToTF: not yet implemented. Always aborts via
// TF_CHECK_OK on an UNIMPLEMENTED status; the returned tensor is never
// reached at runtime.
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklDnnShape& mkl_shape) {
  Tensor output_tensor;
  TensorShape output_shape;

  TF_CHECK_OK(
      Status(error::Code::UNIMPLEMENTED, "Unimplemented conversion function"));

  return output_tensor;
}
#endif
// Get the MKL shape from the second string tensor
// Deserializes the MKL shape meta-data for input 'n' from its companion
// meta-data tensor into '*mklshape'.
inline void GetMklShape(OpKernelContext* ctext, int n, MklShape* mklshape) {
  const Tensor& meta_tensor =
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()));
  mklshape->DeSerializeMklShape(
      meta_tensor.flat<uint8>().data(),
      meta_tensor.flat<uint8>().size() * sizeof(uint8));
}
#ifndef INTEL_MKL_ML
// MKL-DNN variant: deserializes the meta-data tensor of input 'n' into
// '*mklshape'.
inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape) {
  const Tensor& meta_tensor =
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()));
  mklshape->DeSerializeMklDnnShape(
      meta_tensor.flat<uint8>().data(),
      meta_tensor.flat<uint8>().size() * sizeof(uint8));
}
#endif
// Gets the actual input
// Returns the data tensor (as opposed to the meta-data tensor) for input 'n'.
inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) {
  return ctext->input(GetTensorDataIndex(n, ctext->num_inputs()));
}
// Fetches the input list registered under 'name' into '*input_tensors'.
// 'input_tensors' must be non-null; the CHECK aborts otherwise.
inline void GetMklInputList(OpKernelContext* ctext, StringPiece name,
                            OpInputList* input_tensors) {
  CHECK_NOTNULL(input_tensors);
  ctext->input_list(name, input_tensors);
}
#ifdef INTEL_MKL_ML
// Deserializes every meta-data tensor of the MKL input list "mkl_<name>"
// into the corresponding slot of '*mkl_shapes'.
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklShapeList* mkl_shapes) {
  OpInputList input_mkl_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);

  for (int i = 0; i < input_mkl_tensors.size(); i++) {
    auto serial_data = input_mkl_tensors[i].flat<uint8>();
    (*mkl_shapes)[i].DeSerializeMklShape(serial_data.data(),
                                         serial_data.size() * sizeof(uint8));
  }
}
#else
// MKL-DNN variant: deserializes every meta-data tensor of the MKL input list
// "mkl_<name>" into the corresponding slot of '*mkl_shapes'.
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklDnnShapeList* mkl_shapes) {
  OpInputList input_mkl_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);

  for (int i = 0; i < input_mkl_tensors.size(); i++) {
    auto serial_data = input_mkl_tensors[i].flat<uint8>();
    (*mkl_shapes)[i].DeSerializeMklDnnShape(serial_data.data(),
                                            serial_data.size() * sizeof(uint8));
  }
}
#endif
#ifndef INTEL_MKL_ML
/// Get shape of input tensor pointed by 'input_idx' in TensorShape format.
/// If the input tensor is in MKL layout, then obtains TensorShape from
/// MklShape.
// Returns the TF-format shape of input 'input_idx': reconstructed from the
// serialized MKL meta-data when the input is an MKL tensor, otherwise taken
// directly from the tensor itself.
inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx) {
  // Sanity check.
  CHECK_NOTNULL(context);
  CHECK_LT(input_idx, context->num_inputs());

  MklDnnShape input_mkl_shape;
  GetMklShape(context, input_idx, &input_mkl_shape);
  if (!input_mkl_shape.IsMklTensor()) {
    // Plain Tensorflow tensor: its own shape is authoritative.
    return MklGetInput(context, input_idx).shape();
  }
  return input_mkl_shape.GetTfShape();
}
#endif
// Allocate the second output tensor that will contain
// the MKL shape serialized
// Allocates the meta-data output tensor for output 'n' and serializes
// 'mkl_shape' into it.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklShape& mkl_shape) {
  const int meta_idx = GetTensorMetaDataIndex(n, ctext->num_outputs());
  TensorShape meta_shape;
  meta_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));

  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext,
                 ctext->allocate_output(meta_idx, meta_shape, &meta_tensor));
  mkl_shape.SerializeMklShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#ifndef INTEL_MKL_ML
// Allocate the second output tensor that will contain
// the MKL shape serialized
// MKL-DNN variant: allocates the meta-data output tensor for output 'n' and
// serializes 'mkl_shape' (a raw POD copy) into it.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklDnnShape& mkl_shape) {
  const int meta_idx = GetTensorMetaDataIndex(n, ctext->num_outputs());
  TensorShape meta_shape;
  meta_shape.AddDim(mkl_shape.GetSerializeBufferSize());

  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext,
                 ctext->allocate_output(meta_idx, meta_shape, &meta_tensor));
  mkl_shape.SerializeMklDnnShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
// Allocates both the data output ('tf_shape', returned via '*output') and
// its companion meta-data output, then serializes 'mkl_shape' into the
// meta-data tensor.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklShape& mkl_shape) {
  const int data_idx = GetTensorDataIndex(n, ctext->num_outputs());
  const int meta_idx = GetTensorMetaDataIndex(n, ctext->num_outputs());

  OP_REQUIRES_OK(ctext, ctext->allocate_output(data_idx, tf_shape, output));

  TensorShape meta_shape;
  meta_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext,
                 ctext->allocate_output(meta_idx, meta_shape, &meta_tensor));
  mkl_shape.SerializeMklShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#ifndef INTEL_MKL_ML
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
// MKL-DNN variant: allocates both the data output ('tf_shape', returned via
// '*output') and its companion meta-data output, then serializes 'mkl_shape'
// into the meta-data tensor.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklDnnShape& mkl_shape) {
  const int data_idx = GetTensorDataIndex(n, ctext->num_outputs());
  const int meta_idx = GetTensorMetaDataIndex(n, ctext->num_outputs());

  OP_REQUIRES_OK(ctext, ctext->allocate_output(data_idx, tf_shape, output));

  TensorShape meta_shape;
  meta_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext,
                 ctext->allocate_output(meta_idx, meta_shape, &meta_tensor));
  mkl_shape.SerializeMklDnnShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif
// Allocates a temp tensor and returns the data buffer for temporary storage.
// Overloads below size the buffer from an MKL-DNN primitive_desc, an MKL-ML
// dnnLayout_t, or an explicit TensorShape, respectively.
#ifndef INTEL_MKL_ML
// Allocates a temp tensor large enough for the memory described by 'pd' and
// returns its raw data pointer via '*buf_out'.
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           const memory::primitive_desc& pd, void** buf_out) {
  // Round up by one element so the buffer covers pd.get_size() bytes even
  // when that size is not a multiple of sizeof(T).
  TensorShape tf_shape;
  tf_shape.AddDim(pd.get_size() / sizeof(T) + 1);

  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<T>().data());
}
#endif
// Allocates a float temp tensor sized to hold the memory required by the MKL
// layout 'lt_buff' and returns its raw data pointer via '*buf_out'.
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           dnnLayout_t lt_buff, void** buf_out) {
  const size_t layout_bytes =
      dnnLayoutGetMemorySize_F32(static_cast<dnnLayout_t>(lt_buff));

  // Round up by one element to cover a byte count that is not a multiple of
  // sizeof(float).
  TensorShape tf_shape;
  tf_shape.AddDim(layout_bytes / sizeof(float) + 1);

  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<float>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<float>().data());
}
// Allocates a temp tensor of element type T with the given shape; aborts the
// kernel (via OP_REQUIRES_OK's early return) on allocation failure.
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           TensorShape tf_shape) {
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 tf_shape, tensor_out));
}
// Computes the 4 per-dimension strides for 'sizes' (indexed per MklDims:
// W=0, H=1, C=2) so that the resulting memory order matches 'data_format'.
inline void GetStridesFromSizes(TensorFormat data_format, size_t* strides,
                                const size_t* sizes) {
  // MKL requires strides in NCHW
  const size_t w = sizes[0];
  const size_t h = sizes[1];
  const size_t c = sizes[2];

  if (data_format == FORMAT_NHWC) {
    strides[0] = c;          // W advances past all channels
    strides[1] = w * c;      // H advances past a full row
    strides[2] = 1;          // C is innermost
    strides[3] = w * h * c;  // N advances past a full image
  } else {
    strides[0] = 1;          // W is innermost
    strides[1] = w;          // H advances past a full row
    strides[2] = w * h;      // C advances past a full plane
    strides[3] = w * h * c;  // N advances past a full image
  }
}
// Converts MKL-ordered sizes (W=0, H=1, C=2, N=3) from 'mkl_shape' into a
// TensorShape laid out per 'data_format_'. Only 4-D shapes are supported.
inline void MklSizesToTFSizes(OpKernelContext* context,
                              TensorFormat data_format_,
                              const MklShape& mkl_shape,
                              TensorShape* tf_shape) {
  const size_t tf_dim = mkl_shape.GetDimension();
  const size_t* tf_sizes = mkl_shape.GetSizes();

  OP_REQUIRES(context, tf_dim == 4,
              errors::InvalidArgument("MKLSizesToTFSizes: size must be 4-dim"));

  // N always comes first; the remaining dims follow the requested format.
  std::vector<int32> sizes;
  sizes.push_back(tf_sizes[3]);  // N
  if (data_format_ == FORMAT_NHWC) {
    sizes.push_back(tf_sizes[1]);  // H
    sizes.push_back(tf_sizes[0]);  // W
    sizes.push_back(tf_sizes[2]);  // C
  } else {
    sizes.push_back(tf_sizes[2]);  // C
    sizes.push_back(tf_sizes[1]);  // H
    sizes.push_back(tf_sizes[0]);  // W
  }

  OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(sizes, tf_shape));
}
// Maps a dimension name ('N'/'C'/'H'/'W') to its MklDims index; any other
// character aborts the process.
inline int32 GetMklTensorDimIndex(char dimension) {
  switch (dimension) {
    case 'W':
      return MklDims::W;
    case 'H':
      return MklDims::H;
    case 'C':
      return MklDims::C;
    case 'N':
      return MklDims::N;
    default:
      LOG(FATAL) << "Invalid dimension: " << dimension;
      return -1;  // Avoid compiler warning about missing return value
  }
}
// Returns the size of the named dimension ('N'/'C'/'H'/'W') of 'mkl_shape';
// aborts if the mapped index is outside the shape's dimension count.
inline int64 GetMklTensorDim(const MklShape& mkl_shape, char dimension) {
  int index = GetMklTensorDimIndex(dimension);
  CHECK(index >= 0 && index < mkl_shape.GetDimension())
      << "Invalid index from the dimension: " << index << ", " << dimension;
  return mkl_shape.dim_size(index);
}
// Copies both the data tensor and its MKL meta-data tensor from input slot
// 'idx_in' to output slot 'idx_out' (shallow Tensor copies via CopyFrom).
inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in,
                                 int idx_out) {
  const int num_inputs = context->num_inputs();
  const int num_outputs = context->num_outputs();
  const int data_in_idx = GetTensorDataIndex(idx_in, num_inputs);
  const int meta_in_idx = GetTensorMetaDataIndex(idx_in, num_inputs);
  const int data_out_idx = GetTensorDataIndex(idx_out, num_outputs);
  const int meta_out_idx = GetTensorMetaDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(data_in_idx);
  const Tensor& meta = context->input(meta_in_idx);
  Tensor output(data.dtype());
  Tensor meta_output(meta.dtype());

  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, data.shape()));
  CHECK(meta_output.CopyFrom(meta, meta.shape()));
  context->set_output(data_out_idx, output);
  context->set_output(meta_out_idx, meta_output);
}
#ifdef INTEL_MKL_ML
// Copies the plain TF data tensor from input 'idx_in' to output 'idx_out',
// reshaped to 'shape', and publishes an empty (non-MKL) shape for the output.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  const Tensor& data =
      context->input(GetTensorDataIndex(idx_in, context->num_inputs()));

  // The output is a standard TF tensor, so its MKL meta-data says "not MKL".
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);

  Tensor output(data.dtype());
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, shape));
  context->set_output(GetTensorDataIndex(idx_out, context->num_outputs()),
                      output);
}
#else
// MKL-DNN variant: copies the plain TF data tensor from input 'idx_in' to
// output 'idx_out', reshaped to 'shape', and publishes an empty (non-MKL)
// shape for the output.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  const Tensor& data =
      context->input(GetTensorDataIndex(idx_in, context->num_inputs()));

  // The output is a standard TF tensor, so its MKL meta-data says "not MKL".
  MklDnnShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);

  Tensor output(data.dtype());
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, shape));
  context->set_output(GetTensorDataIndex(idx_out, context->num_outputs()),
                      output);
}
#endif
#ifdef INTEL_MKL_ML
// Forwards the plain TF data tensor from input 'idx_in' to output 'idx_out'
// without copying, and publishes an empty (non-MKL) shape for the output.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  const int idx_data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int idx_data_out = GetTensorDataIndex(idx_out, context->num_outputs());

  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);

  if (IsRefType(context->input_dtype(idx_data_in))) {
    // Ref inputs must stay refs to preserve aliasing semantics.
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#else
// MKL-DNN variant: forwards the plain TF data tensor from input 'idx_in' to
// output 'idx_out' without copying, publishing an empty (non-MKL) shape.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  const int idx_data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int idx_data_out = GetTensorDataIndex(idx_out, context->num_outputs());

  MklDnnShape dnn_shape_output;
  dnn_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, dnn_shape_output);

  if (IsRefType(context->input_dtype(idx_data_in))) {
    // Ref inputs must stay refs to preserve aliasing semantics.
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#endif
// Forwards both the data tensor and its paired MKL meta tensor from logical
// input `idx_in` to logical output `idx_out`, using ref-forwarding when the
// input dtype is a ref type.
inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in,
                                    int idx_out) {
  const int n_in = context->num_inputs();
  const int n_out = context->num_outputs();
  const int data_in = GetTensorDataIndex(idx_in, n_in);
  const int meta_in = GetTensorMetaDataIndex(idx_in, n_in);
  const int data_out = GetTensorDataIndex(idx_out, n_out);
  const int meta_out = GetTensorMetaDataIndex(idx_out, n_out);

  const bool is_ref = IsRefType(context->input_dtype(data_in));
  if (is_ref) {
    context->forward_ref_input_to_ref_output(data_in, data_out);
    context->forward_ref_input_to_ref_output(meta_in, meta_out);
  } else {
    context->set_output(data_out, context->input(data_in));
    context->set_output(meta_out, context->input(meta_in));
  }
}
#ifndef INTEL_MKL_ML
// Forwards the data tensor from input `idx_in` to output `idx_out` while
// attaching the caller-supplied `mkl_shape` as the output's meta tensor.
inline void ForwardMklTensorInToOutWithMklShape(OpKernelContext* context,
                                                int idx_in, int idx_out,
                                                const MklDnnShape& mkl_shape) {
  const int in_slot = GetTensorDataIndex(idx_in, context->num_inputs());
  const int out_slot = GetTensorDataIndex(idx_out, context->num_outputs());

  AllocateOutputSetMklShape(context, idx_out, mkl_shape);

  if (IsRefType(context->input_dtype(in_slot))) {
    context->forward_ref_input_to_ref_output(in_slot, out_slot);
  } else {
    context->set_output(out_slot, context->input(in_slot));
  }
}
#endif
// Forward the MKL shape ONLY (used in elementwise and other ops where
// we call the eigen implementation and MKL shape is not used)
//
// @param context     kernel context whose meta tensors are forwarded
// @param idx_data_in logical input index whose meta tensor is forwarded
// @param idx_data_out logical output index receiving the meta tensor
inline void ForwardMklMetaDataInToOut(OpKernelContext* context,
                                      uint32 idx_data_in,
                                      uint32 idx_data_out) {
  // Consistency fix: both parameters now use the `uint32` alias; the original
  // mixed `uint32` and `uint32_t` (same underlying type, inconsistent names).
  uint32 idx_meta_in =
      GetTensorMetaDataIndex(idx_data_in, context->num_inputs());
  uint32 idx_meta_out =
      GetTensorMetaDataIndex(idx_data_out, context->num_outputs());
  if (IsRefType(context->input_dtype(idx_data_in))) {
    // Ref inputs must be forwarded as refs so aliasing is preserved.
    context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
  } else {
    context->set_output(idx_meta_out, context->input(idx_meta_in));
  }
}
// Set a dummy MKL shape (called when the output is in TF format), i.e. the
// meta tensor marks the output as a plain TensorFlow-layout tensor.
inline void SetDummyMklShapeOutput(OpKernelContext* context,
                                   uint32 idx_data_out) {
  MklShape dummy;
  dummy.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, dummy);
}
#ifdef INTEL_MKL_ML
// We don't need these functions in MKLDNN. We have defined equality operator
// on MklDnnShape class directly.
// Checks if the TF shape for both MKL tensors is the same or not
// Returns: true if both TF shapes are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
const MklShape* input_shape_1) {
// Check for number of dimensions
if (input_shape_0->GetDimension() != input_shape_1->GetDimension()) {
return false;
}
// Check size of each dimension
size_t ndims = input_shape_0->GetDimension();
for (size_t i = 0; i < ndims; i++) {
if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
return false;
}
}
return true;
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
const TensorShape* input_shape_1) {
// Check for number of dimensions
if (input_shape_0->GetDimension() != input_shape_1->dims()) {
return false;
}
// Check size of each dimension
size_t ndims = input_shape_0->GetDimension();
for (size_t i = 0; i < ndims; i++) {
if (input_shape_0->tf_dim_size(i) != input_shape_1->dim_size(i)) {
return false;
}
}
return true;
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
// Simply delegates to the (MklShape*, TensorShape*) overload above with the
// arguments swapped; shape equality is symmetric.
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const MklShape* input_shape_1) {
  return MklCompareShapes(input_shape_1, input_shape_0);
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const TensorShape* input_shape_1) {
  const int rank = input_shape_0->dims();
  // Differing ranks can never be equal shapes.
  if (rank != input_shape_1->dims()) return false;
  for (int d = 0; d < rank; ++d) {
    if (input_shape_0->dim_size(d) != input_shape_1->dim_size(d)) return false;
  }
  return true;
}
#endif
// These functions do not compile with MKL-DNN since mkl.h is missing.
// We may need to remove them later.
// TODO(intel_tf): Remove this routine when faster MKL layout conversion is
// out.
// Relayouts a 4-D float tensor from NHWC to NCHW into `*output`.
// For each image n, mkl_somatcopy('R', 'T', ...) performs an out-of-place
// transpose of the (H*W) x C row-major matrix into a C x (H*W) matrix,
// which is exactly the NHWC -> NCHW relayout for that image.
// NOTE(review): assumes `input` is 4-D float NHWC and `*output` is already
// allocated with the same total element count — confirm at call sites.
// The OpenMP thread count is hard-coded to 16 regardless of core count.
inline void MklNHWCToNCHW(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();
  int64 N = input.dim_size(0);
  int64 H = input.dim_size(1);
  int64 W = input.dim_size(2);
  int64 C = input.dim_size(3);
  int64 stride_n = H * W * C;  // elements per image
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    mkl_somatcopy('R', 'T', H * W, C, 1, buf_in + n * stride_n, C,
                  buf_out + n * stride_n, H * W);
  }
}
// Inverse of MklNHWCToNCHW: relayouts a 4-D float tensor from NCHW to NHWC.
// Per image, transposes the C x (H*W) row-major matrix into (H*W) x C.
// Note that N/H/W/C are read from the *output* tensor here (which is in
// NHWC order), unlike MklNHWCToNCHW which reads them from the input.
// NOTE(review): assumes both tensors are 4-D float with equal element
// counts — confirm at call sites. Thread count hard-coded to 16.
inline void MklNCHWToNHWC(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();
  int64 N = (*output)->dim_size(0);
  int64 H = (*output)->dim_size(1);
  int64 W = (*output)->dim_size(2);
  int64 C = (*output)->dim_size(3);
  int64 stride_n = H * W * C;  // elements per image
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    mkl_somatcopy('R', 'T', C, H * W, 1, buf_in + n * stride_n, H * W,
                  buf_out + n * stride_n, C);
  }
}
// -------------------------------------------------------------------
#ifndef INTEL_MKL_ML
/// Return MKL-DNN data type (memory::data_type) for input type T
///
/// @input None
/// @return memory::data_type corresponding to type T
///
/// The primary template is only declared, never defined: using MklDnnType
/// with a type that has no explicit specialization below fails at link time
/// by design.
template <typename T>
static memory::data_type MklDnnType();

/// Instantiation for float type. Add similar instantiations for other
/// type if needed.
template <>
memory::data_type MklDnnType<float>() {
  return memory::data_type::f32;
}
/// Map TensorFlow's data format into MKL-DNN data format
///
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
///          Fails with an error if invalid data format.
inline memory::format TFDataFormatToMklDnnDataFormat(TensorFormat format) {
  switch (format) {
    case FORMAT_NHWC:
      return memory::format::nhwc;
    case FORMAT_NCHW:
      return memory::format::nchw;
    default:
      TF_CHECK_OK(
          Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
      // Unreachable when TF_CHECK_OK aborts; silences the compiler warning.
      return memory::format::format_undef;
  }
}
/// Map MKL-DNN data format to TensorFlow's data format
///
/// @input: memory::format
/// @return: Tensorflow data format corresponding to memory::format
///          Fails with an error if invalid data format.
inline TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format) {
  switch (format) {
    case memory::format::nhwc:
      return FORMAT_NHWC;
    case memory::format::nchw:
      return FORMAT_NCHW;
    default:
      TF_CHECK_OK(
          Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
      // Unreachable when TF_CHECK_OK aborts; silences the compiler warning.
      return FORMAT_NHWC;
  }
}
/// Map TensorShape object into memory::dims required by MKL-DNN
///
/// This function will simply map input TensorShape into MKL-DNN dims
/// naively. So it will preserve the order of dimensions. E.g., if
/// input tensor is in NHWC format, then dims will be in NHWC format
/// also.
///
/// @input TensorShape object in shape
/// @return memory::dims corresponding to TensorShape
inline memory::dims TFShapeToMklDnnDims(const TensorShape& shape) {
  const int rank = shape.dims();
  memory::dims result(rank);
  for (int axis = 0; axis < rank; ++axis) {
    result[axis] = shape.dim_size(axis);
  }
  return result;
}
/// Map TensorShape object into memory::dims in NCHW format required by MKL-DNN
///
/// This function is a specific one than above function. It will map input
/// TensorShape into MKL-DNN dims in NCHW format. So it may not preserve the
/// order of dimensions. E.g., if input tensor is in NHWC format, then dims
/// will be in NCHW format, and not in NHWC format.
///
/// @input TensorShape object in shape
/// @return memory::dims in MKL-DNN required NCHW format
inline memory::dims TFShapeToMklDnnDimsInNCHW(const TensorShape& shape,
                                              TensorFormat format) {
  // Fail fast on an invalid / unsupported TensorFlow format.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  // Pull out the N, C, H, W sizes in that fixed order, wherever `format`
  // places them in the TF shape.
  memory::dims nchw;
  for (char dim : {'N', 'C', 'H', 'W'}) {
    nchw.push_back(shape.dim_size(GetTensorDimIndex(format, dim)));
  }
  return nchw;
}
/// Overloaded version of function above. Input parameters are
/// self-explanatory.
inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
                                     TensorFormat format) {
  // Fail fast on an invalid / unsupported TensorFlow format.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  // Reorder the given TF-order dims into the fixed NCHW order.
  memory::dims nchw;
  for (char dim : {'N', 'C', 'H', 'W'}) {
    nchw.push_back(in_dims[GetTensorDimIndex(format, dim)]);
  }
  return nchw;
}
/// Map MklDnn memory::dims object into TensorShape object.
///
/// This function will simply map input shape in MKL-DNN memory::dims format
/// in Tensorflow's TensorShape object by preserving dimension order.
///
/// @input MKL-DNN memory::dims object
/// @output TensorShape corresponding to memory::dims
inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {
  // Copy the sizes across in order; MakeShape validates them.
  std::vector<int32> sizes(dims.begin(), dims.end());
  TensorShape shape;
  CHECK_EQ(TensorShapeUtils::MakeShape(sizes, &shape).ok(), true);
  return shape;
}
/// Function to calculate strides given tensor shape in Tensorflow order
/// E.g., if dims_tf_order is {1, 2, 3, 4}, then as per Tensorflow convention,
/// dimension with size 1 is outermost dimension; while dimension with size 4
/// is innermost dimension. So strides for this tensor would be {4 * 3 * 2,
/// 4 * 3, 4, 1}, i.e., {24, 12, 4, 1}.
///
/// @input Tensorflow shape in memory::dims type
/// @return memory::dims containing strides for the tensor.
inline memory::dims CalculateTFStrides(const memory::dims& dims_tf_order) {
  CHECK_GT(dims_tf_order.size(), 0);
  const int rank = dims_tf_order.size();
  memory::dims strides(rank);
  // Innermost dimension always has stride 1; each outer stride is the
  // product of all inner dimension sizes.
  strides[rank - 1] = 1;
  for (int d = rank - 2; d >= 0; --d) {
    strides[d] = strides[d + 1] * dims_tf_order[d + 1];
  }
  return strides;
}
// Maps a TensorFlow Padding enum to an MKL-DNN padding_kind.
// The argument is intentionally unused: MKL-DNN only models zero padding,
// so every TF padding mode maps to padding_kind::zero.
inline padding_kind TFPaddingToMklDnnPadding(Padding pad) {
  // MKL-DNN only supports zero padding.
  return padding_kind::zero;
}
/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
///         function such as CalculateTFStrides to compute strides
///         for given dimensions.
/// @return: memory::desc object corresponding to blocked memory format
///          for given dimensions and strides.
inline memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
                                               const memory::dims& strides,
                                               memory::data_type dtype) {
  CHECK_EQ(dim.size(), strides.size());
  // We have to construct memory descriptor in a C style. This is not at all
  // ideal but MKLDNN does not offer any API to construct descriptor in
  // blocked format except a copy constructor that accepts
  // mkldnn_memory_desc_t.
  mkldnn_memory_desc_t md;
  md.primitive_kind = mkldnn_memory;
  md.ndims = dim.size();
  md.format = mkldnn_blocked;
  md.data_type = memory::convert_to_c(dtype);

  for (size_t i = 0; i < dim.size(); i++) {
    // Plain (non-tiled) layout: block size 1 in every dimension, so the
    // outer strides carry the entire layout and inner strides are 1.
    md.layout_desc.blocking.block_dims[i] = 1;
    md.layout_desc.blocking.strides[1][i] = 1;           // within-block stride
    md.layout_desc.blocking.strides[0][i] = strides[i];  // between-block stride
    // No padding: padded size equals the logical size, with zero offset.
    md.layout_desc.blocking.padding_dims[i] = dim[i];
    md.layout_desc.blocking.offset_padding_to_data[i] = 0;
    md.dims[i] = dim[i];
  }
  md.layout_desc.blocking.offset_padding = 0;

  return memory::desc(md);
}
/*
 * Class to represent all the resources corresponding to a tensor in TensorFlow
 * that are required to execute an operation (such as Convolution).
 *
 * NOTE(review): this class owns raw pointers (deleted in the destructor) but
 * declares no copy constructor / copy assignment, so a compiler-generated
 * copy would double-delete them — confirm instances are never copied.
 * Likewise, calling SetUsrMem twice overwrites user_memory_ without deleting
 * the previous allocation (leak) — confirm single-call usage at call sites.
 */
template <typename T>
class MklDnnData {
 private:
  /// MKL-DNN memory primitive for input user memory
  memory* user_memory_;

  /// MKL-DNN memory primitive in case input or output reorder is needed.
  memory* reorder_memory_;

  /// Operations memory descriptor
  memory::desc* op_md_;

  /// CPU engine on which operation will be executed
  const engine* cpu_engine_;

 public:
  explicit MklDnnData(const engine* e)
      : user_memory_(nullptr),
        reorder_memory_(nullptr),
        op_md_(nullptr),
        cpu_engine_(e) {}

  ~MklDnnData() {
    cpu_engine_ = nullptr;  // We don't own this.
    delete (user_memory_);
    delete (reorder_memory_);
    delete (op_md_);
  }

  // Returns the raw data buffer of `tensor` as a mutable void*.
  // The const_cast is needed because Tensor::flat on a const Tensor yields a
  // const pointer, while MKL-DNN memory objects take a non-const handle.
  inline void* GetTensorBuffer(const Tensor* tensor) const {
    CHECK_NOTNULL(tensor);
    return const_cast<void*>(
        static_cast<const void*>(tensor->flat<T>().data()));
  }

  /// Set user memory primitive using specified dimensions, memory format and
  /// data_buffer. Function automatically uses element data type by using
  /// input type T used for creating call object.
  ///
  /// In a nutshell, function allows user to describe the input tensor to
  /// an operation. E.g., filter of Conv2D is of shape {1, 2, 3, 4}, and
  /// memory format HWIO, and the buffer that contains actual values is
  /// pointed by data_buffer.
  inline void SetUsrMem(const memory::dims& dim, memory::format fm,
                        void* data_buffer = nullptr) {
    auto md = memory::desc(dim, MklDnnType<T>(), fm);
    SetUsrMem(md, data_buffer);
  }

  // Convenience overload: same as above, taking the buffer from `tensor`.
  inline void SetUsrMem(const memory::dims& dim, memory::format fm,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(dim, fm, GetTensorBuffer(tensor));
  }

  /// Helper function to create memory descriptor in Blocked format
  ///
  /// @input: Tensor dimensions
  /// @input: strides corresponding to dimensions. One can use utility
  ///         function such as CalculateTFStrides to compute strides
  ///         for given dimensions.
  /// @return: memory::desc object corresponding to blocked memory format
  ///          for given dimensions and strides.
  static inline memory::desc CreateBlockedMemDesc(const memory::dims& dim,
                                                  const memory::dims& strides) {
    return CreateBlockedMemDescHelper(dim, strides, MklDnnType<T>());
  }

  /// A version of SetUsrMem call that allows user to create memory in blocked
  /// format. So in addition to accepting dimensions, it also accepts strides.
  /// This allows user to create memory for tensor in a format that is not
  /// supported by MKLDNN. E.g., MKLDNN does not support tensor format for 6
  /// dimensional tensor as a native format. But by using blocked format, a
  /// user can create memory for 6D tensor.
  inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
                        void* data_buffer = nullptr) {
    CHECK_EQ(dim.size(), strides.size());
    auto blocked_md = MklDnnData<T>::CreateBlockedMemDesc(dim, strides);
    SetUsrMem(blocked_md, data_buffer);
  }

  // Blocked-format overload taking the buffer from `tensor`.
  inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(dim, strides, GetTensorBuffer(tensor));
  }

  /// A version of function to set user memory primitive that accepts memory
  /// descriptor directly, instead of accepting dimensions and format. This
  /// function is more generic that the one above, but the function above is
  /// sufficient in most cases.
  inline void SetUsrMem(const memory::desc& md, void* data_buffer = nullptr) {
    auto pd = memory::primitive_desc(md, *cpu_engine_);
    SetUsrMem(pd, data_buffer);
  }

  /// A version of SetUsrMem with memory descriptor and tensor
  inline void SetUsrMem(const memory::desc& md, const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(md, GetTensorBuffer(tensor));
  }

  /// A version of function to set user memory primitive that accepts primitive
  /// descriptor directly, instead of accepting dimensions and format. This
  /// function is more generic that the one above, but the function above is
  /// sufficient in most cases.
  inline void SetUsrMem(const memory::primitive_desc& pd,
                        void* data_buffer = nullptr) {
    CHECK_NOTNULL(cpu_engine_);
    // TODO(nhasabni): can we remove dynamic memory allocation?
    if (data_buffer) {
      user_memory_ = new memory(pd, data_buffer);
    } else {
      user_memory_ = new memory(pd);
    }
  }

  /// A version of SetUsrMem with primitive descriptor and tensor
  inline void SetUsrMem(const memory::primitive_desc& pd,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(pd, GetTensorBuffer(tensor));
  }

  /// Get function for user memory primitive.
  inline const memory* GetUsrMem() const { return user_memory_; }

  /// Get function for primitive descriptor of user memory primitive.
  inline const memory::primitive_desc GetUsrMemPrimDesc() const {
    CHECK_NOTNULL(user_memory_);
    return user_memory_->get_primitive_desc();
  }

  /// Get function for descriptor of user memory.
  inline memory::desc GetUsrMemDesc() {
    // This is ugly. Why MKL-DNN does not provide desc() method of const type??
    const memory::primitive_desc pd = GetUsrMemPrimDesc();
    return const_cast<memory::primitive_desc*>(&pd)->desc();
  }

  /// Get function for data buffer of user memory primitive.
  inline void* GetUsrMemDataHandle() const {
    CHECK_NOTNULL(user_memory_);
    return user_memory_->get_data_handle();
  }

  /// Set function for data buffer of user memory primitive.
  inline void SetUsrMemDataHandle(void* data_buffer) {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(data_buffer);
    user_memory_->set_data_handle(data_buffer);
  }

  /// Set function for data buffer of user memory primitive.
  inline void SetUsrMemDataHandle(const Tensor* tensor) {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(tensor);
    user_memory_->set_data_handle(GetTensorBuffer(tensor));
  }

  /// Get the memory primitive for input and output of an op. If inputs
  /// to an op require reorders, then this function returns memory primitive
  /// for reorder. Otherwise, it will return memory primitive for user memory.
  ///
  /// E.g., Conv2D(I, F) is a primitive with I and F being inputs. Then to
  /// execute Conv2D, we need memory primitive for I and F. Buf if reorder is
  /// required for I and F (say I_r is reorder primitive for I; F_r is reorder
  /// primitive for F), then we need I_r and F_r to perform Conv2D.
  inline const memory& GetOpMem() const {
    return reorder_memory_ ? *reorder_memory_ : *user_memory_;
  }

  /// Set memory descriptor of an operation in terms of dimensions and memory
  /// format. E.g., For Conv2D, the dimensions would be same as user dimensions
  /// but memory::format would be mkldnn::any because we want MKL-DNN to choose
  /// best layout/format for given input dimensions.
  inline void SetOpMemDesc(const memory::dims& dim, memory::format fm) {
    // TODO(nhasabni): can we remove dynamic memory allocation?
    op_md_ = new memory::desc(dim, MklDnnType<T>(), fm);
  }

  /// Get function for memory descriptor for an operation
  inline const memory::desc& GetOpMemDesc() const { return *op_md_; }

  /// Predicate that checks if we need to reorder user's memory into memory
  /// pointed by op_pd.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool IsReorderNeeded(const memory::primitive_desc& op_pd) const {
    CHECK_NOTNULL(user_memory_);
    return op_pd != user_memory_->get_primitive_desc();
  }

  /// Predicate that checks if we need to reorder user's memory into memory
  /// based on the provided format.
  ///
  /// @input: target_format - memory format of the given input of an
  ///         operation
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool IsReorderNeeded(const memory::format& target_format) const {
    CHECK_NOTNULL(user_memory_);
    return target_format !=
           user_memory_->get_primitive_desc().desc().data.format;
  }

  /// Function to create a reorder from memory pointed by from to memory
  /// pointed by to. Returns created primitive.
  inline primitive CreateReorder(const memory* from, const memory* to) const {
    CHECK_NOTNULL(from);
    CHECK_NOTNULL(to);
    return reorder(*from, *to);
  }

  /// Function to handle input reordering
  ///
  /// Check if we need to reorder this input of an operation.
  /// Return true and allocate reorder memory primitive if reorder is needed.
  /// Otherwise, return false and do not allocate reorder memory primitive.
  ///
  /// To check if reorder is needed, this function compares memory primitive
  /// descriptor of an operation (op_pd) for the given input with the
  /// user-specified memory primitive descriptor.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd);
      net->push_back(CreateReorder(user_memory_, reorder_memory_));
      return true;
    }
    return false;
  }

  /// Overloaded version of above function that accepts memory buffer
  /// where output of reorder needs to be stored.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @reorder_data_handle - memory buffer where output of reorder needs to be
  ///                        stored. Primitive does not check if buffer is
  ///                        enough size to write.
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  void* reorder_data_handle,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(reorder_data_handle);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd, reorder_data_handle);
      net->push_back(CreateReorder(user_memory_, reorder_memory_));
      return true;
    }
    return false;
  }

  /// Another overloaded version of CheckReorderToOpMem that accepts Tensor
  /// where output of reorder needs to be stored.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @reorder_tensor - Tensor whose buffer is to be used to store output of
  ///                   reorder. Primitive does not check if buffer is
  ///                   enough size to write.
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  Tensor* reorder_tensor,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(reorder_tensor);
    return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor), net);
  }

  /// Function to handle output reorder
  ///
  /// This function performs very similar functionality as input reordering
  /// function above. The only difference is that this function does not add
  /// reorder primitive to the net. The reason for this is: the reorder
  /// primitive for output needs to be added to the list only after operation
  /// has executed. But we need to prepare a temporary buffer in case output
  /// reorder is needed. And this temporary buffer will hold the output of
  /// an operation before it is fed to reorder primitive.
  ///
  /// @input memory primitive descriptor for the given output of an operation
  /// @return: true in case reorder of output is needed; false, otherwise.
  inline bool PrepareReorderToUserMemIfReq(
      const memory::primitive_desc& op_pd) {
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd);
      return true;
    }
    return false;
  }

  /// Function to actually insert reorder primitive in the net
  ///
  /// This function completes remaining part of output reordering. It inserts
  /// a reordering primitive from the temporary buffer that holds the output
  /// to the user-specified output buffer.
  ///
  /// @input: net - net to which to add reorder primitive
  inline void InsertReorderToUserMem(std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(reorder_memory_);
    net->push_back(CreateReorder(reorder_memory_, user_memory_));
  }
};
/// Base class for operations with reuse of DNN primitives
///
/// Concrete cached primitives derive from this so DnnOpFactory (below) can
/// store them behind a common pointer type.
class DnnOp {
 public:
  virtual ~DnnOp() {}

  // Dummy data. Its size, hard-coded as 256 here, does
  // not matter since MKL should never operate on this buffer.
  unsigned char DummyData[256];
};
// Empty dims value, usable wherever a primitive needs a "no dimensions"
// argument.
const mkldnn::memory::dims NONE_DIMS = {};
// This constant is used to declare dummy buffer (size), for MKL primitives
// NOTE(review): the comment above appears misplaced — it seems to describe
// DnnOp::DummyData[256], not the factory class that follows. Confirm and
// relocate.
template <typename T>
class DnnOpFactory {
public:
DnnOpFactory() {}
~DnnOpFactory() {}
DnnOp* GetOp(const std::string& key) {
auto stream_iter = DnnOpFactory<T>::GetHashMap().find(key);
if (stream_iter == DnnOpFactory<T>::GetHashMap().end()) {
return nullptr;
} else {
return stream_iter->second;
}
}
void SetOp(const std::string& key, DnnOp* op) {
auto stream_iter = DnnOpFactory<T>::GetHashMap().find(key);
CHECK(stream_iter == DnnOpFactory<T>::GetHashMap().end());
DnnOpFactory<T>::GetHashMap()[key] = op;
}
private:
static inline std::unordered_map<std::string, DnnOp*> &GetHashMap() {
static thread_local std::unordered_map<std::string, DnnOp*> map_;
return map_;
}
};
// Utility class for creating keys of MKL primitive pool.
//
// Each AddAsKey() call appends a binary encoding of its argument followed by
// the 'x' delimiter to an internal string; GetKey() returns the accumulated
// key.
class FactoryKeyCreator {
 public:
  FactoryKeyCreator() {
    key_.reserve(kMaxKeyLength);
  }

  ~FactoryKeyCreator() {}

  // Append a string component.
  void AddAsKey(const string &str) {
    // Fix: c_str() already yields const char*, so the original
    // reinterpret_cast was redundant and has been removed.
    Append(str.c_str(), str.length());
  }

  // Append every element of an MKL-DNN dims vector.
  void AddAsKey(const mkldnn::memory::dims &dims) {
    // Fix: use size_t for the index to match dims.size() (was unsigned int).
    for (size_t i = 0; i < dims.size(); i++) {
      AddAsKey<int>(dims[i]);
    }
  }

  // Append the raw bytes of an arbitrary (trivially copyable) value.
  template <typename T>
  void AddAsKey(const T data) {
    auto buffer = reinterpret_cast<const char *>(&data);
    Append(buffer, sizeof(T));
  }

  // Returns the accumulated key. Now const: it does not mutate state.
  std::string GetKey() const {
    return key_;
  }

 private:
  string key_;
  const char delimiter = 'x';
  const int kMaxKeyLength = 256;

  // Append `len` raw bytes, then the delimiter.
  void Append(const char* data, int len) {
    key_.append(data, len);
    key_.append(1, delimiter);
  }
};
#endif // INTEL_MKL_DNN
} // namespace tensorflow
#endif // INTEL_MKL
#endif // TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
|
comm.h | /**
* Copyright (c) 2015 by Contributors
*/
#ifndef MXNET_KVSTORE_COMM_H_
#define MXNET_KVSTORE_COMM_H_
#include <string>
#include <algorithm>
#include <utility>
#include <limits>
#include <vector>
#include <tuple>
#include "mxnet/ndarray.h"
namespace mxnet {
namespace kvstore {
/**
 * \brief multiple device communication (abstract interface)
 */
class Comm {
 public:
  Comm() {
    // All CPU-side staging uses pinned memory for faster device transfers.
    pinned_ctx_ = Context::CPUPinned(0);
  }
  virtual ~Comm() { }
  /**
   * \brief init key with the data shape
   */
  virtual void Init(int key, const TShape& shape, int dtype = mshadow::kFloat32) = 0;
  /**
   * \brief returns src[0] + .. + src[src.size()-1]
   */
  virtual const NDArray& Reduce(
      int key, const std::vector<NDArray>& src, int priority) = 0;
  /**
   * \brief copy from src to dst[i] for every i
   */
  virtual void Broadcast(
      int key, const NDArray& src,
      const std::vector<NDArray*> dst, int priority) = 0;
  /**
   * \brief return a pinned context
   */
  Context pinned_ctx() const {
    return pinned_ctx_;
  }

 protected:
  // CPU-pinned context shared by all subclasses for staging buffers.
  Context pinned_ctx_;
};
/**
 * \brief an implementation of Comm that first copies data to CPU memory, and
 * then reduces there
 */
class CommCPU : public Comm {
 public:
  CommCPU() {
    // Number of OpenMP threads used by ReduceSumCPUImpl.
    nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4);
    // Arrays at least this many elements use the multi-threaded reduce path.
    bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000);
  }
  virtual ~CommCPU() { }

  // Pre-allocate the CPU-pinned merge buffer for `key`.
  void Init(int key, const TShape& shape, int type = mshadow::kFloat32) override {
    merge_buf_[key].merged = NDArray(shape, pinned_ctx_, false, type);
  }

  // Sums all arrays in `src` into the key's CPU merge buffer and returns it.
  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      return src[0];
    }
    std::vector<Engine::VarHandle> const_vars(src.size() - 1);
    std::vector<NDArray> reduce(src.size());
    auto& buf = merge_buf_[key];
    // First source goes directly into the merge buffer; the remaining ones
    // are staged through per-key pinned CPU copy buffers (allocated lazily).
    CopyFromTo(src[0], &buf.merged, priority);
    reduce[0] = buf.merged;

    if (buf.copy_buf.empty()) {
      buf.copy_buf.resize(src.size()-1);
      for (size_t j = 0; j < src.size() - 1; ++j) {
        buf.copy_buf[j] = NDArray(
            src[0].shape(), pinned_ctx_, false, src[0].dtype());
      }
    }
    for (size_t i = 1; i < src.size(); ++i) {
      CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority);
      reduce[i] = buf.copy_buf[i-1];
      const_vars[i-1] = reduce[i].var();
    }

    // Schedule the actual in-place summation (into reduce[0]) on the engine;
    // copy buffers are read-only inputs, the merge buffer is mutated.
    Engine::Get()->PushSync([reduce, this](RunContext rctx) {
        ReduceSumCPU(reduce);
      }, Context::CPU(), const_vars, {reduce[0].var()},
      FnProperty::kCPUPrioritized, priority, PROFILER_MESSAGE("KVStoreReduce"));

    return buf.merged;
  }

  // Copies `src` into every array in `dst`, staging through the CPU merge
  // buffer when `src` lives on a non-CPU device.
  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    int mask = src.ctx().dev_mask();
    if (mask == Context::kCPU) {
      for (auto d : dst) CopyFromTo(src, d, priority);
    } else {
      // first copy data to cpu, then broadcast
      auto& buf = merge_buf_[key];
      CopyFromTo(src, &buf.merged, priority);
      for (auto d : dst) CopyFromTo(buf.merged, d, priority);
    }
  }

 private:
  // reduce sum into val[0]
  // Dispatches on dtype, collects raw contiguous pointers, and forwards to
  // the typed implementation.
  inline void ReduceSumCPU(const std::vector<NDArray> &in_data) {
    MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, {
      std::vector<DType*> dptr(in_data.size());
      for (size_t i = 0; i < in_data.size(); ++i) {
        TBlob data = in_data[i].data();
        CHECK(data.CheckContiguous());
        dptr[i] = data.FlatTo2D<cpu, DType>().dptr_;
      }
      size_t total = in_data[0].shape().Size();
      ReduceSumCPUImpl(dptr, total);
    });
  }

  // Sums dptr[1..n-1][offset..offset+size) into dptr[0], four source arrays
  // per expression to amortize the pass over the accumulator; the switch
  // handles the 1/2/3-array remainder on the final iteration.
  template<typename DType>
  inline static void ReduceSumCPU(
      const std::vector<DType*> &dptr, size_t offset, index_t size) {
    using namespace mshadow;  // NOLINT(*)
    Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size));
    for (size_t i = 1; i < dptr.size(); i+=4) {
      switch (dptr.size() - i) {
        case 1: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          in_0 += in_1;
          break;
        }
        case 2: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
          in_0 += in_1 + in_2;
          break;
        }
        case 3: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
          in_0 += in_1 + in_2 + in_3;
          break;
        }
        default: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size));
          in_0 += in_1 + in_2 + in_3 + in_4;
          break;
        }
      }
    }
  }

  // Splits [0, total) into fixed-size chunks and reduces each chunk; small
  // arrays (or single-thread config) take the serial path, otherwise chunks
  // are distributed across OpenMP threads.
  template<typename DType>
  inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) {
    const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10));
    long ntask = (total + step - 1) / step; // NOLINT(*)
    if (total < bigarray_bound_ || nthread_reduction_ <= 1) {
      ReduceSumCPU(dptr, 0, total);
    } else {
      #pragma omp parallel for schedule(static) num_threads(nthread_reduction_)
      for (long j = 0; j < ntask; ++j) { // NOLINT(*)
        size_t k = static_cast<size_t>(j);
        size_t begin = std::min(k * step, total);
        size_t end = std::min((k + 1) * step, total);
        // The last chunk must land exactly on `total`.
        if (j == ntask - 1) CHECK_EQ(end, total);
        ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin));
      }
    }
  }

  /// \brief temporal space for pushing and pulling
  struct BufferEntry {
    /// \brief the merged value
    NDArray merged;
    /// \brief the cpu buffer for gpu data
    std::vector<NDArray> copy_buf;
  };
  // Per-key staging buffers.
  std::unordered_map<int, BufferEntry> merge_buf_;
  // Element-count threshold above which reduction is multi-threaded.
  size_t bigarray_bound_;
  // OpenMP thread count for the parallel reduction path.
  int nthread_reduction_;
};
/**
* \brief an implementation of Comm that performs reduction on device
* directly.
*
* It is faster if the total device-to-device bandwidths is larger than
* device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device
* memory.
*/
class CommDevice : public Comm {
 public:
  CommDevice() {
    inited_ = false;
  }

  virtual ~CommDevice() { }

  // Only records the (key, shape, dtype) attributes; buffer allocation is
  // deferred until the first Reduce call, when the device set is known.
  void Init(int key, const TShape& shape, int dtype = mshadow::kFloat32) override {
    sorted_key_attrs_.push_back(std::make_tuple(key, shape, dtype));
  }

  // Sums `src` (one NDArray per device) into this key's merge buffer and
  // returns it.  The returned reference stays owned by merge_buf_.
  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      return src[0];
    }
    // Lazy one-time setup: spread merge buffers across the participating
    // devices and, unless MXNET_ENABLE_GPU_P2P=0, enable GPU peer access.
    if (!inited_) {
      std::vector<Context> devs;
      for (const auto& a : src) {
        devs.push_back(a.ctx());
      }
      InitMergeBuffer(devs);
      if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) {
        EnableP2P(devs);
      }
    }
    auto& buf = merge_buf_[key];
    std::vector<NDArray> reduce(src.size());
    // First source is copied straight into the merge buffer ...
    CopyFromTo(src[0], &(buf.merged), priority);
    reduce[0] = buf.merged;
    if (buf.copy_buf.empty()) {
      // TODO(mli) this results in large device memory usage for huge ndarray,
      // such as the largest fullc in VGG. consider to do segment reduce with
      // NDArray.Slice or gpu direct memory access. for the latter, we need to
      // remove some ctx check, and also it reduces 20% perf
      buf.copy_buf.resize(src.size()-1);
      for (size_t i = 0; i < src.size()-1; ++i) {
        buf.copy_buf[i] = NDArray(
          buf.merged.shape(), buf.merged.ctx(), false, buf.merged.dtype());
      }
    }
    // ... the remaining sources are staged into per-source buffers that live
    // on the merge buffer's device, then summed in one ElementwiseSum.
    for (size_t i = 0; i < src.size()-1; ++i) {
      CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority);
      reduce[i+1] = buf.copy_buf[i];
    }
    ElementwiseSum(reduce, &buf.merged);
    return buf.merged;
  }

  // Copies `src` to every destination.  Before initialization there is no
  // merge buffer, so one destination is used as the relay instead.
  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    if (!inited_) {
      // copy to a random device first
      int dev_id = key % dst.size();
      CopyFromTo(src, dst[dev_id], priority);
      for (size_t i = 0; i < dst.size(); ++i) {
        if (i != static_cast<size_t>(dev_id)) {
          CopyFromTo(*dst[dev_id], dst[i], priority);
        }
      }
    } else {
      // Fan out through the key's merge buffer on its assigned device.
      auto& buf = merge_buf_[key];
      CopyFromTo(src, &buf.merged, priority);
      for (auto d : dst) {
        CopyFromTo(buf.merged, d, priority);
      }
    }
  }

 private:
  // Enables CUDA peer-to-peer access between every GPU pair in `devs`; if not
  // all pairs can be enabled, logs a warning plus a per-row access matrix.
  void EnableP2P(const std::vector<Context>& devs) {
#if MXNET_USE_CUDA
    std::vector<int> gpus;
    for (const auto& d : devs) {
      if (d.dev_mask() == gpu::kDevMask) {
        gpus.push_back(d.dev_id);
      }
    }
    int n = static_cast<int>(gpus.size());
    int enabled = 0;
    std::vector<int> p2p(n*n);
    for (int i = 0; i < n; ++i) {
      cudaSetDevice(gpus[i]);
      for (int j = 0; j < n; j++) {
        int access;
        cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]);
        if (access) {
          // Already-enabled counts as success (another component may have
          // enabled the pair first).
          cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0);
          if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) {
            ++enabled;
            p2p[i*n+j] = 1;
          }
        }
      }
    }
    if (enabled != n*(n-1)) {
      // print warning info if not fully enabled
      LOG(WARNING) << "only " << enabled << " out of "
                   << n*(n-1) << " GPU pairs are enabled direct access. "
                   << "It may affect the performance. "
                   << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off";
      std::string access(n, '.');
      for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
          access[j] = p2p[i*n+j] ? 'v' : '.';
        }
        LOG(WARNING) << access;
      }
    }
#endif
  }

  using KeyAttrs = std::tuple<int, TShape, int>;
  // try to allocate buff on device evenly
  void InitMergeBuffer(const std::vector<Context>& devs) {
    // Largest arrays first so the greedy assignment below balances elements.
    std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), [](
              const KeyAttrs& a, const KeyAttrs& b) {
      return std::get<1>(a).Size() > std::get<1>(b).Size();
    });
    // dev_id -> (context, total elements already assigned to that device)
    std::unordered_map<int, std::pair<Context, size_t>> ctx_info;
    for (auto d : devs) {
      ctx_info[d.dev_id] = std::make_pair(d, 0);
    }
    for (size_t i = 0; i < sorted_key_attrs_.size(); ++i) {
      int key = std::get<0>(sorted_key_attrs_[i]);
      TShape s = std::get<1>(sorted_key_attrs_[i]);
      int type = std::get<2>(sorted_key_attrs_[i]);
      auto& buf = merge_buf_[key];
      // Pick the least-loaded device for this key's merge buffer.
      Context ctx;
      size_t min_size = std::numeric_limits<size_t>::max();
      for (auto it = ctx_info.begin(); it != ctx_info.end(); ++it) {
        size_t size = it->second.second;
        if (size <= min_size) {
          ctx = it->second.first;
          min_size = size;
        }
      }
      buf.merged = NDArray(s, ctx, false, type);
      ctx_info[ctx.dev_id].second += s.Size();
    }
    inited_ = true;
  }

  // Keys registered by Init, sorted by size in InitMergeBuffer.
  std::vector<KeyAttrs> sorted_key_attrs_;
  /// \brief temporal space for pushing and pulling
  struct BufferEntry {
    /// \brief the merged value
    NDArray merged;
    /// \brief the gpu buffer
    std::vector<NDArray> copy_buf;
  };
  // Per-key merge buffers, allocated by InitMergeBuffer.
  std::unordered_map<int, BufferEntry> merge_buf_;
  // Set once InitMergeBuffer has run (on the first Reduce call).
  bool inited_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_COMM_H_
|
matrix_op-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file matrix_op-inl.h
* \brief Function definition of matrix related operators
*/
#ifndef MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#include <mxnet/operator_util.h>
#include <vector>
#include <string>
#include <algorithm>
#include <utility>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "../channel_op_common.h"
#include "../mxnet_op.h"
#include "broadcast_reduce_op.h"
#include "./init_op.h"
#include "../../common/static_array.h"
#include "./slice-inl.h"
#if MXNET_USE_CUDA
#include <thrust/device_vector.h>
#endif
#ifdef __CUDACC__
#include "./pseudo2DTranspose_op-inl.cuh"
#endif
namespace mxnet {
namespace op {
// Parameters of the Reshape operator.  `shape` (whose special values
// 0/-1/-2/-3/-4 are interpreted by InferReshapeShape) and `reverse` form the
// current interface; `target_shape` and `keep_highest` are deprecated.
struct ReshapeParam : public dmlc::Parameter<ReshapeParam> {
  mxnet::TShape target_shape;
  bool keep_highest;
  mxnet::Tuple<int> shape;
  bool reverse;
  DMLC_DECLARE_PARAMETER(ReshapeParam) {
    DMLC_DECLARE_FIELD(shape)
    .set_default(mxnet::Tuple<int>())
    .describe("The target shape");
    DMLC_DECLARE_FIELD(reverse)
    .set_default(false)
    .describe("If true then the special values are inferred from right to left");
    DMLC_DECLARE_FIELD(target_shape)
    .set_default(mxnet::TShape(0, -1))
    .describe("(Deprecated! Use ``shape`` instead.) "
              "Target new shape. One and only one dim can be 0, "
              "in which case it will be inferred from the rest of dims");
    DMLC_DECLARE_FIELD(keep_highest).set_default(false)
    .describe("(Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged."
              "If set to true, then the first dim in target_shape is ignored,"
              "and always fixed as input");
  }
  // Field-wise equality over all four parameters.
  bool operator==(const ReshapeParam &other) const {
    return this->target_shape == other.target_shape &&
           this->keep_highest == other.keep_highest &&
           this->shape == other.shape &&
           this->reverse == other.reverse;
  }
};
// Expands a reshape spec `shape` against the input shape `dshape` into a
// concrete output shape.  Special values inside the spec:
//   0  -> copy the corresponding input dim
//  -1  -> infer this dim from the remaining size (at most one allowed)
//  -2  -> copy all remaining input dims
//  -3  -> merge (multiply) the next two input dims into one
//  -4  -> split one input dim into the two following spec values
// With `reverse`, matching is performed right-to-left.
template<typename IType>
inline mxnet::TShape InferReshapeShape(const mxnet::Tuple<IType>& shape,
                                       const mxnet::TShape& dshape, bool reverse) {
  std::vector<IType> dshape_vec;
  std::vector<IType> param_shape_vec(shape.begin(), shape.end());
  for (int i = 0; i < dshape.ndim(); ++i) {
    dshape_vec.push_back(dshape[i]);
  }
  std::vector<IType> tmp;
  size_t src_idx = 0;  // next unconsumed input dim
  int inf_idx = -1;    // position of the single "-1" (infer) dim, if any
  if (reverse) {
    // Right-to-left semantics are implemented by reversing both sequences,
    // running the normal left-to-right pass, and reversing the result back.
    std::reverse(dshape_vec.begin(), dshape_vec.end());
    std::reverse(param_shape_vec.begin(), param_shape_vec.end());
  }
  auto dshape_len = dshape_vec.size();
  auto params_len = param_shape_vec.size();
  for (size_t i = 0; i < params_len; ++i) {
    IType proposed_dim = param_shape_vec[i];
    if (proposed_dim == 0) {
      // keep same
      CHECK_LT(src_idx, dshape_len);
      tmp.push_back(dshape_vec[src_idx++]);
    } else if (proposed_dim == -1) {
      // infer: placeholder 1 is written now and replaced below once the
      // product of all other dims is known.
      CHECK_LT(inf_idx, 0) << "One and only one dim can be inferred";
      inf_idx = i;
      tmp.push_back(1);
      src_idx++;
    } else if (proposed_dim == -2) {
      // copy all remaining dims from source
      while (src_idx < dshape_len) {
        const int dn = dshape_vec[src_idx++];
        tmp.push_back(dn);
      }
    } else if (proposed_dim == -3) {
      // merge two dims from source
      CHECK_LT(src_idx, dshape_len-1);
      const int d1 = dshape_vec[src_idx++];
      const int d2 = dshape_vec[src_idx++];
      // If either input dim is unknown, the merged dim is unknown too.
      if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) {
        tmp.push_back(-1);
      } else {
        tmp.push_back(d1 * d2);
      }
    } else if (proposed_dim == -4) {
      // split the source dim s into two dims
      // read the left dim and then the right dim (either can be -1)
      CHECK_LT(i + 2, params_len);
      CHECK_LT(src_idx, dshape_len);
      const int d0 = dshape_vec[src_idx++];
      IType d1 = param_shape_vec[++i];
      IType d2 = param_shape_vec[++i];
      CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1.";
      if (d1 == -1 && d0 >= 0) d1 = d0 / d2;  // d0 must be known to do this
      if (d2 == -1 && d0 >= 0) d2 = d0 / d1;  // d0 must be known to do this
      CHECK(d1 * d2 == static_cast<IType>(d0) || static_cast<IType>(d0) == IType(-1)) <<
        "Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0;
      tmp.push_back(d1);
      tmp.push_back(d2);
    } else {
      // greater than 0, new shape
      tmp.push_back(proposed_dim);
      src_idx++;
    }
  }
  if (inf_idx >= 0) {
    if (shape_is_known(dshape)) {
      // Replace the placeholder with total size / product of the other dims.
      IType new_size = 1;
      for (IType x : tmp) new_size *= x;
      tmp[inf_idx] = dshape.Size() / new_size;
    } else {
      tmp[inf_idx] = -1;
    }
  }
  if (reverse) {
    std::reverse(param_shape_vec.begin(), param_shape_vec.end());
    std::reverse(dshape_vec.begin(), dshape_vec.end());
    std::reverse(tmp.begin(), tmp.end());
  }
  mxnet::TShape oshape(tmp.begin(), tmp.end());
  return oshape;
}
// Fills in the single unknown dimension of *in from the fully-known output
// shape `out` (so that the dims of *in multiply to out.Size()).
// Returns true iff *in ends up fully known.
inline bool ReverseReshapeInferShape(mxnet::TShape *in, const mxnet::TShape& out) {
  if (shape_is_known(*in) && shape_is_known(out)) {
    return true;
  } else if (!shape_is_known(out)) {
    return false;
  } else {
    int zero_axis = -1;
    int known_dim_size_prod = 1;
    for (int i = 0; i < in->ndim(); i++) {
      if (!mxnet::dim_size_is_known(*in, i)) {
        if (zero_axis != -1)
          return false;  // more than 1 unknown dim found.
        else
          zero_axis = i;
      } else {
        known_dim_size_prod *= (*in)[i];
      }
    }
    // Fix: if *in has an unknown ndim the loop above never runs and zero_axis
    // stays -1 (previously indexed (*in)[-1], UB); and a known zero-sized dim
    // made known_dim_size_prod 0 (previously divided by zero).  Neither case
    // allows inference, so report failure instead.
    if (zero_axis == -1 || known_dim_size_prod == 0) {
      return false;
    }
    (*in)[zero_axis] = out.Size() / known_dim_size_prod;
    return true;
  }
}
// Shape inference for Reshape.  Resolves, in priority order: the `shape` spec
// (via InferReshapeShape), the deprecated `target_shape` (with its 0-means-
// infer convention), or — when neither is set — inverse inference from the
// already-known output shape.
inline bool ReshapeShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const ReshapeParam& param_ = nnvm::get<ReshapeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape &dshape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(dshape)) return false;
  mxnet::TShape oshape;
  if (param_.shape.ndim() != 0) {
    oshape = InferReshapeShape(param_.shape, dshape, param_.reverse);
  } else if (param_.target_shape.ndim() != -1) {
    LOG(INFO) << "Using target_shape will be deprecated.";
    oshape = param_.target_shape;
    int neg_count = 0;       // number of 0-valued (to-infer) output dims
    index_t inf_idx = 0;     // index of the dim to infer
    index_t start_idx = param_.keep_highest ? 1 : 0;
    if (param_.keep_highest) {
      // First output dim is pinned to the first input dim.
      oshape[0] = dshape[0];
    }
    for (int i = start_idx; i < oshape.ndim(); ++i) {
      if (oshape[i] == 0) {
        neg_count++;
        inf_idx = i;
      }
    }
    if (neg_count == 1) {
      // Temporarily set to 1 so oshape.Size() is the product of the others.
      oshape[inf_idx] = 1;
      oshape[inf_idx] = dshape.Size() / oshape.Size();
    }
  } else {
    // No spec at all: only inverse inference from the output is possible.
    return shape_is_known((*out_attrs)[0])
           && ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
  }
  ReverseReshapeInferShape(&dshape, oshape);
#if 0
  CHECK_EQ(oshape.Size(), dshape.Size())
    << "Target shape size is different to source. "
    << "Target: " << oshape
    << "\nSource: " << dshape;
#endif
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}
// Shape inference for Flatten: collapses all dims after the first into one,
// producing (dshape[0], prod(dshape[1:])).
inline bool FlattenShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape &dshape = (*in_attrs)[0];
  if (!shape_is_known(dshape)) return false;
  // Fix: accumulate in index_t — an `int` product overflows once the trailing
  // dims multiply past 2^31-1 elements.
  index_t target_dim = 1;
  for (int i = 1; i < dshape.ndim(); ++i) {
    target_dim *= dshape[i];
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::Shape2(dshape[0], target_dim));
  return true;
}
// Parameters of the Transpose operator: the axis permutation to apply.
// An empty tuple (the default) means "reverse all axes".
struct TransposeParam : public dmlc::Parameter<TransposeParam> {
  mxnet::TShape axes;
  DMLC_DECLARE_PARAMETER(TransposeParam) {
    DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, -1))
    .describe("Target axis order. By default the axes will be inverted.");
  }
  // Field-wise equality.
  bool operator==(const TransposeParam &other) const {
    return this->axes == other.axes;
  }
};
/*!
* \brief This function performs transpose operation on a 2D matrix by utilizing the L1 cache
* \param in input tensor
* \param out output tensor
* \param row shape of dim 0 of input
* \param col shape of dim 1 of input
* \tparam DType Data type
 * \tparam is_addto if true, accumulate the transposed values into `out` (+=) instead of overwriting it
*/
template<typename DType, bool is_addto>
MSHADOW_XINLINE void Transpose2D(const DType *in, DType *out, index_t row, index_t col) {
  // Cache-blocked transpose: a 32x32 tile of elements no larger than 8 bytes
  // (32KB L1 budget: 2^15 / 2^3 = 2^12 elements) keeps both the read and the
  // write side of a tile resident in L1.  The fixed inner trip counts let the
  // compiler choose its own unroll factor rather than unrolling by hand.
  const index_t blocksize = 32;
  // The two tile loops are parallelized; the element loops stay sequential so
  // each thread transposes whole tiles.  MSVC's OpenMP lacks `collapse`.
#ifdef _MSC_VER
  #pragma omp parallel for
#else
  #pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  for (index_t tile_r = 0; tile_r < row; tile_r += blocksize) {
    for (index_t tile_c = 0; tile_c < col; tile_c += blocksize) {
      const index_t c_end = std::min(tile_c + blocksize, col);
      const index_t r_end = std::min(tile_r + blocksize, row);
      for (index_t c = tile_c; c < c_end; ++c) {
        for (index_t r = tile_r; r < r_end; ++r) {
          if (is_addto) {
            out[c * row + r] += in[r * col + c];
          } else {
            out[c * row + r] = in[r * col + c];
          }
        }
      }
    }
  }
}
inline bool IsIdentityTranspose(const TShape& axes) {
for (dim_t i = 0; i < axes.ndim(); i++) {
if (axes[i] != i) return false;
}
return true;
}
// Core transpose kernel dispatch: picks, in order, the GPU pseudo-2D fast
// path, the identity-permutation copy path, or the rank-specific mshadow
// transpose (2-D CPU uses the cache-blocked Transpose2D above).
template<typename xpu, bool is_addto = false>
void TransposeImpl(RunContext ctx,
                   const TBlob& src,
                   const TBlob& ret,
                   const mxnet::TShape& axes) {
  using namespace mshadow;
  using namespace mshadow::expr;
  CHECK_EQ(src.type_flag_, ret.type_flag_);
  // zero-size tensor, no need to compute
  if (src.shape_.Size() == 0U) return;
  Stream<xpu> *s = ctx.get_stream<xpu>();
#ifdef __CUDACC__
  // This transpose can be used only if there exist n and m such that:
  // params = (0, ..., n-1, n+m, ..., params.size, n, ..., n+m-1)
  // Example: (0, 2, 3, 1) or (0, 3, 1, 2), but not (0, 2, 1, 3).
  if (isPseudo2DTranspose(axes)) {
    MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
      transpose_pseudo2D<DType, is_addto>(ret, src, axes, s);
    });
    return;
  }
#endif
  // Special handle the identity case: flat copy (or flat accumulate).
  if (IsIdentityTranspose(axes)) {
    MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
      Tensor<xpu, 1, DType> in = src.get_with_shape<xpu, 1, DType>(mshadow::Shape1(src.Size()), s);
      Tensor<xpu, 1, DType> out = ret.get_with_shape<xpu, 1, DType>(mshadow::Shape1(ret.Size()), s);
      if (!is_addto) {
        // Use memcpy to accelerate the speed
        Copy(out, in, s);
      } else {
        mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, kAddTo>, xpu>::Launch(
          s, ret.Size(), out.dptr_, in.dptr_);
      }
    });
    return;
  }
  // Handle the general transpose case: one branch per supported rank, since
  // mshadow's transpose expression needs the rank at compile time.
  MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
    switch (axes.ndim()) {
      case 2: {
        Tensor<xpu, 2, DType> in = src.get<xpu, 2, DType>(s);
        Tensor<xpu, 2, DType> out = ret.get<xpu, 2, DType>(s);
        if (ctx.get_ctx().dev_mask() == cpu::kDevMask) {
          Transpose2D<DType, is_addto>(in.dptr_, out.dptr_, in.shape_[0], in.shape_[1]);
        } else {
          LOG(FATAL) << "Not Implemented. We should never reach here because the 2D case "
                        "in GPU has been covered by transpose_pseudo2D."
                        " Report an issue in Github.";
        }
        break;
      }
      case 3: {
        Tensor<xpu, 3, DType> in = src.get<xpu, 3, DType>(s);
        Tensor<xpu, 3, DType> out = ret.get<xpu, 3, DType>(s);
        if (!is_addto) {
          out = transpose(in, axes.get<3>());
        } else {
          out += transpose(in, axes.get<3>());
        }
        break;
      }
      case 4: {
        Tensor<xpu, 4, DType> in = src.get<xpu, 4, DType>(s);
        Tensor<xpu, 4, DType> out = ret.get<xpu, 4, DType>(s);
        if (!is_addto) {
          out = transpose(in, axes.get<4>());
        } else {
          out += transpose(in, axes.get<4>());
        }
        break;
      }
      case 5: {
        Tensor<xpu, 5, DType> in = src.get<xpu, 5, DType>(s);
        Tensor<xpu, 5, DType> out = ret.get<xpu, 5, DType>(s);
        if (!is_addto) {
          out = transpose(in, axes.get<5>());
        } else {
          out += transpose(in, axes.get<5>());
        }
        break;
      }
      case 6: {
        Tensor<xpu, 6, DType> in = src.get<xpu, 6, DType>(s);
        Tensor<xpu, 6, DType> out = ret.get<xpu, 6, DType>(s);
        if (!is_addto) {
          out = transpose(in, axes.get<6>());
        } else {
          out += transpose(in, axes.get<6>());
        }
        break;
      }
      default:
        LOG(FATAL) << "Transpose support at most 6 dimensions";
        break;
    }
  });
}
// matrix transpose
// FCompute entry point for transpose: resolves the axis permutation
// (defaulting to a full reversal of the input's axes) and forwards to
// TransposeImpl with the accumulate flag matching the write request.
template<typename xpu>
void Transpose(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  if (req[0] == kNullOp) {
    return;
  }
  const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
  CHECK(req[0] == kWriteTo || req[0] == kAddTo)
      << "Transpose only supports kNullOp, kWriteTo and kAddTo";
  mxnet::TShape perm;
  if (param.axes.ndim() != 0) {
    perm = common::CanonicalizeAxes(param.axes);
  } else {
    // No axes given: invert the order of all input dimensions.
    const int nd = inputs[0].ndim();
    perm = mxnet::TShape(nd, -1);
    for (int d = 0; d < nd; ++d) {
      perm[d] = nd - 1 - d;
    }
  }
  if (req[0] == kAddTo) {
    TransposeImpl<xpu, true>(ctx.run_ctx, inputs[0], outputs[0], perm);
  } else {
    TransposeImpl<xpu, false>(ctx.run_ctx, inputs[0], outputs[0], perm);
  }
}
// Shape inference for Transpose.  Works in both directions: `ret` is the
// output shape implied by the input, and `get` is the input shape implied by
// the output (by inverting the permutation); both are then assigned back.
inline bool TransposeShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& shp = (*in_attrs)[0];
  mxnet::TShape& out_shp = (*out_attrs)[0];
  CHECK_LE(shp.ndim(), 6) << "Transpose support at most 6 dimensions";
  if (shp.ndim() == -1 && out_shp.ndim() == -1)
    return false;  // none of the shapes is known
  if (out_shp.ndim() >= 0 && shp.ndim() >= 0)
    CHECK_EQ(out_shp.ndim(), shp.ndim());
  // get: inferred input shape; ret: inferred output shape (-1 = unknown dim).
  mxnet::TShape get(std::max(shp.ndim(), out_shp.ndim()), -1);
  mxnet::TShape ret(std::max(shp.ndim(), out_shp.ndim()), -1);
  if (param.axes.ndim() == 0) {
    // Default permutation: reverse the axes in both directions.
    for (int i = 0; i < shp.ndim(); ++i) {
      ret[i] = shp[shp.ndim()-1-i];
    }
    for (int i = 0; i < out_shp.ndim(); ++i) {
      get[shp.ndim()-1-i] = out_shp[i];
    }
  } else {
    CHECK_EQ(std::max(shp.ndim(), out_shp.ndim()), param.axes.ndim());
    for (int i = 0; i < shp.ndim(); ++i) {
      CHECK(param.axes[i] < static_cast<int64_t>(shp.ndim()));
      ret[i] = shp[param.axes[i]];
    }
    // Inverse mapping: out dim i came from input axis param.axes[i].
    for (int i = 0; i < out_shp.ndim(); ++i) {
      get[param.axes[i]] = out_shp[i];
    }
  }
  SHAPE_ASSIGN_CHECK(*in_attrs, 0, get);
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
  return shape_is_known(ret);
}
// Parameters of the expand_dims operator: the position of the inserted
// length-1 axis (negative values count from the end).
struct ExpandDimParam : public dmlc::Parameter<ExpandDimParam> {
  int axis;
  DMLC_DECLARE_PARAMETER(ExpandDimParam) {
    DMLC_DECLARE_FIELD(axis)
    .describe("Position where new axis is to be inserted. Suppose that "
              "the input `NDArray`'s dimension is `ndim`, the range of "
              "the inserted axis is `[-ndim, ndim]`");
  }
  // Field-wise equality.
  bool operator==(const ExpandDimParam &other) const {
    return this->axis == other.axis;
  }
  // Serializes the parameter into a string map (used when re-creating attrs).
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream axis_s;
    axis_s << axis;
    (*dict)["axis"] = axis_s.str();
  }
};
// Shape inference for expand_dims.  Infers the output by inserting a length-1
// axis at `axis`, and also back-infers the input from a known output by
// removing that axis.  Works even when only one side's ndim is known.
inline bool ExpandDimShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const ExpandDimParam& param = nnvm::get<ExpandDimParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  if (!mxnet::ndim_is_known(in_attrs->at(0)) && !mxnet::ndim_is_known(out_attrs->at(0))) {
    return false;
  }
  mxnet::TShape& ishape = (*in_attrs)[0];
  mxnet::TShape& oshape = (*out_attrs)[0];
  int indim = ishape.ndim();
  bool unknown_ishape = false;
  if (-1 == indim) {
    // Input ndim unknown: derive it from the output (one axis fewer).
    indim = oshape.ndim() - 1;
    unknown_ishape = true;
  }
  int axis = param.axis;
  if (axis < 0) {
    axis += indim + 1;
  }
  CHECK(axis >= 0 && axis <= indim)
      << "axis must be in the range [" << -indim << ", " << indim << "] ("
      << param.axis << " provided)";
  // Forward inference: input dims with a 1 spliced in at `axis`.
  mxnet::TShape ret(indim + 1, -1);
  for (int i = 0; i < axis; ++i) {
    ret[i] = (unknown_ishape? -1 : ishape[i]);
  }
  ret[axis] = 1;
  for (int i = axis+1; i < indim+1; ++i) {
    ret[i] = (unknown_ishape? -1 : ishape[i-1]);
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
  // Backward inference: output dims with the inserted axis removed.
  ret = mxnet::TShape(indim, -1);
  for (int i = 0; i < axis; ++i) ret[i] = oshape[i];
  for (int i = axis+1; i < indim+1; ++i) ret[i-1] = oshape[i];
  SHAPE_ASSIGN_CHECK(*in_attrs, 0, ret);
  return shape_is_known(in_attrs->at(0)) && shape_is_known(out_attrs->at(0));
}
// Currently MKLDNN only supports step = 1 or step has no value
inline bool SupportMKLDNNSlice(const SliceParam& param) {
  // MKLDNN handles only unit (or unspecified) steps on every sliced axis;
  // an empty step tuple trivially qualifies (the loop body never runs).
  const int nsteps = param.step.ndim();
  for (int axis = 0; axis < nsteps; ++axis) {
    const auto& s = param.step[axis];
    const bool unit_step = !s.has_value() || s.value() == 1;
    if (!unit_step) return false;
  }
  return true;
}
// Storage-type inference for the Slice forward pass.  Dense input prefers the
// MKLDNN FComputeEx path (CPU, unit steps) and otherwise plain FCompute; CSR
// input keeps CSR only when the step is trivial; anything else falls back.
inline bool SliceForwardInferStorageType(const nnvm::NodeAttrs& attrs,
                                         const int dev_mask,
                                         DispatchMode* dispatch_mode,
                                         std::vector<int>* in_attrs,
                                         std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1);
  CHECK_EQ(out_attrs->size(), 1);
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  const auto& in_stype = in_attrs->at(0);
  auto& out_stype = out_attrs->at(0);
  bool dispatched = false;
  const auto dispatch_ex = DispatchMode::kFComputeEx;
  // If step = 1, no need to fallback; otherwise fallback to dense
  bool trivial_step = false;
  if (param.step.ndim() == 0U) {
    trivial_step = true;
  } else if (param.step.ndim() == 1U
             && (!param.step[0].has_value() || param.step[0].value() == 1)) {
    trivial_step = true;
  }

  if (in_stype == kDefaultStorage) {
#if MXNET_USE_MKLDNN == 1
    // MKLDNN path only on CPU, when enabled, and for supported step values.
    if (dev_mask == Context::kCPU && MKLDNNEnvSet()
        && SupportMKLDNNSlice(param)) {
      dispatched = storage_type_assign(&out_stype, kDefaultStorage,
                                       dispatch_mode, dispatch_ex);
    }
#endif
    if (!dispatched) {
      dispatched = storage_type_assign(&out_stype, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFCompute);
    }
  }

  if (!dispatched && in_stype == kCSRStorage && trivial_step) {
    dispatched = storage_type_assign(&out_stype, kCSRStorage,
                                     dispatch_mode, dispatch_ex);
  }

  if (!dispatched) {
    // No sparse-aware kernel applies: convert to dense and use FCompute.
    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
  }
  return dispatched;
}
// slice the indptr of a csr
// slice the indptr of a csr
// Kernel body: out[i] = in[i] - *base, i.e. copies a window of the indptr
// array while rebasing it so the first output entry becomes 0.
struct SliceCsrIndPtr {
  template<typename IType>
  MSHADOW_XINLINE static void Map(int i, IType* out, const IType* in, const IType* base) {
    KERNEL_ASSIGN(out[i], kWriteTo, in[i] - *base);
  }
};
/*
* a wrapper to launch SliceCsrIndPtr kernel.
* slice [src[begin] .. src[end]) and store in dst[0, end - begin)
*/
// Launches SliceCsrIndPtr: copies src[begin..end] into dst[0..end-begin],
// rebasing every entry by src[begin] so that dst[0] == 0.
template<typename xpu, typename IType>
void SliceCsrIndPtrImpl(const int begin, const int end, RunContext ctx,
                        const IType* src, IType* dst) {
  using namespace mshadow;
  using namespace mxnet_op;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  // end - begin rows produce end - begin + 1 indptr entries.
  const int out_len = end - begin + 1;
  Kernel<SliceCsrIndPtr, xpu>::Launch(s, out_len, dst, src + begin, src + begin);
}
/*
* Slice a CSR NDArray for first dimension
*/
// Slices a CSR NDArray along dim 0 only: rebases the indptr window, then
// block-copies the corresponding stretch of indices and values.
template<typename xpu>
void SliceDimOneCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx,
                        const NDArray &in, const NDArray &out) {
  using namespace mshadow;
  using namespace mxnet_op;
  using namespace csr;
  nnvm::dim_t begin_row = begin[0];
  nnvm::dim_t end_row = end[0];
  nnvm::dim_t indptr_len = end_row - begin_row + 1;
  out.CheckAndAllocAuxData(kIndPtr, Shape1(indptr_len));
  // assume idx indptr share the same type
  MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIndPtr), RType, {
    MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIdx), IType, {
      MSHADOW_TYPE_SWITCH(in.dtype(), DType, {
        RType* in_indptr = in.aux_data(kIndPtr).dptr<RType>();
        RType* out_indptr = out.aux_data(kIndPtr).dptr<RType>();
        SliceCsrIndPtrImpl<xpu, RType>(begin_row, end_row, ctx.run_ctx, in_indptr, out_indptr);
        Stream<xpu> *s = ctx.get_stream<xpu>();
        // Pull the slice's nnz (last rebased indptr entry) back to the host;
        // it sizes the idx/data allocations below.
        RType nnz = 0;
        mshadow::Copy(Tensor<cpu, 1, RType>(&nnz, Shape1(1)),
                      Tensor<xpu, 1, RType>(out_indptr + indptr_len - 1, Shape1(1), s));
        // return csr zeros if nnz = 0
        if (nnz == 0) {
          out.set_aux_shape(kIdx, Shape1(0));
          return;
        }
        // copy indices and values
        out.CheckAndAllocAuxData(kIdx, Shape1(nnz));
        out.CheckAndAllocData(Shape1(nnz));
        IType* in_idx = in.aux_data(kIdx).dptr<IType>();
        IType* out_idx = out.aux_data(kIdx).dptr<IType>();
        DType* in_data = in.data().dptr<DType>();
        DType* out_data = out.data().dptr<DType>();
        // Host copy of in_indptr[begin_row]: where the slice starts in idx/data.
        RType offset = 0;
        mshadow::Copy(Tensor<cpu, 1, RType>(&offset, Shape1(1)),
                      Tensor<xpu, 1, RType>(in_indptr + begin_row, Shape1(1), s));
        mshadow::Copy(Tensor<xpu, 1, IType>(out_idx, Shape1(nnz), s),
                      Tensor<xpu, 1, IType>(in_idx + offset, Shape1(nnz), s), s);
        mshadow::Copy(Tensor<xpu, 1, DType>(out_data, Shape1(nnz), s),
                      Tensor<xpu, 1, DType>(in_data + offset, Shape1(nnz), s), s);
      });
    });
  });
}
/*!
* \brief slice a CSRNDArray for two dimensions
*/
// Kernel for slicing a CSR ndarray on axis 1: per output row i, copies the
// entries whose column index falls inside [begin_col, end_col), shifting the
// columns so the slice starts at 0.
struct SliceDimTwoCsrAssign {
  /*!
   * \brief Copy row i's in-window entries into the output CSR arrays.
   * \param i loop index (row)
   * \param out_idx output csr ndarray column indices
   * \param out_data output csr ndarray data
   * \param out_indptr output csr ndarray row index pointer
   * \param in_idx input csr ndarray column indices
   * \param in_data input csr ndarray data
   * \param in_indptr input csr ndarray row index pointer
   * \param begin_col begin column index (inclusive)
   * \param end_col end column index (exclusive)
   */
  template<typename IType, typename RType, typename DType>
  MSHADOW_XINLINE static void Map(int i,
                                  IType* out_idx, DType* out_data,
                                  const RType* out_indptr,
                                  const IType* in_idx, const DType* in_data,
                                  const RType* in_indptr,
                                  const int begin_col, const int end_col) {
    RType out_pos = out_indptr[i];
    const RType row_begin = in_indptr[i];
    const RType row_end = in_indptr[i+1];
    for (RType j = row_begin; j < row_end; ++j) {
      const IType col = in_idx[j];
      // Column indices within a CSR row are ascending, so the first index at
      // or past end_col terminates the row scan.
      if (col >= end_col) break;
      if (col < begin_col) continue;
      out_idx[out_pos] = col - begin_col;
      out_data[out_pos] = in_data[j];
      ++out_pos;
    }
  }
};
/*
* Slice a CSR NDArray for two dimensions
*/
template<typename xpu>
void SliceDimTwoCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx,
const NDArray &in, const NDArray &out);
template<typename xpu>
void SliceCsrImpl(const SliceParam ¶m, const OpContext& ctx,
const NDArray &in, OpReqType req, const NDArray &out) {
if (req == kNullOp) return;
CHECK_NE(req, kAddTo) << "kAddTo for Slice on CSR input is not supported";
CHECK_NE(req, kWriteInplace) << "kWriteInplace for Slice on CSR input is not supported";
const mxnet::TShape ishape = in.shape();
const mxnet::TShape oshape = out.shape();
int N = ishape.ndim();
mxnet::TShape begin(N, -1), end(N, -1);
for (int i = 0; i < N; ++i) {
int s = 0;
if (i < param.begin.ndim() && param.begin[i]) {
s = *param.begin[i];
if (s < 0) s += ishape[i];
}
begin[i] = s;
end[i] = s + oshape[i];
}
switch (N) {
case 1: {
SliceDimOneCsrImpl<xpu>(begin, end, ctx, in, out);
break;
}
case 2: {
SliceDimTwoCsrImpl<xpu>(begin, end, ctx, in, out);
break;
}
default:
LOG(FATAL) << "CSR is only for 2-D shape";
break;
}
}
// FComputeEx entry point for Slice on non-default storage.  Only CSR input is
// implemented; any other storage type is a fatal error.
template<typename xpu>
void SliceEx(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<NDArray>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<NDArray>& outputs) {
  CHECK_EQ(inputs.size(), 1);
  CHECK_EQ(outputs.size(), 1);
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  auto in_stype = inputs[0].storage_type();
  if (in_stype == kCSRStorage) {
    SliceCsrImpl<xpu>(param, ctx, inputs[0], req[0], outputs[0]);
  } else {
    // Fix: the message was missing a space before the storage type, producing
    // output like "...storage typecsr".
    LOG(FATAL) << "Slice not implemented for storage type " << in_stype;
  }
}
// Normalizes the slice parameters into concrete per-axis (begin, end, step)
// triples: resolves negative/missing begin and end, clamps them into the
// axis, and fills defaults (full axis, step 1) for axes the params omit.
template<int ndim>
inline bool GetIndexRange(const mxnet::TShape& dshape,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_begin,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_end,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_step,
                          common::StaticArray<index_t, ndim>* begin,
                          common::StaticArray<index_t, ndim>* end,
                          common::StaticArray<index_t, ndim>* step) {
  // Function returns false if output is zero-sized, true otherwise.
  bool zero_size_shape = false;
  CHECK_NE(dshape.ndim(), 0U);
  CHECK_LE(param_begin.ndim(), dshape.ndim())
    << "Slicing axis exceeds data dimensions";
  CHECK_LE(param_end.ndim(), dshape.ndim())
    << "Slicing axis exceeds data dimensions";
  CHECK_EQ(param_begin.ndim(), param_end.ndim())
    << "begin and end must have the same length";
  CHECK_EQ(ndim, dshape.ndim())
    << "Static array size=" << ndim
    << " is not equal to data shape ndim=" << dshape.ndim();

  if (param_step.ndim() > 0) {
    CHECK_EQ(param_step.ndim(), param_begin.ndim())
      << "step and begin must have the same length";
  }

  for (int i = 0; i < param_begin.ndim(); ++i) {
    // Missing step defaults to 1; a step of 0 is rejected.
    index_t s = param_step.ndim() > 0 && param_step[i].has_value() ? param_step[i].value() : 1;
    CHECK_NE(s, 0) << "slice op step[" << i << "] cannot be 0";

    index_t b = 0, e = 0;
    const index_t len = dshape[i];
    if (len > 0) {
      // Missing begin/end default to the full axis, direction-aware:
      // forward steps scan [0, len), backward steps scan [len-1, -1).
      b = param_begin[i].has_value() ? param_begin[i].value() : (s < 0 ? len - 1 : 0);
      e = param_end[i].has_value() ? param_end[i].value() : (s < 0 ? -1 : len);

      // Negative indices count from the end of the axis.
      if (b < 0) {
        b += len;
      }

      if (e < 0 && param_end[i].has_value()) {
        e += len;
      }

      // move the begin and end to correct position for calculating dim size
      b = (b < 0 && s > 0) ? 0 : b;
      b = (b > len - 1 && s < 0) ? len - 1 : b;
      // if the start value lead to empty tensor under step s, use -1 for indication
      b = (b < 0 || b > len - 1) ? -1 : b;
      e = e > -1 ? e : -1;
      e = e > len ? len : e;
    } else if (len == 0) {
      b = 0;
      e = 0;
    }

    (*begin)[i] = b;
    (*end)[i] = e;
    (*step)[i] = s;
    // checking begin==end
    if (b == e) {
      zero_size_shape = true;
    }
  }

  // Axes not mentioned in the params are kept whole with step 1.
  for (int i = param_begin.ndim(); i < dshape.ndim(); ++i) {
    (*begin)[i] = 0;
    (*end)[i] = dshape[i];
    (*step)[i] = 1;
  }

  return zero_size_shape;
}
// Writes the output length of dim i for slicing dshape[i] with the normalized
// triple (b, e, s).  Unknown input dims propagate as -1; ranges that are
// empty or were flagged invalid (b < 0) yield 0.
inline void SetSliceOpOutputDimSize(const mxnet::TShape& dshape,
                                    const index_t i, const index_t b,
                                    const index_t e, const index_t s,
                                    mxnet::TShape* oshape) {
  if (!mxnet::dim_size_is_known(dshape, i)) {
    (*oshape)[i] = -1;
    return;
  }
  index_t dim_len = 0;
  if (e != b && b >= 0) {
    if (s > 0 && e > b) {
      // ceil((e - b) / s)
      dim_len = (e - b - 1) / s + 1;
    } else if (s < 0 && e < b) {
      // ceil((b - e) / |s|)
      dim_len = (b - e - 1) / (-s) + 1;
    }
  }
  (*oshape)[i] = dim_len;
}
// Shape inference for the dense Slice op: normalizes the params with
// GetIndexRange and derives every sliced output dim via SetSliceOpOutputDimSize
// (axes the params omit keep their input size, since oshape starts as dshape).
inline bool SliceOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(dshape)) return false;
  CHECK_GT(dshape.ndim(), 0) << "slice only works for ndim > 0";
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  mxnet::TShape oshape = dshape;

  // MXNET_NDIM_SWITCH instantiates the block for the concrete rank so the
  // StaticArrays have compile-time size.
  MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
    for (int i = 0; i < param.begin.ndim(); ++i) {
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(dshape, i, b, e, s, &oshape);
    }
  })

  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(dshape) && shape_is_known(oshape);
}
// Slice forward kernel, specialized per device: the GPU variant maps one
// thread to one output element, the CPU variant (below) one call to one row.
template<int ndim, int req, typename xpu>
struct slice_forward;

template<int ndim, int req>
struct slice_forward<ndim, req, gpu> {
  // i is the i-th row after flattening out into 2D tensor
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data,
                                  const mshadow::Shape<ndim> dshape,
                                  const mshadow::Shape<ndim> oshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = dshape[ndim-1];
    const index_t out_last_dim_size = oshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    // Decompose flat output index i into (row in the flattened 2-D view,
    // position j within the last dim).
    const index_t j = i % out_last_dim_size;
    index_t irow = 0;  // row id of flattend 2D data
    index_t stride = 1;
    index_t idx = i / out_last_dim_size;
    // Walk the higher dims from innermost to outermost, mapping each output
    // coordinate back to its source coordinate (coord * step + begin).
    #pragma unroll
    for (int k = ndim - 2; k >= 0; --k) {
      irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
      idx /= oshape[k];
      stride *= dshape[k];
    }
    KERNEL_ASSIGN(out[i], req,
                  data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
  }
};
template<int ndim, int req>
struct slice_forward<ndim, req, cpu> {
  // i is the i-th row after flattening the output into a 2D tensor; each
  // cpu thread copies one full output row.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data,
                                  const mshadow::Shape<ndim> dshape,
                                  const mshadow::Shape<ndim> oshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = dshape[ndim-1];
    const index_t out_last_dim_size = oshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    index_t out_offset = i * out_last_dim_size;  // first output element of row i
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattened 2D data
      index_t stride = 1;
      index_t idx = i;
      // Map output row i to the corresponding input row via begin/step.
      #pragma unroll
      for (int k = ndim - 2; k >= 0; --k) {
        irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
        idx /= oshape[k];
        stride *= dshape[k];
      }
      KERNEL_ASSIGN(out[out_offset++], req,
                    data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
    }
  }
};
/*!
 * \brief Forward computation of the slice op: launches slice_forward with one
 * thread per output row on cpu, one per output element on gpu.
 */
template<typename xpu>
void SliceOpForward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  if (req[0] == kNullOp) return;
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  if (out.Size() == 0) return;  // empty slice: nothing to copy
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        size_t num_threads = out.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          // gpu kernel is per-element, not per-row.
          num_threads *= out.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
          out.dptr<DType>(), data.dptr<DType>(),
          data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
// Scatter kernel writing a value tensor into a slice of the output;
// specialized below for cpu (per row) and gpu (per element).
template<int ndim, int req, typename xpu>
struct slice_assign;
template<int ndim, int req>
struct slice_assign<ndim, req, cpu> {
  // i is the i-th row of the value tensor flattened into a 2D tensor; each
  // cpu thread scatters one full value row into the output.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim-1];
    const index_t out_last_dim_size = vshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    index_t offset = i * out_last_dim_size;  // first value element of row i
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattened 2D out
      index_t stride = 1;
      index_t idx = i;
      // Map value row i to the target output row via begin/step.
      #pragma unroll
      for (int k = ndim - 2; k >= 0; --k) {
        irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
        idx /= vshape[k];
        stride *= oshape[k];
      }
      KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim],
                    req, val[offset++]);
    }
  }
};
template<int ndim, int req>
struct slice_assign<ndim, req, gpu> {
  // On gpu, i indexes single elements of the value tensor flattened to 2D
  // (callers launch rows * last-dim-size threads for gpu).
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim-1];
    const index_t out_last_dim_size = vshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    const index_t j = i % out_last_dim_size;  // column within the last dim
    index_t irow = 0;  // row id of flattened 2D out
    index_t stride = 1;
    index_t idx = i / out_last_dim_size;  // row id of flattened 2D value
    // Map the value row to the target output row via begin/step.
    #pragma unroll
    for (int k = ndim - 2; k >= 0; --k) {
      irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
      idx /= vshape[k];
      stride *= oshape[k];
    }
    KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim],
                  req, val[i]);
  }
};
/*!
 * \brief Backward pass of the slice op: scatters ograd into igrad at the
 * sliced positions. Supports kWriteTo (zero-fills first) and kAddTo;
 * kWriteInplace is rejected.
 */
template<typename xpu>
void SliceOpBackward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  if (req[0] == kNullOp) return;
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& ograd = inputs[0];
  const TBlob& igrad = outputs[0];
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  if (req[0] == kWriteTo) {
    // Positions outside the slice receive zero gradient.
    Fill(s, igrad, req[0], 0);
  } else if (req[0] == kWriteInplace) {
    LOG(FATAL) << "_slice_backward does not support kWriteInplace";
  }
  if (ograd.Size() == 0) return;
  MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(igrad.shape_, param.begin, param.end, param.step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // size_t avoids overflow for large tensors and matches SliceOpForward,
        // which already uses size_t for the thread count.
        size_t num_threads = ograd.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          // gpu kernel is per-element, not per-row.
          num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
          igrad.dptr<DType>(), ograd.dptr<DType>(),
          igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
/*!
 * \brief Shape inference for _slice_assign: the output matches the data
 * input, and the value input (rhs) must match the sliced region's shape.
 */
inline bool SliceAssignOpShape(const nnvm::NodeAttrs& attrs,
                               mxnet::ShapeVector *in_attrs,
                               mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(dshape)) return false;
  mxnet::TShape vshape = dshape;  // vshape is the value shape on the right hand side
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
    for (int i = 0; i < param.begin.ndim(); ++i) {
      // index_t (not int) to avoid truncating large-tensor indices;
      // consistent with SliceOpShape above.
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(dshape, i, b, e, s, &vshape);
    }
  })
  SHAPE_ASSIGN_CHECK(*in_attrs, 1, vshape);
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
/*!
 * \brief Forward computation of _slice_assign (data[slice] = val): copies
 * data to out when req is kWriteTo, then scatters val into the sliced region.
 * Only kWriteTo and kWriteInplace are supported.
 */
template<typename xpu>
void SliceAssignOpForward(const nnvm::NodeAttrs& attrs,
                          const OpContext& ctx,
                          const std::vector<TBlob>& inputs,
                          const std::vector<OpReqType>& req,
                          const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  CHECK_EQ(inputs.size(), 2U);  // data[index] = val, data and val are two inputs
  CHECK_EQ(outputs.size(), 1U);
  if (req[0] == kNullOp) return;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& val = inputs[1];
  const TBlob& out = outputs[0];
  if (req[0] == kWriteTo) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
      Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
      Copy(out, in, s);
    });
  } else if (req[0] != kWriteInplace) {
    LOG(FATAL) << "_slice_assign only supports kWriteTo and kWriteInplace";
  }
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step,
                                         &begin, &end, &step);
    if (zero_size_shape) {
      return;  // slice_assign of zero-sized subspace needs no operation.
    }
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // size_t avoids overflow for large tensors (consistent with
        // SliceOpForward's thread-count type).
        size_t num_threads = val.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          // gpu kernel is per-element, not per-row.
          num_threads *= val.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
          out.dptr<DType>(), val.dptr<DType>(),
          out.shape_.get<ndim>(), val.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
/*! \brief Parameters of _slice_assign_scalar: a scalar value plus the
 *  begin/end/step tuples describing the target slice.
 */
struct SliceAssignScalarParam : public dmlc::Parameter<SliceAssignScalarParam> {
  double scalar;  // value written into every element of the slice
  mxnet::Tuple<dmlc::optional<index_t>> begin, end;
  mxnet::Tuple<dmlc::optional<index_t>> step;
  DMLC_DECLARE_PARAMETER(SliceAssignScalarParam) {
    DMLC_DECLARE_FIELD(scalar)
    .set_default(0)
    .describe("The scalar value for assignment.");
    DMLC_DECLARE_FIELD(begin)
    .describe("starting indices for the slice operation, supports negative indices.");
    DMLC_DECLARE_FIELD(end)
    .describe("ending indices for the slice operation, supports negative indices.");
    DMLC_DECLARE_FIELD(step)
    .set_default(mxnet::Tuple<dmlc::optional<index_t>>())
    .describe("step for the slice operation, supports negative values.");
  }
};
/*!
 * \brief Shape inference for _slice_assign_scalar: the output simply shares
 * the shape of the single data input.
 */
inline bool SliceAssignScalarOpShape(const nnvm::NodeAttrs& attrs,
                                     mxnet::ShapeVector *in_attrs,
                                     mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = in_attrs->at(0);
  if (!shape_is_known(dshape)) {
    return false;  // cannot infer until the input shape is complete
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
/*! \brief Kernel writing a single scalar into every element of a slice;
 *  one thread per row of the sliced region flattened to 2D.
 */
template<int ndim>
struct slice_assign_scalar {
  // i is the i-th row after flattening the sliced region into a 2D tensor
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType val,
                                  const OpReqType req,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim-1];
    const index_t out_last_dim_size = vshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattened 2D out
      index_t stride = 1;
      index_t idx = i;
      // Map slice row i to the target output row via begin/step.
      #pragma unroll
      for (int k = ndim - 2; k >= 0; --k) {
        irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
        idx /= vshape[k];
        stride *= oshape[k];
      }
      KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val);
    }
  }
};
/*!
 * \brief Forward computation of _slice_assign_scalar (data[slice] = scalar):
 * copies data to out when req is kWriteTo, then fills the sliced region with
 * the scalar. Only kWriteTo and kWriteInplace are supported.
 */
template<typename xpu>
void SliceAssignScalarOpForward(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  using namespace mshadow;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  if (req[0] == kWriteTo) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
      Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
      Copy(out, in, s);
    });
  } else if (req[0] != kWriteInplace) {
    LOG(FATAL) << "_crop_assign_scalar only supports kWriteTo and kWriteInplace";
  }
  mxnet::TShape vshape = data.shape_;  // shape of the sliced region
  const SliceAssignScalarParam& param = nnvm::get<SliceAssignScalarParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step,
                                         &begin, &end, &step);
    if (zero_size_shape) {
      return;  // slice_assign of zero-sized subspace needs no operation.
    }
    for (index_t i = 0; i < param.begin.ndim(); ++i) {
      // index_t (not int) to avoid truncating large-tensor indices;
      // consistent with SliceOpShape above.
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(data.shape_, i, b, e, s, &vshape);
    }
    MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {
      mxnet_op::Kernel<slice_assign_scalar<ndim>, xpu>::Launch(s, vshape.FlatTo2D()[0],
        out.dptr<DType>(), static_cast<DType>(param.scalar), req[0],
        out.shape_.get<ndim>(), vshape.get<ndim>(), begin, step);
    })
  })
}
/*! \brief Parameters of slice_axis: a single axis plus a begin index and an
 *  optional end index (empty end means "to the end of the axis").
 */
struct SliceAxisParam : public dmlc::Parameter<SliceAxisParam> {
  int axis;
  index_t begin;
  dmlc::optional<index_t> end;
  DMLC_DECLARE_PARAMETER(SliceAxisParam) {
    DMLC_DECLARE_FIELD(axis)
      .describe("Axis along which to be sliced, supports negative indexes.");
    DMLC_DECLARE_FIELD(begin)
      .describe("The beginning index along the axis to be sliced, "
                " supports negative indexes.");
    DMLC_DECLARE_FIELD(end)
      .describe("The ending index along the axis to be sliced, "
                " supports negative indexes.");
  }
};
/*!
 * \brief Normalize slice_axis parameters: resolve a possibly-negative axis,
 * and resolve begin/end (handling negative values and a missing end) against
 * the size of that axis. A zero-sized axis yields begin = end = 0.
 * \param param  user-supplied op parameters
 * \param ishape input shape the parameters are resolved against
 * \param axis   out: non-negative axis index
 * \param begin  out: resolved, non-negative begin index
 * \param end    out: resolved end index, begin < end <= axis size (unless the
 *               axis is zero-sized)
 */
inline void GetSliceAxisParams(const SliceAxisParam& param, const mxnet::TShape& ishape,
                               int* axis, index_t* begin, index_t* end) {
  *axis = param.axis;
  if (*axis < 0) {
    *axis += ishape.ndim();  // negative axes count from the back
  }
  // Message fixed: the check accepts zero ("Recieved" typo also corrected).
  CHECK(*axis < ishape.ndim() && *axis >= 0) <<
    "Transformed axis must be smaller than the source ndim and not less than zero! Received axis=" <<
    param.axis << ", src_ndim=" << ishape.ndim() << ", transformed axis=" << *axis;
  index_t axis_size = static_cast<index_t>(ishape[*axis]);
  *begin = param.begin;
  *end = -1;
  if (*begin < 0) {
    *begin += axis_size;  // negative begin counts from the back
  }
  if (axis_size > 0) {
    if (!static_cast<bool>(param.end)) {
      *end = axis_size;  // missing end: slice to the end of the axis
    } else {
      *end = param.end.value();
      if (*end < 0) {
        *end += axis_size;  // negative end counts from the back
      }
    }
    CHECK(*end <= axis_size) << "Invalid end for end=" << *end << " as axis_size is " << axis_size;
    CHECK((*begin < *end))
      << "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
  } else {
    *begin = 0;
    *end = 0;
  }
  CHECK(*end >= 0)
    << "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
  CHECK(*begin >= 0) << "Invalid begin for begin=" << param.begin;
}
/*!
 * \brief Shape inference for slice_axis: the output equals the input shape
 * except along the sliced axis, whose size becomes end - begin.
 */
inline bool SliceAxisShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(ishape)) return false;
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, ishape, &axis, &begin, &end);
  if (!mxnet::dim_size_is_known(ishape, axis)) {
    // Sliced axis has unknown size: propagate the input shape, stay unknown.
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
    return false;
  }
  mxnet::TShape shape(ishape.ndim(), -1);
  for (int d = 0; d < ishape.ndim(); ++d) {
    shape[d] = (d == axis) ? static_cast<index_t>(end - begin) : ishape[d];
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  return shape_is_known(shape);
}
/*!
 * \brief Forward computation of slice_axis using mshadow's slice expression.
 * Flattens to 2D when slicing the last axis, otherwise to 3D with the sliced
 * axis in the middle, and slices dim 1 of the flattened view in both cases.
 */
template<typename xpu>
void SliceAxis(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow::expr;
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, inputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].ndim();
  if (axis + 1 == ndim) {
    // Slicing the last axis: 2D view is enough.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        mshadow::Tensor<xpu, 2, DType> in =
            inputs[0].FlatTo2D<xpu, DType>(s);
        mshadow::Tensor<xpu, 2, DType> out =
            outputs[0].FlatTo2D<xpu, DType>(s);
        ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
      });
  } else {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        mshadow::Tensor<xpu, 3, DType> in =
            inputs[0].FlatTo3D<xpu, DType>(axis, s);
        mshadow::Tensor<xpu, 3, DType> out =
            outputs[0].FlatTo3D<xpu, DType>(axis, s);
        ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
      });
  }
}
// Backward pass of slice_axis: scatter the output gradient back into the
// sliced region of the input gradient (zero elsewhere for kWriteTo).
template<typename xpu>
void SliceAxisGrad_(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  if (outputs[0].shape_.Size() == 0) {
    return;  // nothing to write
  }
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  using namespace mshadow::op;
  using namespace mshadow::expr;
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, outputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].shape_.ndim();
  if (axis + 1 == ndim) {
    // Sliced the last axis: 2D view is enough (mirrors SliceAxis).
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        mshadow::Tensor<xpu, 2, DType> ograd =
            inputs[0].FlatTo2D<xpu, DType>(s);
        mshadow::Tensor<xpu, 2, DType> igrad =
            outputs[0].FlatTo2D<xpu, DType>(s);
        if (req[0] == kAddTo) {
          slice<1>(igrad, begin, end) += F<identity>(ograd);
        } else if (req[0] == kWriteTo) {
          igrad = 0.0f;  // zero outside the slice
          slice<1>(igrad, begin, end) = F<identity>(ograd);
        } else {
          CHECK_EQ(req[0], kNullOp);
        }
      });
  } else {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        mshadow::Tensor<xpu, 3, DType> ograd =
            inputs[0].FlatTo3D<xpu, DType>(axis, s);
        mshadow::Tensor<xpu, 3, DType> igrad =
            outputs[0].FlatTo3D<xpu, DType>(axis, s);
        if (req[0] == kAddTo) {
          slice<1>(igrad, begin, end) += F<identity>(ograd);
        } else if (req[0] == kWriteTo) {
          igrad = 0.0f;  // zero outside the slice
          slice<1>(igrad, begin, end) = F<identity>(ograd);
        } else {
          CHECK_EQ(req[0], kNullOp);
        }
      });
  }
}
/*! \brief Parameters of slice_like: the axes on which to match the second
 *  input's sizes (empty tuple means all axes).
 */
struct SliceLikeParam : public dmlc::Parameter<SliceLikeParam> {
  mxnet::Tuple<int> axes;
  DMLC_DECLARE_PARAMETER(SliceLikeParam) {
    DMLC_DECLARE_FIELD(axes).set_default(mxnet::Tuple<int>())
      .describe("List of axes on which input data will be sliced according to the "
                "corresponding size of the second input. By default will slice on "
                "all axes. Negative axes are supported.");
  }
};
/*!
 * \brief Shape inference for slice_like: the output takes the second input's
 * size on each requested axis (all axes when param.axes is empty) and the
 * first input's size elsewhere.
 */
inline bool SliceLikeShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  mxnet::TShape& from_shape = (*in_attrs)[1];
  if (param.axes.ndim() == 0) {
    // No axes given: slice on every axis, so ndims must agree.
    CHECK_EQ(ishape.ndim(), from_shape.ndim())
      << "By default slice_axis performs slice on all axes, but ndim mismatch "
         "for inputs: " << ishape.ndim() << " vs. " << from_shape.ndim();
    for (int i = 0; i < ishape.ndim(); ++i) {
      CHECK_GE(ishape[i], from_shape[i])
        << "Slice axis " << i << " with size " << from_shape[i]
        << " exceeds limit of input with size " << ishape[i];  // space added to fix message
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, from_shape);
  } else {
    mxnet::TShape shape(ishape);
    for (int i = 0; i < param.axes.ndim(); ++i) {
      int axis = param.axes[i];
      if (axis < 0) {
        axis += ishape.ndim();  // negative axes count from the back
      }
      CHECK_GE(axis, 0)
        << "Slice axis: " << param.axes[i] << " too small";
      CHECK_GT(ishape.ndim(), axis)
        << "Slice axis: " << axis << " exceeds first input: " << ishape.ndim();
      CHECK_GT(from_shape.ndim(), axis)
        << "Slice axis: " << axis << " exceeds second input: " << from_shape.ndim();
      shape[axis] = from_shape[axis];
      CHECK_GE(ishape[axis], from_shape[axis])
        << "Slice axis " << axis << " with size " << from_shape[axis]
        << " exceeds limit of input with size " << ishape[axis];  // space added to fix message
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  }
  return true;
}
/*!
 * \brief Translate slice_like's axes parameter into begin/end/step tuples so
 * the generic slice kernels can be reused: each requested axis gets
 * [0, fshape[axis]) with step 1; all other axes keep empty (full-range)
 * optionals.
 * \param dshape shape of the data input
 * \param fshape shape of the "like" (second) input
 * \param axes   axes to slice on; empty means all axes
 * \param param_begin out: begin tuple
 * \param param_end   out: end tuple
 * \param param_step  out: step tuple
 */
inline void SliceLikeInferRanges(const mxnet::TShape& dshape,
                                 const mxnet::TShape& fshape,
                                 const mxnet::Tuple<int>& axes,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_begin,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_end,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_step) {
  std::vector<dmlc::optional<index_t>> pb(dshape.ndim());
  std::vector<dmlc::optional<index_t>> pe(dshape.ndim());
  std::vector<dmlc::optional<index_t>> ps(dshape.ndim());
  if (axes.ndim() == 0) {
    // Slice every axis to the "like" input's size.
    for (int i = 0; i < dshape.ndim(); ++i) {
      pb[i] = 0;
      pe[i] = fshape[i];
      ps[i] = 1;
    }
  } else {
    for (int i = 0; i < axes.ndim(); ++i) {
      int axis = axes[i];
      if (axis < 0) {
        axis += dshape.ndim();  // negative axes count from the back
      }
      CHECK_GE(axis, 0)
        << "Slice axis: " << axes[i] << " too small";
      CHECK_LT(axis, dshape.ndim())
        << "Slice axis: " << axis << " exceeds first input: " << dshape.ndim();
      // Message fixed: this bound comes from the second input's shape.
      CHECK_LT(axis, fshape.ndim())
        << "Slice axis: " << axis << " exceeds second input: " << fshape.ndim();
      pb[axis] = 0;
      pe[axis] = fshape[axis];
      ps[axis] = 1;
    }
  }
  *param_begin = mxnet::Tuple<dmlc::optional<index_t>>(pb.begin(), pb.end());
  *param_end = mxnet::Tuple<dmlc::optional<index_t>>(pe.begin(), pe.end());
  *param_step = mxnet::Tuple<dmlc::optional<index_t>>(ps.begin(), ps.end());
}
/*!
 * \brief Forward computation of slice_like: infers begin/end/step from the
 * second input's shape, then reuses the slice_forward kernel.
 */
template<typename xpu>
void SliceLikeForward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  using namespace mshadow::expr;
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  const mxnet::TShape& ishape = data.shape_;
  const mxnet::TShape& from_shape = inputs[1].shape_;
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(data.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // size_t avoids overflow for large tensors (consistent with
        // SliceOpForward's thread-count type).
        size_t num_threads = out.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          // gpu kernel is per-element, not per-row.
          num_threads *= out.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s,
          num_threads, out.dptr<DType>(), data.dptr<DType>(),
          data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
/*!
 * \brief Backward pass of slice_like: zero-fills the second input's gradient
 * (it only contributes its shape), then scatters ograd back into the sliced
 * region of the first input's gradient via slice_assign.
 */
template<typename xpu>
void SliceLikeBackward(const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       const std::vector<TBlob>& inputs,
                       const std::vector<OpReqType>& req,
                       const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 2U);
  CHECK_EQ(req.size(), 2U);
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  if (req[1] != kNullOp && req[1] != kAddTo) {
    Fill(s, outputs[1], req[1], 0);  // Second input not relevant to gradients.
  }
  if (req[0] == kNullOp) return;
  const TBlob& ograd = inputs[0];
  const TBlob& igrad = outputs[0];
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  if (req[0] == kWriteTo) {
    // Positions outside the slice receive zero gradient.
    Fill(s, igrad, req[0], 0);
  } else if (req[0] == kWriteInplace) {
    LOG(FATAL) << "_slice_like_backward does not support kWriteInplace";
  }
  const mxnet::TShape& ishape = ograd.shape_;
  const mxnet::TShape& from_shape = outputs[1].shape_;
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
  MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(ograd.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // size_t avoids overflow for large tensors (consistent with
        // SliceOpForward's thread-count type).
        size_t num_threads = ograd.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          // gpu kernel is per-element, not per-row.
          num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
          igrad.dptr<DType>(), ograd.dptr<DType>(),
          igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
/*! \brief Parameters of the clip op: the inclusive clamping range
 *  [a_min, a_max].
 */
struct ClipParam : public dmlc::Parameter<ClipParam> {
  real_t a_min, a_max;
  DMLC_DECLARE_PARAMETER(ClipParam) {
    DMLC_DECLARE_FIELD(a_min)
    .describe("Minimum value");
    DMLC_DECLARE_FIELD(a_max)
    .describe("Maximum value");
  }
  // Serialize the fields into a string map (used for op attribute caching).
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream a_min_s, a_max_s;
    a_min_s << a_min;
    a_max_s << a_max;
    (*dict)["a_min"] = a_min_s.str();
    (*dict)["a_max"] = a_max_s.str();
  }
};
/*! \brief Elementwise kernel clamping each input value into [a_min, a_max].
 *  Comparison order matches the original: the upper bound is tested first,
 *  and values that compare false to both bounds (e.g. NaN) pass through.
 */
struct clip {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* datas,
                                  const float a_min, const float a_max) {
    const DType value = datas[i];
    if (value > a_max) {
      out[i] = a_max;        // clamp from above
    } else if (value < a_min) {
      out[i] = a_min;        // clamp from below
    } else {
      out[i] = value;        // already within range
    }
  }
};
/*! \brief Elementwise gradient kernel for clip: the gradient passes through
 *  where the input was inside [a_min, a_max] and is zero where it was
 *  clamped.
 */
struct clip_grad {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* grad, const DType* datas,
                                  const float a_min, const float a_max) {
    const DType value = datas[i];
    // Clamped positions contribute no gradient.
    if (value > a_max || value < a_min) {
      out[i] = 0;
    } else {
      out[i] = grad[i];
    }
  }
};
/*! \brief Forward computation of the clip op: launches the elementwise clip
 *  kernel over the whole output.
 */
template<typename xpu>
void Clip(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    mxnet_op::Kernel<mxnet::op::clip, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(),
                                                   inputs[0].dptr<DType>(),
                                                   param.a_min, param.a_max);
  });
}
/*! \brief Clip for non-default (sparse) storage: delegates to the dense
 *  Clip kernel applied to the stored values only.
 */
template<typename xpu>
void ClipEx(const nnvm::NodeAttrs& attrs,
            const OpContext& ctx,
            const std::vector<NDArray>& inputs,
            const std::vector<OpReqType>& req,
            const std::vector<NDArray>& outputs) {
  CHECK_EQ(inputs[0].dtype(), outputs[0].dtype());
  CHECK_EQ(inputs[0].storage_type(), outputs[0].storage_type());
  CHECK_NE(inputs[0].storage_type(), kDefaultStorage);
  UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Clip<xpu>);
}
/*! \brief Backward pass of clip: launches clip_grad, which zeroes the
 *  gradient wherever the forward input was clamped.
 *  inputs[0] is the output gradient, inputs[1] is the forward input data.
 */
template<typename xpu>
void ClipGrad_(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mxnet_op;
  const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<clip_grad, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(),
                                   inputs[0].dptr<DType>(), inputs[1].dptr<DType>(), param.a_min, param.a_max);
  });
}
/*!
 * \brief The parameters of the repeat operator include
 * the number of repeating time and axis (optional).
 * The parameters will be later used to deduce the
 * output ndarray shape in bool RepeatShape() function.
 */
struct RepeatParam : public dmlc::Parameter<RepeatParam> {
  int repeats = 1;
  dmlc::optional<int> axis;
  DMLC_DECLARE_PARAMETER(RepeatParam) {
    DMLC_DECLARE_FIELD(repeats)
      .describe("The number of repetitions for each element.");
    DMLC_DECLARE_FIELD(axis)
      .set_default(dmlc::optional<int>())
      .describe("The axis along which to repeat values."
                " The negative numbers are interpreted counting from the backward."
                " By default, use the flattened input array,"
                " and return a flat output array.");
  }
  // Serialize the fields into a string map (used for op attribute caching).
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream repeats_s, axis_s;
    repeats_s << repeats;
    axis_s << axis;
    (*dict)["repeats"] = repeats_s.str();
    (*dict)["axis"] = axis_s.str();
  }
};
/*!
 * \brief Extract and validate the repeat op's user parameters.
 * \param param   parsed op parameters
 * \param ishape  input shape (used to bound-check the axis)
 * \param repeats out: non-negative repeat count
 * \param axisOpt out: optional axis, validated (after negative-axis
 *                normalization) to lie in [0, ishape.ndim()) when present
 */
inline void GetRepeatParams(const RepeatParam& param, const mxnet::TShape& ishape,
                            int* repeats, dmlc::optional<int>* axisOpt) {
  *repeats = param.repeats;
  CHECK_GE(*repeats, 0) << "repeats cannot be a negative number";
  *axisOpt = param.axis;
  if (!static_cast<bool>(*axisOpt)) {
    return;  // no axis supplied: nothing more to validate
  }
  const int ndims = ishape.ndim();
  int axis = axisOpt->value();
  if (axis < 0) {
    axis += ndims;  // negative axes count from the back
  }
  CHECK(axis >= 0 && axis < ndims) << "axis = " << axisOpt->value() << " out of bounds";
}
/*!
 * \brief Shape inference for repeat. With an axis, that dim is multiplied by
 * repeats; without one, the output is a flat 1D array of size
 * input.Size() * repeats. Zero repeats yield a 1-dim zero-size shape.
 */
inline bool RepeatOpShape(const nnvm::NodeAttrs& attrs,
                          mxnet::ShapeVector *in_attrs,
                          mxnet::ShapeVector *out_attrs) {
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& ishape = (*in_attrs)[0];
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  GetRepeatParams(param, ishape, &repeats, &axisOpt);
  // If 0 repeats, return an empty 1-dim, 0-size array
  if (0 == repeats) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(1, 0));
    return true;
  }
  // If repeats > 0, multiply the size of the corresponding axis by repeats
  if (static_cast<bool>(axisOpt)) {
    int ndims = ishape.ndim();
    int axis = axisOpt.value();
    if (axis < 0) {
      axis += ndims;  // negative axes count from the back
    }
    mxnet::TShape shape(ishape.ndim(), -1);
    for (int i = 0; i < ishape.ndim(); ++i) {
      if (i == axis) {
        shape[i] = repeats * ishape[i];
      } else {
        shape[i] = ishape[i];
      }
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  } else {  // If axis is not input by user, return a flat 1D array of size = in.size*repeats
    mxnet::TShape shape(1, ishape.Size() * repeats);
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  }
  return shape_is_known(out_attrs->at(0));
}
/*!
 * \brief Type inference for repeat: propagate the dtype forward from input
 * to output when known, otherwise backward from output to input.
 */
inline bool RepeatOpType(const nnvm::NodeAttrs& attrs,
                         std::vector<int>* in_attrs,
                         std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int in_type = in_attrs->at(0);
  const int out_type = out_attrs->at(0);
  if (in_type != -1) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_type);
  } else if (out_type != -1) {
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_type);
  }
  return true;
}
/*!
 * \brief Reshape the input and output tensors for
 * using broadcast_to to achieve the funcitonality
 * of operator repeat.
 * \return a pair of mxnet::TShape's, first is the reshaped
 * input shape, second is the reshaped output shape.
 */
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForRepeatOp(
    const mxnet::TShape& ishape,
    const dmlc::optional<int>& axisOpt,
    const int repeats) {
  if (static_cast<bool>(axisOpt)) {
    int axis = axisOpt.value();
    int ndim = ishape.ndim();
    if (axis < 0) {
      axis += ndim;  // negative axes count from the back
    }
    CHECK(axis >= 0 && axis < ishape.ndim()) << "Invalid input of axis";
    // reshape the input tensor by adding a dim at the (axis+1)-th dim
    mxnet::TShape rshape(ishape.ndim()+1, 1);
    // the shape we want to broadcast to
    mxnet::TShape bshape(rshape.ndim(), 1);
    int i = 0;
    // Dims up to and including axis are copied unchanged.
    while (i <= axis) {
      rshape[i] = bshape[i] = ishape[i];
      ++i;
    }
    // The inserted dim is 1 in the input view and `repeats` in the
    // broadcast target, so broadcasting replicates along it.
    rshape[i] = 1;
    bshape[i] = repeats;
    // Remaining dims are shifted one position to the right.
    while (i < ishape.ndim()) {
      rshape[i+1] = ishape[i];
      bshape[i+1] = ishape[i];
      ++i;
    }
    return std::make_pair(rshape, bshape);
  } else {
    // axis is not input by user
    // reshape the tensor into shape (ishape.Size(), 1)
    // then add one dim at axis = 1 and broadcast to
    // shape (ishape.Size(), repeats)
    mxnet::TShape rshape(2, 1);
    rshape[0] = ishape.Size();
    rshape[1] = 1;
    mxnet::TShape bshape(2, 1);
    bshape[0] = rshape[0];
    bshape[1] = repeats;
    return std::make_pair(rshape, bshape);
  }
}
/*!
 * \brief Forward computation of repeat: implemented by reshaping the input
 * to insert a unit dim after the repeat axis and broadcasting along it.
 */
template<typename xpu>
void RepeatOpForward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  const TBlob& iTBlob = inputs[0];
  const mxnet::TShape& ishape = iTBlob.shape_;
  if (!shape_is_known(ishape)) return;
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  GetRepeatParams(param, ishape, &repeats, &axisOpt);
  if (0 == repeats) return;  // empty output: nothing to compute
  std::pair<mxnet::TShape, mxnet::TShape> rshapes = \
      ReshapeInputOutputForRepeatOp(ishape, axisOpt, repeats);
  // reshaped input tblob (shares the input's data pointer)
  TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  // reshaped output tblob (shares the output's data pointer)
  TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs);
}
/*!
 * \brief Compute the gradient of the loss function
 * with respect to the input of the operator.
 * Backpropagation is employed to implement the
 * chain rule. Since forward is a broadcast, backward
 * reduces (sums) the gradient over the repeated dim.
 * \param inputs the gradient of the loss function
 * with respect to the outputs of the operator
 * \param outputs the gradient of the loss function
 * with respect to the inputs of the operator
 */
template<typename xpu>
void RepeatOpBackward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  const mxnet::TShape& oshape = outputs[0].shape_;
  if (!shape_is_known(oshape)) return;
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  GetRepeatParams(param, oshape, &repeats, &axisOpt);
  if (0 == repeats) return;  // forward produced nothing: no gradient to reduce
  std::pair<mxnet::TShape, mxnet::TShape> rshapes =
      ReshapeInputOutputForRepeatOp(oshape, axisOpt, repeats);
  // reshaped output grad tblob (shares the data pointer)
  TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  // reshaped input grad tblob (shares the data pointer)
  TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  // Sum-reduce the broadcast (repeated) dim back to the input grad shape.
  ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
      ctx, newInputs, req, newOutputs, rshapes.first);
}
struct TileParam : public dmlc::Parameter<TileParam> {
  // Number of repetitions per axis (numpy-style `reps` tuple).
  mxnet::Tuple<int> reps;
  DMLC_DECLARE_PARAMETER(TileParam) {
    DMLC_DECLARE_FIELD(reps)
      .describe("The number of times for repeating the tensor a. Each dim size of reps"
                " must be a positive integer."
                " If reps has length d, the result will have dimension of max(d, a.ndim);"
                " If a.ndim < d, a is promoted to be d-dimensional by prepending new axes."
                " If a.ndim > d, reps is promoted to a.ndim by pre-pending 1's to it.");
  }
  // Render the parameter into a string map (used when recording op attrs).
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream reps_s;
    reps_s << reps;
    (*dict)["reps"] = reps_s.str();
  }
};
// Shape inference for `tile`: combine the input shape with the `reps`
// tuple, aligned from the trailing dimensions (the shorter of the two is
// implicitly padded with leading 1's).
inline bool TileOpShape(const nnvm::NodeAttrs& attrs,
                        mxnet::ShapeVector *in_attrs,
                        mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const TileParam& param = nnvm::get<TileParam>(attrs.parsed);
  const mxnet::TShape& ishape = (*in_attrs)[0];
  if (!shape_is_known(ishape)) {
    return false;
  }
  const mxnet::Tuple<int>& reps = param.reps;
  // If reps is empty, the output shape is identical to the input shape.
  if (reps.ndim() == 0) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
    return true;
  }
  mxnet::TShape oshape(std::max(ishape.ndim(), reps.ndim()), -1);
  // Cursors walking ishape and reps from their last dimension.
  int i1 = ishape.ndim() - 1;
  int i2 = reps.ndim() - 1;
  for (int i = oshape.ndim() - 1; i >= 0; --i) {
    if (i1 >= 0 && i2 >= 0) {
      oshape[i] = ishape[i1--] * reps[i2--];
    } else if (i1 >= 0) {
      oshape[i] = ishape[i1--];
    } else if (i2 >= 0) {
      oshape[i] = reps[i2--];
    }
  }
  // If reps contains 0s, oshape is a zero-size shape.
  // Need to distinguish between np_shape mode and legacy mode.
  if (!Imperative::Get()->is_np_shape()) {
    common::ConvertToNumpyShape(&oshape);
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(oshape);
}
// Type inference for `tile`: the single output takes the input's dtype, or
// vice versa when only the output dtype is known.
inline bool TileOpType(const nnvm::NodeAttrs& attrs,
                       std::vector<int>* in_attrs,
                       std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  // Validate the output arity too, for consistency with the other type
  // functions in this file (DepthToSpaceOpType, SpaceToDepthOpType, ...).
  CHECK_EQ(out_attrs->size(), 1U);
  if ((*in_attrs)[0] != -1) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, (*in_attrs)[0]);
  } else if ((*out_attrs)[0] != -1) {
    TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]);
  }
  return true;
}
/*!
* \brief Reshape the input and output tensors for
* using broadcast_to to achieve the functionality
* of operator tile.
* \return a pair of mxnet::TShape's, first is the reshaped
* input shape, second is the reshaped output shape.
*/
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForTileOp(
  const mxnet::TShape& ishape,
  const mxnet::Tuple<int>& reps) {
  // An empty reps tuple makes tile the identity operation.
  if (reps.ndim() == 0) {
    return std::make_pair(ishape, ishape);
  }
  const int ndim = std::max(ishape.ndim(), reps.ndim());
  // bshape: the broadcast target, alternating (rep, dim) slots.
  mxnet::TShape bshape(ndim * 2, 1);
  // rshape: the input viewed with a unit axis inserted before each dim.
  mxnet::TShape rshape(ndim * 2, 1);
  int d = ishape.ndim() - 1;  // cursor over input dims, from the back
  int r = reps.ndim() - 1;    // cursor over reps, from the back
  for (int i = ndim * 2 - 1; i >= 0; --i) {
    if (i & 1) {
      // Odd slots carry the original dimension size (1 once exhausted).
      if (d >= 0) {
        rshape[i] = ishape[d--];
      } else {
        rshape[i] = 1;
      }
      bshape[i] = rshape[i];
    } else {
      // Even slots carry the repetition factor; the input views them as 1.
      bshape[i] = (r >= 0) ? reps[r--] : 1;
      rshape[i] = 1;
    }
  }
  return std::make_pair(rshape, bshape);
}
/*!
* \brief Implementation of tiling the input tensor a based
* on the user-input shape, reps.
* If a.ndim < reps.ndim, new axes are pre-pended to a. For example,
* the input tensor has shape (3,), and the reps is (2, 4); the input
* tensor would be reshaped to (1, 3).
* If a.ndim > reps.ndim, pre-pending 1's to reps. For example,
* the input tensor has shape (2, 3, 4, 5), and reps is (2, 2);
* the reps would be changed to (1, 1, 2, 2).
* Suppose we have a.ndim = reps.ndim now. To achieve tiling,
* we utilize the operator broadcast_to. For example, for a tensor
* of shape (2, 3, 4, 5) and reps (2, 8, 9, 3), we first reshape
* the tensor to the shape (1, 2, 1, 3, 1, 4, 1, 5) by adding
* one axis before each dimension. Then, we want to broadcast
* the new tensor to shape (2, 2, 8, 3, 9, 4, 3, 5). The final
* output tensor would have shape (2*2, 8*3, 9*4, 3*5).
*/
template<typename xpu>
void TileOpForward(const nnvm::NodeAttrs& attrs,
                   const OpContext& ctx,
                   const std::vector<TBlob>& inputs,
                   const std::vector<OpReqType>& req,
                   const std::vector<TBlob>& outputs) {
  // Forward pass of `tile`: view the input/output through the interleaved
  // shapes from ReshapeInputOutputForTileOp so the op becomes a broadcast.
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  if (inputs[0].Size() == 0) return;
  const mxnet::TShape& in_shape = inputs[0].shape_;
  const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
  // A zero anywhere in reps means an empty result: nothing to compute.
  for (int i = 0; i < reps.ndim(); ++i) {
    if (reps[i] == 0) return;
  }
  const std::pair<mxnet::TShape, mxnet::TShape> shapes =
      ReshapeInputOutputForTileOp(in_shape, reps);
  // Input viewed with the broadcast-source shape.
  const std::vector<TBlob> bcast_inputs = {
      TBlob(inputs[0].dptr_, shapes.first, inputs[0].dev_mask(),
            inputs[0].type_flag_, inputs[0].dev_id())};
  // Output viewed with the broadcast-target shape.
  const std::vector<TBlob> bcast_outputs = {
      TBlob(outputs[0].dptr_, shapes.second, outputs[0].dev_mask(),
            outputs[0].type_flag_, outputs[0].dev_id())};
  BroadcastCompute<xpu>(attrs, ctx, bcast_inputs, req, bcast_outputs);
}
/*!
* \brief Compute the gradient of the loss function
* with respect to the input of the operator.
* Backpropagation is employed to implement the
* chain rule.
* \param inputs the gradient of the loss function
* with respect to the outputs of the operator
* \param outputs the gradient of the loss function
* with respect to the inputs of the operator
*/
template<typename xpu>
void TileOpBackward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  // Backward pass of `tile`: sum-reduce the output gradient over the
  // repetition axes introduced by the forward broadcast.
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  if (inputs[0].Size() == 0) return;
  const mxnet::TShape& grad_shape = outputs[0].shape_;
  const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
  // A zero anywhere in reps means the forward output was empty.
  for (int i = 0; i < reps.ndim(); ++i) {
    if (reps[i] == 0) return;
  }
  const std::pair<mxnet::TShape, mxnet::TShape> shapes =
      ReshapeInputOutputForTileOp(grad_shape, reps);
  // Incoming output gradient, broadcast-shape view.
  const std::vector<TBlob> grad_out = {
      TBlob(inputs[0].dptr_, shapes.second, inputs[0].dev_mask(),
            inputs[0].type_flag_, inputs[0].dev_id())};
  // Input gradient (to be written), pre-broadcast view.
  const std::vector<TBlob> grad_in = {
      TBlob(outputs[0].dptr_, shapes.first, outputs[0].dev_mask(),
            outputs[0].type_flag_, outputs[0].dev_id())};
  ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
      ctx, grad_out, req, grad_in, shapes.first);
}
struct ReverseParam : public dmlc::Parameter<ReverseParam> {
  // Axes along which the input tensor is reversed.
  mxnet::Tuple<int> axis;
  DMLC_DECLARE_PARAMETER(ReverseParam) {
    DMLC_DECLARE_FIELD(axis)
    .describe("The axis which to reverse elements.");
  }
};
#define REVERSE_MAX_DIM 10U
// Kernel mapping each input element to its position after reversal.
struct reverse {
  // Given a flat element index `idx`, compute the flat index of the same
  // element after reversing along `nreversedim` axes. stride_[i] is the
  // size of the i-th reversed axis and trailing_[i] the product of the
  // dimensions after it (both filled in by ReverseOpForward).
  MSHADOW_XINLINE static index_t ReverseIndex(index_t idx,
                                              index_t nreversedim,
                                              const index_t * stride_,
                                              const index_t * trailing_) {
    index_t outputIndex = idx;
    for (index_t i = 0; i < nreversedim; ++i) {
      // Decompose outputIndex into (high, x, low) around axis i, where x is
      // the coordinate along that axis, then mirror x to (stride - 1 - x).
      const index_t low = outputIndex % trailing_[i];
      index_t high = outputIndex / trailing_[i];
      const index_t x = high%stride_[i];
      high /= stride_[i];
      outputIndex = (high*stride_[i] + stride_[i] - 1 - x)*trailing_[i] + low;
    }
    return outputIndex;
  }
#ifdef __CUDACC__
  template<typename DType>
  __device__ static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst,
                             const index_t * stride_,
                             const index_t * trailing_) {
    // Stage the small stride/trailing arrays in shared memory so the block
    // reads them from global memory only once.
    // NOTE(review): only threads with threadIdx.x < REVERSE_MAX_DIM load
    // entries, so this assumes blockDim.x >= REVERSE_MAX_DIM -- confirm
    // against the Kernel<...>::Launch block configuration.
    __shared__ index_t stride_share[REVERSE_MAX_DIM];
    __shared__ index_t trailing_share[REVERSE_MAX_DIM];
    if (threadIdx.x < REVERSE_MAX_DIM) {
      stride_share[threadIdx.x] = stride_[threadIdx.x];
      trailing_share[threadIdx.x] = trailing_[threadIdx.x];
    }
    __syncthreads();
    index_t new_idx = ReverseIndex(index, nreversedim, stride_share, trailing_share);
    dst[new_idx] = src[index];
  }
#else
  // CPU path: read stride/trailing directly.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst,
                                  const index_t * stride_,
                                  const index_t * trailing_) {
    index_t new_idx = ReverseIndex(index, nreversedim, stride_, trailing_);
    dst[new_idx] = src[index];
  }
#endif
};
// Forward pass of `reverse`: for each axis to be reversed, precompute the
// axis length (stride_) and the number of elements per step along that axis
// (trailing_), then launch the `reverse` kernel to scatter elements to
// their mirrored positions.
template<typename xpu>
void ReverseOpForward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mxnet_op;
  const ReverseParam& param = nnvm::get<ReverseParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  CHECK_LT(param.axis.ndim(), REVERSE_MAX_DIM);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const mxnet::TShape& ishape = inputs[0].shape_;
  std::vector<index_t> stride_(param.axis.ndim());
  std::vector<index_t> trailing_(param.axis.ndim());
  index_t reverse_index = 0;
  for (int axis : param.axis) {
    CHECK_LT(axis, ishape.ndim());
    // stride_ = size of the reversed axis; trailing_ = product of all
    // dimensions after it.
    stride_[reverse_index] = ishape[axis];
    trailing_[reverse_index] = 1;
    for (int i2 = axis + 1; i2 < ishape.ndim(); ++i2) {
      trailing_[reverse_index] *= ishape[i2];
    }
    reverse_index++;
  }
#ifdef __CUDACC__
  // On GPU, the kernel needs stride_/trailing_ in device memory: grab a
  // scratch workspace and copy both arrays over asynchronously on the
  // op's stream.
  mshadow::Tensor<xpu, 1, uint8_t> workspace =
    ctx.requested[0].get_space_typed<xpu, 1, uint8_t>(
      mshadow::Shape1(reverse_index * sizeof(index_t) * 2), s);
  auto stride_workspace = workspace.dptr_;
  auto trailing_workspace = workspace.dptr_ + reverse_index * sizeof(index_t);
  cudaMemcpyAsync(stride_workspace, thrust::raw_pointer_cast(stride_.data()),
                  stride_.size() * sizeof(index_t),
                  cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
  cudaMemcpyAsync(trailing_workspace, thrust::raw_pointer_cast(trailing_.data()),
                  trailing_.size() * sizeof(index_t),
                  cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
#endif
#ifdef __CUDACC__
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index,
      inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
      reinterpret_cast<index_t*>(stride_workspace), reinterpret_cast<index_t*>(trailing_workspace));
  });
#else
  // CPU path: the kernel reads the host vectors directly.
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index,
      inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
      stride_.data(), trailing_.data());
  });
#endif
}
struct StackParam : public dmlc::Parameter<StackParam> {
  // Output axis along which the inputs are stacked.
  int axis;
  // Number of arrays to stack.
  int num_args;
  DMLC_DECLARE_PARAMETER(StackParam) {
    DMLC_DECLARE_FIELD(axis)
    .set_default(0)
    .describe("The axis in the result array along which the input arrays are stacked.");
    DMLC_DECLARE_FIELD(num_args).set_lower_bound(1)
    .describe("Number of inputs to be stacked.");
  }
};
// Shape inference for `stack`: all inputs must share one shape `dshape`;
// the output gains one extra axis of length num_args at position `axis`.
inline bool StackOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  mxnet::TShape dshape;
  // Merge all input shapes into one common shape.
  for (const mxnet::TShape& i : (*in_attrs)) {
    shape_assign(&dshape, i);
  }
  if (!shape_is_known(dshape)) return false;
  mxnet::TShape oshape(dshape.ndim() + 1, -1);
  int axis = CheckAxis(param.axis, oshape.ndim());
  for (int i = 0; i < axis; ++i) {
    oshape[i] = dshape[i];
  }
  oshape[axis] = param.num_args;
  // Use a signed `int` index like the loop above: oshape.ndim() is an int,
  // and `index_t i` mixed signed/unsigned in the comparison.
  for (int i = axis + 1; i < oshape.ndim(); ++i) {
    oshape[i] = dshape[i-1];
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(oshape);
}
// Forward pass of `stack`: flatten every input to (leading, 1, trailing)
// and the output to (leading, num_args, trailing), then concatenate the
// inputs along the middle axis.
template<typename xpu>
void StackOpForward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, outputs[0].ndim());
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, {
    std::vector<Tensor<xpu, 3, DType> > data(inputs.size());
    Tensor<xpu, 3, DType> out;
    // leading/trailing = product of output dims before/after `axis`.
    size_t leading = 1, trailing = 1;
    for (int i = 0; i < axis; ++i) {
      leading *= outputs[0].shape_[i];
    }
    for (int i = axis + 1; i < outputs[0].ndim(); ++i) {
      trailing *= outputs[0].shape_[i];
    }
    size_t mid = outputs[0].shape_[axis];
    Shape<3> oshape = Shape3(leading, mid, trailing);
    out = outputs[0].get_with_shape<xpu, 3, DType>(oshape, s);
    for (size_t i = 0; i < inputs.size(); ++i) {
      // Each input becomes one slice of size 1 along the stack axis.
      Shape<3> dshape = Shape3(leading, 1, trailing);
      data[i] = inputs[i].get_with_shape<xpu, 3, DType>(dshape, s);
    }
    Concatenate(data, &out, 1, req[0]);
  })
}
// Backward pass of `stack`: split the output gradient along the stack axis
// into one slice per input gradient.
template<typename xpu>
void StackOpBackward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, inputs[0].ndim());
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
    std::vector<Tensor<xpu, 3, DType> > grad_in(outputs.size());
    Tensor<xpu, 3, DType> grad;
    // leading/trailing = product of incoming-gradient dims before/after
    // `axis`.
    size_t leading = 1, trailing = 1;
    for (int i = 0; i < axis; ++i) {
      leading *= inputs[0].shape_[i];
    }
    for (int i = axis + 1; i < inputs[0].ndim(); ++i) {
      trailing *= inputs[0].shape_[i];
    }
    size_t mid = inputs[0].shape_[axis];
    Shape<3> oshape = Shape3(leading, mid, trailing);
    grad = inputs[0].get_with_shape<xpu, 3, DType>(oshape, s);
    for (size_t i = 0; i < outputs.size(); ++i) {
      // Each input gradient is one slice of size 1 along the stack axis.
      Shape<3> dshape = Shape3(leading, 1, trailing);
      grad_in[i] = outputs[i].get_with_shape<xpu, 3, DType>(dshape, s);
    }
    Split(grad, &grad_in, 1, req);
  })
}
struct SqueezeParam : public dmlc::Parameter<SqueezeParam> {
  // Optional subset of unit axes to remove; when absent, every size-1 axis
  // is squeezed.
  dmlc::optional<mxnet::Tuple<int>> axis;
  DMLC_DECLARE_PARAMETER(SqueezeParam) {
    DMLC_DECLARE_FIELD(axis)
    .set_default(dmlc::optional<mxnet::Tuple<int>>())
    .describe("Selects a subset of the single-dimensional entries in the shape."
              " If an axis is selected with shape entry greater than one, an error is raised.");
  }
  // Render the parameter into a string map (used when recording op attrs).
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream axis_s;
    axis_s << axis;
    (*dict)["axis"] = axis_s.str();
  }
};
// Given a shape whose squeezed-out dims have been marked with -1,
// move all the -1 entries to the end of the shape array
// and keep the relative order of the remaining values.
// Returns the number of entries that are not -1 (the squeezed ndim).
inline size_t SqueezeShapeHelper(mxnet::TShape* shape) {
  // Stable-compact the shape in place: keep every non(-1) entry, in order,
  // at the front, pushing the -1 placeholders to the tail. The write
  // cursor `keep` ends up equal to the number of surviving dims.
  CHECK(shape != nullptr);
  size_t keep = 0;
  for (int read = 0; read < shape->ndim(); ++read) {
    if ((*shape)[read] != -1) {
      std::swap((*shape)[read], (*shape)[keep]);
      ++keep;
    }
  }
  return keep;
}
// Shape inference for `squeeze`: mark every axis to be removed with -1,
// then compact the shape with SqueezeShapeHelper.
inline bool SqueezeShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const SqueezeParam& param = nnvm::get<SqueezeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = in_attrs->at(0);
  const int dndim = dshape.ndim();
  if (!shape_is_known(dshape)) return false;
  mxnet::TShape oshape = dshape;
  if (param.axis.has_value()) {
    // preprocess axis: normalize negatives and validate every entry.
    mxnet::Tuple<int> axes = param.axis.value();
    for (int i = 0; i < axes.ndim(); ++i) {
      if (axes[i] < 0) {
        axes[i] += dndim;
        CHECK_GE(axes[i], 0)
          << "axis " << axes[i] - dndim << " is out of bounds for array of dimension " << dndim;
      }
      CHECK_LT(axes[i], dndim)
        << "axis " << axes[i] << " is out of bounds for array of dimension " << dndim;
      CHECK_EQ(dshape[axes[i]], 1)
        << "cannot select an axis to squeeze out which has size="
        << dshape[axes[i]] << " not equal to one";
      // A dim already set to -1 means the same axis was listed twice.
      CHECK_NE(oshape[axes[i]], -1) << "duplicate value in axis";
      oshape[axes[i]] = -1;
    }
  } else {
    // No explicit axes: squeeze every size-1 dimension.
    for (int i = 0; i < oshape.ndim(); ++i) {
      if (oshape[i] == 1) oshape[i] = -1;
    }
  }
  size_t oshape_size = SqueezeShapeHelper(&oshape);
  if (oshape_size == 0) {  // corner case when dshape is (1, 1, 1, 1)
    oshape[0] = 1;
    oshape_size = 1;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(oshape.data(), oshape.data()+oshape_size));
  return true;
}
struct DepthToSpaceParam : public dmlc::Parameter<DepthToSpaceParam> {
  // Side length of the square block moved between the depth (channel)
  // dimension and the two spatial dimensions. Reused by space_to_depth.
  int block_size;
  DMLC_DECLARE_PARAMETER(DepthToSpaceParam) {
    DMLC_DECLARE_FIELD(block_size)
      // Fixed typo in the user-facing description: "." -> "," between the
      // two block_size factors.
      .describe("Blocks of [block_size, block_size] are moved");
  }
};
// Shape inference for depth_to_space:
// (N, C, H, W) -> (N, C / block^2, H * block, W * block).
inline bool DepthToSpaceOpShape(const nnvm::NodeAttrs& attrs,
                                mxnet::ShapeVector* in_attrs,
                                mxnet::ShapeVector* out_attrs) {
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Depth To Space requires exactly 4D tensor";
  mxnet::TShape expected_out(4, -1);
  mxnet::TShape& in_shape = in_attrs->at(0);
  int block = param.block_size;
  // Enforce positivity to match the error message; the previous
  // CHECK_NE(block, 0) let negative block sizes slip through.
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_EQ(in_shape[1] % (block * block), 0)
    << "Cannot perform Depth To Space operation on the specified tensor."
       " Dimension:1(depth dimension) should be a multiple of 'block^2'";
  CHECK_NE(in_shape[0], 0)
    << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
  CHECK_NE(in_shape[2], 0)
    << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  CHECK_NE(in_shape[3], 0)
    << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] / (block * block);
  // Spatial dims (2 and 3) each grow by a factor of block.
  int i = 2;
  while (i < expected_out.ndim()) {
    expected_out[i] = in_shape[i] * block;
    ++i;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}
inline bool DepthToSpaceOpType(const nnvm::NodeAttrs& attrs,
                               std::vector<int>* in_attrs,
                               std::vector<int>* out_attrs) {
  // dtype propagates both ways between the single input and single output.
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const int in_dtype = in_attrs->at(0);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype);
  const int out_dtype = out_attrs->at(0);
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
  // Inference succeeded iff the output dtype is now known.
  return out_attrs->at(0) != -1;
}
/*!
* \brief This function updates the value of input index from where the data element
* needs to be fetched and written out to the ith location in output tensor
* \param index_position index within offset array to get offset of given dimension
* \param dim_size size of current dimension
* \param idx output tensor index
* \param inp_index index within input tensor from where value is retrieved
* \param offset_arr array containing the linear offset of input tensor
*/
MSHADOW_XINLINE void update_index(index_t index_position, index_t dim_size, index_t *idx,
                                  index_t *inp_index, const index_t* offset_arr) {
  // Peel one dimension off the running output index: the coordinate along
  // that dimension is (*idx % dim_size); add its contribution (scaled by
  // the input stride at index_position) to the input offset, then shrink
  // *idx to cover the remaining outer dimensions.
  const index_t coord = *idx % dim_size;
  *inp_index += coord * offset_arr[index_position];
  *idx /= dim_size;
}
/*!
* \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
* (0, 3, 4, 1, 5, 2) by computing linear index within input tensor to be mapped
* to the ith index of output tensor
* \param i tensor index
* \param out_data output tensor
* \param in_data input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size array containing the size of each dimension of input tensor
* \param offset_arr array containing the linear offset of input tensor
*/
template<int req>
struct depth_to_space_forward {
  // For output element i, peel the dims of the implicit 6-D view
  // innermost-first; each update_index call consumes one dimension and
  // accumulates the input-side offset according to the
  // (0, 1, 2, 3, 4, 5) -> (0, 3, 4, 1, 5, 2) transpose documented above.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data,
                                  const int block, const index_t* size, const index_t* offset_arr) {
    index_t inp_index = 0, idx = i, dim_size;
    dim_size = block;
    update_index(2, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[3];
    update_index(5, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(1, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[2];
    update_index(4, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[1] / (block * block);
    update_index(3, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[0];
    update_index(0, dim_size, &idx, &inp_index, offset_arr);
    // Write (or accumulate, per req) the gathered input element.
    KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
  }
};
/*!
* \brief This function calculates the linear offset for each dimension of
* input tensor and stores them in an array, which is later used in
* performing depth_to_space operation
* \param i global thread id
* \param offset_arr array to be populated with offset values
* \param size array to be populated with size of each dimension of input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size0 size of Dim 0 of input tensor
* \param size1 size of Dim 1 of input tensor
* \param size2 size of Dim 2 of input tensor
* \param size3 size of Dim 3 of input tensor
*/
template<int req>
struct compute_offset_for_depth_to_space {
  // Single-thread kernel: record the four input dims in `size` and the
  // linear strides of the 6-D view (N, b, b, C/b^2, H, W) in `offset_arr`.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block,
                                  const index_t size0, const index_t size1, const index_t size2,
                                  const index_t size3) {
    size[0] = size0;
    size[1] = size1;
    size[2] = size2;
    size[3] = size3;
    // Strides built innermost (W) outward; the depth dim contributes the
    // factors (b, b, C/b^2).
    offset_arr[5] = 1;
    offset_arr[4] = offset_arr[5] * size[3];
    offset_arr[3] = offset_arr[4] * size[2];
    offset_arr[2] = offset_arr[3] * size[1] / (block * block);
    offset_arr[1] = offset_arr[2] * block;
    offset_arr[0] = offset_arr[1] * block;
  }
};
// Forward pass of depth_to_space: compute the 6-D view strides in a small
// scratch workspace, then launch the element-wise gather kernel.
template<typename xpu>
void DepthToSpaceOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  using namespace mxnet_op;
  int block = param.block_size;
  // Workspace holds 10 index_t values: 6 strides followed by 4 dim sizes.
  mshadow::Tensor<xpu, 1, char> workspace =
    ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
  char* workspace_curr_ptr = workspace.dptr_;
  index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
  index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);
  MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      // Single-thread kernel fills offset_arr/size on the right device.
      Kernel<compute_offset_for_depth_to_space<req_type>, xpu>::Launch(
        s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
        in_data.shape_[2], in_data.shape_[3]);
      // One thread per output element gathers from the computed offsets.
      Kernel<depth_to_space_forward<req_type>, xpu>::Launch(
        s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
        block, size, offset_arr);
    });
  });
}
// Shape inference for space_to_depth:
// (N, C, H, W) -> (N, C * block^2, H / block, W / block).
inline bool SpaceToDepthOpShape(const nnvm::NodeAttrs& attrs,
                                mxnet::ShapeVector* in_attrs,
                                mxnet::ShapeVector* out_attrs) {
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Space To Depth requires exactly 4D tensor";
  mxnet::TShape expected_out(in_attrs->at(0).ndim(), -1);
  mxnet::TShape& in_shape = in_attrs->at(0);
  int block = param.block_size;
  // Enforce positivity to match the error message; the previous
  // CHECK_NE(block, 0) let negative block sizes slip through.
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
  CHECK_NE(in_shape[0], 0)
    << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_NE(in_shape[2], 0)
    << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  // Error messages previously said "Depth To Space"; this is Space To Depth.
  CHECK_EQ(in_shape[2] % block, 0)
    << "Cannot perform Space To Depth operation on the specified tensor."
       " Dimension:2(1st Space dimension) should be a multiple of 'block' ";
  CHECK_NE(in_shape[3], 0)
    << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  CHECK_EQ(in_shape[3] % block, 0)
    << "Cannot perform Space To Depth operation on the specified tensor."
       " Dimension:3(2nd space dimension) should be a multiple of 'block' ";
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] * block * block;
  // Spatial dims (2 and 3) each shrink by a factor of block.
  int i = 2;
  while (i < expected_out.ndim()) {
    expected_out[i] = in_shape[i] / block;
    ++i;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}
inline bool SpaceToDepthOpType(const nnvm::NodeAttrs& attrs,
                               std::vector<int>* in_attrs,
                               std::vector<int>* out_attrs) {
  // Single input / single output; the dtype flows in both directions so
  // either side may seed the other.
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const int forward_dtype = in_attrs->at(0);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, forward_dtype);
  const int backward_dtype = out_attrs->at(0);
  TYPE_ASSIGN_CHECK(*in_attrs, 0, backward_dtype);
  return out_attrs->at(0) != -1;
}
/*!
* \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
* (0, 3, 5, 1, 2, 4) by computing linear index within input tensor to be mapped
* to the ith index of output tensor
* \param i tensor index
* \param out_data output tensor
* \param in_data input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size array containing the size of each dimension of input tensor
* \param offset_arr array containing the linear offset of input tensor
*/
template<int req>
struct space_to_depth_forward {
  // For output element i, peel the dims of the implicit 6-D view
  // innermost-first; each update_index call consumes one dimension and
  // accumulates the input-side offset according to the
  // (0, 1, 2, 3, 4, 5) -> (0, 3, 5, 1, 2, 4) transpose documented above.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data, const int block,
                                  const index_t* size, const index_t* offset_arr) {
    index_t inp_index = 0, idx = i, dim_size;
    dim_size = size[3] / block;
    update_index(4, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[2] / block;
    update_index(2, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[1];
    update_index(1, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(5, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(3, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[0];
    update_index(0, dim_size, &idx, &inp_index, offset_arr);
    // Write (or accumulate, per req) the gathered input element.
    KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
  }
};
/*!
* \brief This function calculates the linear offset for each dimension of
* input tensor and stores them in an array, which is later used in
* performing space_to_depth operation
* \param i global thread id
* \param offset_arr array to be populated with offset values
* \param size array to be populated with size of each dimension of input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size0 size of Dim 0 of input tensor
* \param size1 size of Dim 1 of input tensor
* \param size2 size of Dim 2 of input tensor
* \param size3 size of Dim 3 of input tensor
*/
template<int req>
struct compute_offset_for_space_to_depth {
  // Single-thread kernel: record the four input dims in `size` and the
  // linear strides of the 6-D view (N, C, H/b, b, W/b, b) in `offset_arr`.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block,
                                  const index_t size0, const index_t size1,
                                  const index_t size2, const index_t size3) {
    size[0] = size0;
    size[1] = size1;
    size[2] = size2;
    size[3] = size3;
    // Strides built innermost outward; each spatial dim splits into
    // (dim / block, block).
    offset_arr[5] = 1;
    offset_arr[4] = offset_arr[5] * block;
    offset_arr[3] = offset_arr[4] * size[3] / block;
    offset_arr[2] = offset_arr[3] * block;
    offset_arr[1] = offset_arr[2] * size[2] / block;
    offset_arr[0] = offset_arr[1] * size[1];
  }
};
// Forward pass of space_to_depth: compute the 6-D view strides in a small
// scratch workspace, then launch the element-wise gather kernel.
template<typename xpu>
void SpaceToDepthOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  using namespace mxnet_op;
  int block = param.block_size;
  // Workspace holds 10 index_t values: 6 strides followed by 4 dim sizes.
  mshadow::Tensor<xpu, 1, char> workspace =
    ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
  char* workspace_curr_ptr = workspace.dptr_;
  index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
  index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);
  MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      // Single-thread kernel fills offset_arr/size on the right device.
      Kernel<compute_offset_for_space_to_depth<req_type>, xpu>::Launch(
        s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
        in_data.shape_[2], in_data.shape_[3]);
      // One thread per output element gathers from the computed offsets.
      Kernel<space_to_depth_forward<req_type>, xpu>::Launch(
        s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
        block, size, offset_arr);
    });
  });
}
namespace split_enum {
// Index of the single data input to the split operator.
enum SplitOpInputs {kData};
}  // namespace split_enum
struct SplitParam : public dmlc::Parameter<SplitParam> {
  // Explicit boundary indices along `axis` (used when sections == 0).
  mxnet::TShape indices;
  // Axis along which to split; may be negative.
  int axis;
  // If true, drop the split axis from each output (requires size-1 pieces).
  bool squeeze_axis;
  // Number of equal sections; 0 means "use `indices` instead".
  int sections;
  DMLC_DECLARE_PARAMETER(SplitParam) {
    DMLC_DECLARE_FIELD(indices)
    .describe("Indices of splits. The elements should denote the boundaries of at which split"
              " is performed along the `axis`.");
    DMLC_DECLARE_FIELD(axis).set_default(1)
    .describe("Axis along which to split.");
    DMLC_DECLARE_FIELD(squeeze_axis).set_default(0)
    .describe("If true, Removes the axis with length 1 from the shapes of the output arrays."
              " **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1"
              " only along the `axis` which it is split."
              " Also `squeeze_axis` can be set to ``true``"
              " only if ``input.shape[axis] == num_outputs``.");
    DMLC_DECLARE_FIELD(sections).set_default(0)
    .describe("Number of sections if equally splitted. Default to 0 which means split by indices.");
  }
  // Render the parameters into a string map (used when recording op attrs).
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream indices_s, axis_s, squeeze_axis_s, sections_s;
    indices_s << indices;
    axis_s << axis;
    squeeze_axis_s << squeeze_axis;
    sections_s << sections;
    (*dict)["indices"] = indices_s.str();
    (*dict)["axis"] = axis_s.str();
    (*dict)["squeeze_axis"] = squeeze_axis_s.str();
    (*dict)["sections"] = sections_s.str();
  }
};  // struct SplitParam
inline mxnet::TShape GetSplitIndices(const mxnet::TShape& ishape, int axis, int sections) {
  // Boundary indices of an as-even-as-possible split of ishape[axis] into
  // `sections` parts: the first (ishape[axis] % sections) parts receive one
  // extra element each.
  mxnet::TShape indices(sections + 1, -1);
  indices[0] = 0;
  const int64_t small_sz = static_cast<int64_t>(ishape[axis] / sections);
  const int64_t big_sz = small_sz + 1;
  const int num_big = ishape[axis] % sections;
  for (int i = 0; i < sections; ++i) {
    indices[i + 1] = (i < num_big) ? big_sz * (i + 1)
                                   : indices[i] + small_sz;
  }
  return indices;
}
inline bool SplitOpType(const nnvm::NodeAttrs& attrs,
                        std::vector<int>* in_attrs,
                        std::vector<int>* out_attrs) {
  // Every output of split inherits the dtype of the single input, which
  // must already be known.
  CHECK_EQ(in_attrs->size(), 1U);
  const int dtype = (*in_attrs)[0];
  CHECK_NE(dtype, -1) << "First input must have specified type";
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  const int num_outputs = (param.sections > 0) ? param.sections : param.indices.ndim();
  // Equivalent to clear() followed by num_outputs push_backs of dtype.
  out_attrs->assign(num_outputs, dtype);
  return true;
}
// Shared shape-inference logic for split, given an already-normalized
// (non-negative) axis: assigns a shape to every output, then back-infers
// the input shape from the outputs.
inline bool SplitOpShapeImpl(const nnvm::NodeAttrs& attrs,
                             mxnet::ShapeVector* in_attrs,
                             mxnet::ShapeVector* out_attrs,
                             const int real_axis) {
  using namespace mshadow;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  mxnet::TShape dshape = in_attrs->at(split_enum::kData);
  mxnet::TShape ishape = in_attrs->at(split_enum::kData);
  // Boundary indices: derived from `sections` or given explicitly.
  const mxnet::TShape indices =
    (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  int num_outputs = (param.sections > 0) ? indices.ndim() - 1 : indices.ndim();
  // Pre-compute squeezed output shape for future usage
  mxnet::TShape squeezed_dshape = dshape;
  for (int d = real_axis; d < squeezed_dshape.ndim() - 1; ++d) {
    squeezed_dshape[d] = squeezed_dshape[d+1];
  }
  squeezed_dshape = mxnet::TShape(&squeezed_dshape[0], &squeezed_dshape[squeezed_dshape.ndim()-1]);
  // Assign shape to every output
  for (int i = 0; i < num_outputs; ++i) {
    int start = indices[i];
    // The last piece extends to the end of the split axis.
    int end = (i < num_outputs - 1) ? indices[i + 1] : ishape[real_axis];
    if (ishape[real_axis] == 0U) {
      end = start;
    } else {
      CHECK(start <= end)
        << "start " << start << " is not less than end " << end << "for subarray " << i;
      CHECK(end <= ishape[real_axis])
        << "end " << end << " is no less than the size of the axis " << ishape[real_axis];
    }
    dshape[real_axis] = (end - start);
    if (param.squeeze_axis) {
      // Squeezing requires each piece to have length exactly 1.
      CHECK_EQ(end - start, 1U) << "expected axis size of 1 but got " << end - start;
      SHAPE_ASSIGN_CHECK(*out_attrs, i, squeezed_dshape);
    } else {
      SHAPE_ASSIGN_CHECK(*out_attrs, i, dshape);
    }
  }
  // Back-infer the input shape from the (possibly refined) output shapes.
  mxnet::TShape back_calculate_dshape = ishape;
  back_calculate_dshape[real_axis] = 0;
  for (int d = 0; d < real_axis; ++d) {
    back_calculate_dshape[d] = (*out_attrs)[0][d];
  }
  if (param.squeeze_axis) {
    // Each squeezed output contributed one element along the split axis.
    back_calculate_dshape[real_axis] = num_outputs;
  } else {
    // Split-axis length is the sum of the piece lengths.
    for (int i = 0; i < num_outputs; ++i) {
      back_calculate_dshape[real_axis] += (*out_attrs)[i][real_axis];
    }
  }
  for (int d = real_axis + 1; d < ishape.ndim(); ++d) {
    if (param.squeeze_axis) {
      // Output dims after the split axis are shifted down by one.
      back_calculate_dshape[d] = (*out_attrs)[0][d - 1];
    } else {
      back_calculate_dshape[d] = (*out_attrs)[0][d];
    }
  }
  SHAPE_ASSIGN_CHECK(*in_attrs, split_enum::kData, back_calculate_dshape);
  return true;
}
// Shape inference entry point for split: validates the axis (positive or
// negative form), normalizes it to a non-negative index, and delegates to
// SplitOpShapeImpl.  Returns false when the input rank is still unknown.
inline bool SplitOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  using namespace mshadow;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  mxnet::TShape dshape = in_attrs->at(split_enum::kData);
  if (!mxnet::ndim_is_known(dshape)) return false;
  if (param.axis >= 0) {
    CHECK_LT(param.axis, dshape.ndim());
  } else {
    // A negative axis counts from the end and is valid iff axis + ndim >= 0.
    // (The previous check `axis + ndim < ndim` reduced to `axis < 0`, which
    // is vacuously true here, so out-of-range values such as -(ndim + 1)
    // slipped through and produced a negative real_axis.)
    CHECK_GE(param.axis + dshape.ndim(), 0);
  }
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += dshape.ndim();
  }
  return SplitOpShapeImpl(attrs, in_attrs, out_attrs, real_axis);
}
struct SplitKernel {
  /*!
   * \brief Map function for forward split_v2 operator
   * \param i global thread id
   * \param in_data ptr to input buffer
   * \param out_data ptr to ptr of outputs buffer
   * \param indices ptr to indices buffer
   * \param num_sections # of sections after split
   * \param axis_size size of axis to be splitted on
   * \param trailing_size step size within the data buffer of the axis to be splitted on
   */
  template<typename DType>
  static MSHADOW_XINLINE void Map(size_t i,
                                  const DType *in_data, DType** out_data, const size_t* indices,
                                  const size_t num_sections, const size_t axis_size,
                                  const size_t trailing_size) {
    // Coordinate of element i along the split axis.
    size_t idx = i / trailing_size % axis_size;
    // Linear search: after the loop, `target` is the last section whose
    // start offset is <= idx, i.e. the output blob this element belongs to.
    size_t target = 0;
    for (size_t section = 0;
         section < num_sections && indices[section] <= idx;
         target = section++) {}
    DType* target_data = out_data[target];
    // Offset along the axis inside the chosen section.
    const size_t mid_idx = idx - indices[target];
    // Flat indices into the leading (before axis) and trailing (after axis)
    // dimensions.
    const size_t head_idx = i / (trailing_size * axis_size);
    const size_t tail_idx = i % trailing_size;
    // Caller guarantees indices has num_sections + 1 entries, the last one
    // being the axis extent, so indices[target + 1] is always valid.
    const size_t section_size = indices[target + 1] - indices[target];
    // Flat destination offset inside the target output blob.
    const size_t target_idx =
      head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
    target_data[target_idx] = in_data[i];
  }
};
struct ConcatenateKernel {
  /*!
   * \brief Map function for backward split_v2 operator
   * \param i global thread id
   * \param out_grad ptr to ptr of out grads buffer
   * \param in_grad ptr to input grad buffer
   * \param indices ptr to indices buffer
   * \param num_sections # of sections after split
   * \param axis_size size of axis to be splitted on
   * \param trailing_size step size within the data buffer of the axis to be splitted on
   */
  template<typename DType>
  static MSHADOW_XINLINE void Map(size_t i,
                                  DType** out_grad, DType* in_grad, const size_t* indices,
                                  const size_t num_sections, const size_t axis_size,
                                  const size_t trailing_size) {
    // Coordinate of element i along the (concatenated) split axis.
    size_t idx = i / trailing_size % axis_size;
    // Linear search: after the loop, `src` is the last section whose start
    // offset is <= idx, i.e. the out-grad blob this element came from.
    size_t src = 0;
    for (size_t section = 0;
         section < num_sections && indices[section] <= idx;
         src = section++) {}
    DType* src_grad = out_grad[src];
    // Offset along the axis inside the source section.
    const size_t mid_idx = idx - indices[src];
    // Flat indices into the leading (before axis) and trailing (after axis)
    // dimensions.
    const size_t head_idx = i / (trailing_size * axis_size);
    const size_t tail_idx = i % trailing_size;
    // Caller guarantees indices has num_sections + 1 entries, the last one
    // being the axis extent, so indices[src + 1] is always valid.
    const size_t section_size = indices[src + 1] - indices[src];
    // Flat source offset inside the gradient blob of that section.
    const size_t src_idx =
      head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
    in_grad[i] = src_grad[src_idx];
  }
};
/*!
 * \brief Forward split along `real_axis` (already normalized to be
 *        non-negative): scatters the input into the output blobs with a
 *        single element-wise kernel launch.
 */
template<typename xpu>
inline void SplitOpForwardImpl(const nnvm::NodeAttrs& attrs,
                               const OpContext& ctx,
                               const std::vector<TBlob>& inputs,
                               const std::vector<OpReqType>& req,
                               const std::vector<TBlob>& outputs,
                               const int real_axis) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& input_data = inputs[split_enum::kData];
  CHECK_LT(real_axis, input_data.ndim());
  // Extent of the split axis and the product of all trailing dimensions.
  // The kernel folds the leading dimensions into the flat element index, so
  // the previously computed (and never used) `leading` size has been removed.
  size_t mid = input_data.shape_[real_axis];
  size_t trailing = 1;
  for (int i = real_axis + 1; i < input_data.ndim(); ++i) {
    trailing *= input_data.shape_[i];
  }
  // Section start offsets terminated by the axis extent, so that
  // indices[k + 1] - indices[k] is always a valid section size.
  size_t workspace_size = 0;
  const mxnet::TShape& ishape = input_data.shape_;
  const mxnet::TShape split_pts =
      (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  std::vector<size_t> indices;
  for (const auto& section : split_pts) {
    indices.push_back(section);
  }
  if (param.sections == 0) {
    // User-supplied indices lack the terminating axis extent; append it.
    indices.push_back(ishape[real_axis]);
  }
  workspace_size += indices.size() * sizeof(size_t);
  MSHADOW_TYPE_SWITCH(input_data.type_flag_, DType, {
    std::vector<DType*> output_data;
    for (const TBlob& data : outputs) {
      output_data.push_back(data.dptr<DType>());
    }
    workspace_size += output_data.size() * sizeof(DType*);
    // A single workspace buffer holds the indices followed by the output
    // pointers; both are copied to the target device before the launch.
    Tensor<xpu, 1, char> workspace =
        ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
    Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
    Tensor<xpu, 1, size_t> indices_xpu_tensor(
        reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
    Tensor<cpu, 1, DType*> ptrs_cpu_tensor(output_data.data(), Shape1(output_data.size()));
    Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
        reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
        Shape1(output_data.size()));
    mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
    mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
    Kernel<SplitKernel, xpu>::Launch(
        s, input_data.Size(), input_data.dptr<DType>(), ptrs_xpu_tensor.dptr_,
        indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
  });
}
/*!
 * \brief Forward entry point for split: validates input/output counts,
 *        normalizes a negative axis, and delegates to SplitOpForwardImpl.
 */
template<typename xpu>
inline void SplitOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), 1U);
  const int expected_outputs =
      (param.sections > 0) ? param.sections : param.indices.ndim();
  CHECK_EQ(outputs.size(), expected_outputs);
  const TBlob& input_data = inputs[split_enum::kData];
  // Map a negative axis to its non-negative equivalent.
  const int axis =
      (param.axis < 0) ? param.axis + input_data.ndim() : param.axis;
  SplitOpForwardImpl<xpu>(attrs, ctx, inputs, req, outputs, axis);
}
/*!
 * \brief Backward split along `real_axis` (already normalized to be
 *        non-negative): gathers the output gradients back into the input
 *        gradient with a single element-wise kernel launch.
 */
template<typename xpu>
inline void SplitOpBackwardImpl(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs,
                                const int real_axis) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  TBlob input_grad = outputs[split_enum::kData];
  CHECK_LT(real_axis, input_grad.ndim());
  // Extent of the split axis and the product of all trailing dimensions.
  // The kernel folds the leading dimensions into the flat element index, so
  // the previously computed (and never used) `leading` size has been removed.
  size_t mid = input_grad.shape_[real_axis];
  size_t trailing = 1;
  for (int i = real_axis + 1; i < input_grad.ndim(); ++i) {
    trailing *= input_grad.shape_[i];
  }
  // Section start offsets terminated by the axis extent, so that
  // indices[k + 1] - indices[k] is always a valid section size.
  size_t workspace_size = 0;
  const mxnet::TShape& ishape = input_grad.shape_;
  const mxnet::TShape split_pts =
      (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  std::vector<size_t> indices;
  for (const auto& section : split_pts) {
    indices.push_back(section);
  }
  if (param.sections == 0) {
    // User-supplied indices lack the terminating axis extent; append it.
    indices.push_back(ishape[real_axis]);
  }
  workspace_size += indices.size() * sizeof(size_t);
  MSHADOW_TYPE_SWITCH(input_grad.type_flag_, DType, {
    std::vector<DType*> out_grads;
    for (const TBlob& output_grad : inputs) {
      out_grads.push_back(output_grad.dptr<DType>());
    }
    workspace_size += out_grads.size() * sizeof(DType*);
    // A single workspace buffer holds the indices followed by the out-grad
    // pointers; both are copied to the target device before the launch.
    Tensor<xpu, 1, char> workspace =
        ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
    Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
    Tensor<xpu, 1, size_t> indices_xpu_tensor(
        reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
    Tensor<cpu, 1, DType*> ptrs_cpu_tensor(out_grads.data(), Shape1(inputs.size()));
    Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
        reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
        Shape1(inputs.size()));
    mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
    mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
    Kernel<ConcatenateKernel, xpu>::Launch(
        s, input_grad.Size(), ptrs_xpu_tensor.dptr_, input_grad.dptr<DType>(),
        indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
  });
}
/*!
 * \brief Backward entry point for split: validates gradient counts,
 *        normalizes a negative axis, and delegates to SplitOpBackwardImpl.
 */
template<typename xpu>
inline void SplitOpBackward(const nnvm::NodeAttrs& attrs,
                            const OpContext& ctx,
                            const std::vector<TBlob>& inputs,
                            const std::vector<OpReqType>& req,
                            const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  // Fixed typo in the diagnostic ("mush match" -> "must match").
  CHECK_EQ(inputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim())
      << "out grad vector size must match the output size";
  CHECK_EQ(outputs.size(), 1U);
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += outputs[split_enum::kData].ndim();
  }
  SplitOpBackwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis);
}
// Number of outputs produced by split: the requested section count when
// splitting evenly, otherwise one output per entry in the index list.
inline uint32_t SplitNumOutputs(const NodeAttrs& attrs) {
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  if (param.sections > 0) {
    return param.sections;
  }
  return param.indices.ndim();
}
} // namespace op
} // namespace mxnet
namespace std {
template<>
struct hash<mxnet::op::TransposeParam> {
  /// Hash for TransposeParam so it can key unordered containers.
  /// operator() is now const: the standard requires hash function objects
  /// to be callable on const instances, and the container stores a const
  /// hasher, so the previous non-const overload would not compile there.
  size_t operator()(const mxnet::op::TransposeParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axes);
    return ret;
  }
};
template<>
struct hash<mxnet::op::ReshapeParam> {
  /// Hash for ReshapeParam combining all four fields that define it.
  /// operator() is now const: the standard requires hash function objects
  /// to be callable on const instances.
  size_t operator()(const mxnet::op::ReshapeParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.target_shape);
    ret = dmlc::HashCombine(ret, val.keep_highest);
    ret = dmlc::HashCombine(ret, val.shape);
    ret = dmlc::HashCombine(ret, val.reverse);
    return ret;
  }
};
template<>
struct hash<mxnet::op::ExpandDimParam> {
  /// Hash for ExpandDimParam (hashes the single `axis` field).
  /// operator() is now const: the standard requires hash function objects
  /// to be callable on const instances.
  size_t operator()(const mxnet::op::ExpandDimParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axis);
    return ret;
  }
};
} // namespace std
#endif // MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
|
GB_unop__creal_fp64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__creal_fp64_fc64
// op(A') function: GB_unop_tran__creal_fp64_fc64
// C type: double
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = (aij)
// unaryop: cij = creal (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = creal (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = (aij) ; \
Cx [pC] = creal (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CREAL || GxB_NO_FP64 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator elementwise: Cx [p] = creal (Ax [p]).  The cast
// and the operator are fused; Cx and Ax may be aliased because each entry is
// read once and written once at the same position p.
GrB_Info GB_unop_apply__creal_fp64_fc64
(
    double *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    int64_t anz,              // number of entries to process
    int nthreads              // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_* controls; caller falls back to the
    // generic implementation
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // static schedule: every entry costs the same, so even chunks balance
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC64_t aij = Ax [p] ;
        GxB_FC64_t z = (aij) ;
        Cx [p] = creal (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast GxB_FC64_t to double, and apply
// creal.  The actual work is performed by the generic template included from
// GB_unop_transpose.c, which is specialized through the GB_* macros defined
// earlier in this file.
GrB_Info GB_unop_tran__creal_fp64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice                              // number of slices of A
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_* controls; caller falls back to the
    // generic implementation
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval` values.
 * Y is used as scratch space and may be modified by the call.
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize y so that (x->tv_usec - y->tv_usec) lands in a range where
   * the plain subtraction below is valid. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* After normalization tv_usec subtracts cleanly. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Negative iff x predates y in whole seconds after normalization. */
  return x->tv_sec < y->tv_sec;
}
/* Benchmark driver: runs the order-1 3D 7-point variable-coefficient stencil
 * TESTS times and reports the best wall-clock time.
 * Usage: ./a.out Nx Ny Nz [Nt]  (interior sizes; 2 halo points are added). */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Defaults (incl. the 2 halo points) so every size is defined even when
   * arguments are omitted -- previously Nx/Ny/Nz/Nt were read
   * uninitialized when argc was too small. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays: A holds the two time levels, coef the 7 weights
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2; m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7; m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 4;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;

  /* Initialize the FULL grid, including the index-0 halo planes that the
   * stencil reads (the old loops started at 1, leaving them uninitialized),
   * and seed the second time level as a copy so its halo is also defined
   * when it becomes the source buffer at t = 1. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }
  for (m = 0; m < 7; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Was `min(...)`: only the uppercase MIN macro is defined above, so the
     * lowercase call did not compile. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  (void) ts_return;  /* sign of the last measurement; kept for tooling */

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays -- now also the top-level pointer arrays and the
   * tile-size list, which previously leaked. */
  for(i=0; i<Nz; i++){
    for(j=0; j<Ny; j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7; m++){
    for(i=0; i<Nz; i++){
      for(j=0; j<Ny; j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
decorate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE %
% D D E C O O R R A A T E %
% D D EEE C O O RRRR AAAAA T EEE %
% D D E C O O R R A A T E %
% DDDD EEEEE CCCC OOO R R A A T EEEEE %
% %
% %
% MagickCore Image Decoration Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
/*
Define declarations.
*/
#define AccentuateModulate ScaleCharToQuantum(80)
#define HighlightModulate ScaleCharToQuantum(125)
#define ShadowModulate ScaleCharToQuantum(135)
#define DepthModulate ScaleCharToQuantum(185)
#define TroughModulate ScaleCharToQuantum(110)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B o r d e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BorderImage() surrounds the image with a border of the color defined by
% the bordercolor member of the image structure. The width and height
% of the border are defined by the corresponding members of the border_info
% structure.
%
% The format of the BorderImage method is:
%
% Image *BorderImage(const Image *image,const RectangleInfo *border_info,
% const CompositeOperator compose,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o border_info: define the width and height of the border.
%
% o compose: the composite operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BorderImage(const Image *image,
  const RectangleInfo *border_info,const CompositeOperator compose,
  ExceptionInfo *exception)
{
  FrameInfo
    frame_info;

  Image
    *canvas,
    *framed;

  /*
    Express the border as a bevel-free frame and delegate to FrameImage.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(border_info != (RectangleInfo *) NULL);
  frame_info.width=image->columns+(border_info->width << 1);
  frame_info.height=image->rows+(border_info->height << 1);
  frame_info.x=(ssize_t) border_info->width;
  frame_info.y=(ssize_t) border_info->height;
  frame_info.inner_bevel=0;
  frame_info.outer_bevel=0;
  /*
    Work on a clone whose matte color carries the requested border color.
  */
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  canvas->matte_color=image->border_color;
  framed=FrameImage(canvas,&frame_info,compose,exception);
  canvas=DestroyImage(canvas);
  if (framed != (Image *) NULL)
    framed->matte_color=image->matte_color;
  return(framed);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F r a m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FrameImage() adds a simulated three-dimensional border around the image.
% The color of the border is defined by the matte_color member of image.
% Members width and height of frame_info specify the border width of the
% vertical and horizontal sides of the frame. Members inner and outer
% indicate the width of the inner and outer shadows of the frame.
%
% The format of the FrameImage method is:
%
% Image *FrameImage(const Image *image,const FrameInfo *frame_info,
% const CompositeOperator compose,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o frame_info: Define the width and height of the frame and its bevels.
%
% o compose: the composite operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info,
const CompositeOperator compose,ExceptionInfo *exception)
{
#define FrameImageTag "Frame/Image"
CacheView
*image_view,
*frame_view;
Image
*frame_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
accentuate,
highlight,
matte,
shadow,
trough;
register ssize_t
x;
size_t
bevel_width,
height,
width;
ssize_t
y;
/*
Check frame geometry.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(frame_info != (FrameInfo *) NULL);
if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0))
ThrowImageException(OptionError,"FrameIsLessThanImageSize");
bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel);
x=(ssize_t) frame_info->width-frame_info->x-bevel_width;
y=(ssize_t) frame_info->height-frame_info->y-bevel_width;
if ((x < (ssize_t) image->columns) || (y < (ssize_t) image->rows))
ThrowImageException(OptionError,"FrameIsLessThanImageSize");
/*
Initialize framed image attributes.
*/
frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue,
exception);
if (frame_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(frame_image,DirectClass,exception) == MagickFalse)
{
frame_image=DestroyImage(frame_image);
return((Image *) NULL);
}
if ((IsPixelInfoGray(&frame_image->border_color) == MagickFalse) &&
(IsGrayColorspace(frame_image->colorspace) != MagickFalse))
(void) SetImageColorspace(frame_image,sRGBColorspace,exception);
if ((frame_image->matte_color.alpha_trait != UndefinedPixelTrait) &&
(frame_image->alpha_trait == UndefinedPixelTrait))
(void) SetImageAlpha(frame_image,OpaqueAlpha,exception);
frame_image->page=image->page;
if ((image->page.width != 0) && (image->page.height != 0))
{
frame_image->page.width+=frame_image->columns-image->columns;
frame_image->page.height+=frame_image->rows-image->rows;
}
/*
Initialize 3D effects color.
*/
matte=image->matte_color;
accentuate=matte;
accentuate.red=(double) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate)));
accentuate.green=(double) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate)));
accentuate.blue=(double) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate)));
accentuate.black=(double) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.black+(QuantumRange*AccentuateModulate)));
accentuate.alpha=matte.alpha;
highlight=matte;
highlight.red=(double) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.red+(QuantumRange*HighlightModulate)));
highlight.green=(double) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.green+(QuantumRange*HighlightModulate)));
highlight.blue=(double) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate)));
highlight.black=(double) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.black+(QuantumRange*HighlightModulate)));
highlight.alpha=matte.alpha;
shadow=matte;
shadow.red=QuantumScale*matte.red*ShadowModulate;
shadow.green=QuantumScale*matte.green*ShadowModulate;
shadow.blue=QuantumScale*matte.blue*ShadowModulate;
shadow.black=QuantumScale*matte.black*ShadowModulate;
shadow.alpha=matte.alpha;
trough=matte;
trough.red=QuantumScale*matte.red*TroughModulate;
trough.green=QuantumScale*matte.green*TroughModulate;
trough.blue=QuantumScale*matte.blue*TroughModulate;
trough.black=QuantumScale*matte.black*TroughModulate;
trough.alpha=matte.alpha;
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
frame_view=AcquireAuthenticCacheView(frame_image,exception);
height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
frame_info->inner_bevel);
if (height != 0)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
/*
Draw top of ornamental border.
*/
q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,
height,exception);
if (q != (Quantum *) NULL)
{
/*
Draw top of ornamental border.
*/
for (y=0; y < (ssize_t) frame_info->outer_bevel; y++)
{
for (x=0; x < (ssize_t) (frame_image->columns-y); x++)
{
if (x < y)
SetPixelViaPixelInfo(frame_image,&highlight,q);
else
SetPixelViaPixelInfo(frame_image,&accentuate,q);
q+=GetPixelChannels(frame_image);
}
for ( ; x < (ssize_t) frame_image->columns; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
}
for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
width=frame_image->columns-2*frame_info->outer_bevel;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
}
for (y=0; y < (ssize_t) frame_info->inner_bevel; y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
width=image->columns+((size_t) frame_info->inner_bevel << 1)-
y;
for (x=0; x < (ssize_t) width; x++)
{
if (x < y)
SetPixelViaPixelInfo(frame_image,&shadow,q);
else
SetPixelViaPixelInfo(frame_image,&trough,q);
q+=GetPixelChannels(frame_image);
}
for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
}
(void) SyncCacheViewAuthenticPixels(frame_view,exception);
}
}
/*
Draw sides of ornamental border.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,frame_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
size_t
width;
/*
Initialize scanline with matte color.
*/
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y,
frame_image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
/*
Set frame interior pixels.
*/
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelViaPixelInfo(frame_image,&frame_image->border_color,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,FrameImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
height=(size_t) (frame_info->inner_bevel+frame_info->height-
frame_info->y-image->rows-bevel_width+frame_info->outer_bevel);
if (height != 0)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
/*
Draw bottom of ornamental border.
*/
q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows-
height),frame_image->columns,height,exception);
if (q != (Quantum *) NULL)
{
/*
Draw bottom of ornamental border.
*/
for (y=frame_info->inner_bevel-1; y >= 0; y--)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < y; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
{
if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y))
SetPixelViaPixelInfo(frame_image,&highlight,q);
else
SetPixelViaPixelInfo(frame_image,&accentuate,q);
q+=GetPixelChannels(frame_image);
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
}
height=frame_info->height-frame_info->y-image->rows-bevel_width;
for (y=0; y < (ssize_t) height; y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
width=frame_image->columns-2*frame_info->outer_bevel;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
}
for (y=frame_info->outer_bevel-1; y >= 0; y--)
{
for (x=0; x < y; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
for ( ; x < (ssize_t) frame_image->columns; x++)
{
if (x >= (ssize_t) (frame_image->columns-y))
SetPixelViaPixelInfo(frame_image,&shadow,q);
else
SetPixelViaPixelInfo(frame_image,&trough,q);
q+=GetPixelChannels(frame_image);
}
}
(void) SyncCacheViewAuthenticPixels(frame_view,exception);
}
}
frame_view=DestroyCacheView(frame_view);
image_view=DestroyCacheView(image_view);
x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+
frame_info->inner_bevel);
y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
frame_info->inner_bevel);
if (status != MagickFalse)
status=CompositeImage(frame_image,image,compose,MagickTrue,x,y,
exception);
if (status == MagickFalse)
frame_image=DestroyImage(frame_image);
return(frame_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RaiseImage() creates a simulated three-dimensional button-like effect
% by lightening and darkening the edges of the image. Members width and
% height of raise_info define the width of the vertical and horizontal
% edge of the effect.
%
% The format of the RaiseImage method is:
%
% MagickBooleanType RaiseImage(const Image *image,
% const RectangleInfo *raise_info,const MagickBooleanType raise,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o raise_info: Define the width and height of the raise area.
%
% o raise: A value other than zero creates a 3-D raise effect,
% otherwise it has a lowered effect.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RaiseImage(Image *image,
  const RectangleInfo *raise_info,const MagickBooleanType raise,
  ExceptionInfo *exception)
{
/*
  Blend factors: each edge pixel becomes
    (pixel*Factor + edge_color*(QuantumRange-Factor))/QuantumRange,
  i.e. the closer Factor is to QuantumRange the weaker the effect.
  Accentuate/Trough (135) are stronger than Highlight/Shadow (190).
*/
#define AccentuateFactor ScaleCharToQuantum(135)
#define HighlightFactor ScaleCharToQuantum(190)
#define ShadowFactor ScaleCharToQuantum(190)
#define RaiseImageTag "Raise/Image"
#define TroughFactor ScaleCharToQuantum(135)

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    foreground,   /* color blended into top/left edges */
    background;   /* color blended into bottom/right edges */

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(raise_info != (RectangleInfo *) NULL);
  /* The bevel must fit twice (left+right, top+bottom) inside the image. */
  if ((image->columns <= (raise_info->width << 1)) ||
      (image->rows <= (raise_info->height << 1)))
    ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth",
      image->filename);
  /* raise==true: light top/left, dark bottom/right; lowered inverts both. */
  foreground=QuantumRange;
  background=(Quantum) 0;
  if (raise == MagickFalse)
    {
      foreground=(Quantum) 0;
      background=QuantumRange;
    }
  /* Pixels are modified in place, so the image must be DirectClass. */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Raise image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
  /*
    Pass 1: top raise_info->height rows.  Each row y is split into a
    left triangle (highlight, x < y), the top edge (accentuate), and a
    right triangle (shadow, the last y columns).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,raise_info->height,1)
#endif
  for (y=0; y < (ssize_t) raise_info->height; y++)
  {
    register ssize_t
      i,
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < y; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* only blend channels flagged for update (e.g. skip read-only). */
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
          foreground*(QuantumRange-HighlightFactor)));
      }
      q+=GetPixelChannels(image);
    }
    for ( ; x < (ssize_t) (image->columns-y); x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*AccentuateFactor+
          (double) foreground*(QuantumRange-AccentuateFactor)));
      }
      q+=GetPixelChannels(image);
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
          background*(QuantumRange-ShadowFactor)));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across threads; increment must be atomic. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,RaiseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Pass 2: middle rows.  Left bevel column gets highlight, the interior
    is skipped untouched (pointer advanced only), right bevel gets shadow.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows-2*raise_info->height,1)
#endif
  for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++)
  {
    register ssize_t
      i,
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) raise_info->width; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
          foreground*(QuantumRange-HighlightFactor)));
      }
      q+=GetPixelChannels(image);
    }
    /* interior pixels are left unchanged; just advance the pointer. */
    for ( ; x < (ssize_t) (image->columns-raise_info->width); x++)
      q+=GetPixelChannels(image);
    for ( ; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
          background*(QuantumRange-ShadowFactor)));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,RaiseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Pass 3: bottom raise_info->height rows.  (image->rows-y) is the
    distance to the bottom edge: left triangle highlight, bottom edge
    trough, right triangle shadow.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows-raise_info->height,1)
#endif
  for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      i,
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->rows-y); x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
          foreground*(QuantumRange-HighlightFactor)));
      }
      q+=GetPixelChannels(image);
    }
    for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*TroughFactor+
          (double) background*(QuantumRange-TroughFactor)));
      }
      q+=GetPixelChannels(image);
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
          background*(QuantumRange-ShadowFactor)));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,RaiseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
GxB_Type_name.c | //------------------------------------------------------------------------------
// GxB_Type_name: return the name of a type
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GxB_Type_name      // return the name of a GraphBLAS type
(
    char *type_name,        // name of the type (char array of size at least
                            // GxB_MAX_NAME_LEN, owned by the user application).
    GrB_Type type           // GraphBLAS type whose name is requested
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GB_WHERE1 ("GxB_Type_name (type_name, type)") ;
    GB_RETURN_IF_NULL (type_name) ;
    GB_RETURN_IF_NULL_OR_FAULTY (type) ;

    //--------------------------------------------------------------------------
    // return the type_name
    //--------------------------------------------------------------------------

    // copy the whole fixed-size name buffer; the caller's array must be at
    // least GxB_MAX_NAME_LEN bytes (copying past the NUL is intentional).
    memcpy (type_name, type->name, GxB_MAX_NAME_LEN) ;
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
|
GB_AxB_dot2_template.c | //------------------------------------------------------------------------------
// GB_AxB_dot2_template: C=A'B, C<!M>=A'*B, or C<M>=A'*B via dot products
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// A and B are sparse, bitmap, or full; never hypersparse. If the input
// matrices A and/or B are hypersparse, they are converted into hyper_shallow
// sparse matrices, and C is converted from bitmap to sparse/hypersparse when
// done.
// If A_NOT_TRANSPOSED is #defined, the C=A*B or C<#M>=A*B is computed.
// In this case A is bitmap or full, and B is sparse.
// GB_DOT_ALWAYS_SAVE_CIJ: C(i,j) = cij
#undef GB_DOT_ALWAYS_SAVE_CIJ

#if GB_C_IS_FULL

    // C is full: every entry is present, so only the value is written.
    #define GB_DOT_ALWAYS_SAVE_CIJ      \
    {                                   \
        GB_PUTC (cij, pC) ;             \
    }

#else

    // C is bitmap: write the value, mark the entry present in Cb, and
    // count it in the per-task entry counter.
    #define GB_DOT_ALWAYS_SAVE_CIJ      \
    {                                   \
        GB_PUTC (cij, pC) ;             \
        Cb [pC] = 1 ;                   \
        task_cnvals++ ;                 \
    }

#endif

// GB_DOT_SAVE_CIJ: C(i,j) = cij, unless already done by GB_DOT
#undef GB_DOT_SAVE_CIJ

#if GB_IS_ANY_MONOID

    // for the ANY monoid, GB_DOT saves C(i,j) as soon as a value is found
    #define GB_DOT_SAVE_CIJ

#else

    // all other monoids: C(i,j) = cij if it exists
    #define GB_DOT_SAVE_CIJ             \
    {                                   \
        if (GB_CIJ_EXISTS)              \
        {                               \
            GB_DOT_ALWAYS_SAVE_CIJ ;    \
        }                               \
    }

#endif

// Hypersparse A or B are handled by a different instantiation of this
// template (they are converted to hyper_shallow sparse first).
#if ( !GB_A_IS_HYPER && !GB_B_IS_HYPER )
{

    //--------------------------------------------------------------------------
    // C=A'*B, C<M>=A'*B, or C<!M>=A'*B where C is bitmap
    //--------------------------------------------------------------------------

    int tid ;
    // The task grid is naslice-by-nbslice; each task owns a block of rows of
    // A (kA_start..kA_end) and columns of B (kB_start..kB_end).  When C is
    // bitmap, per-task entry counts are summed into cnvals by the reduction.
    #if GB_C_IS_FULL
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:cnvals)
    #endif
    for (tid = 0 ; tid < ntasks ; tid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        const int a_tid = tid / nbslice ;
        const int b_tid = tid % nbslice ;
        const int64_t kA_start = A_slice [a_tid] ;
        const int64_t kA_end = A_slice [a_tid+1] ;
        const int64_t kB_start = B_slice [b_tid] ;
        const int64_t kB_end = B_slice [b_tid+1] ;
        #if (!GB_C_IS_FULL)
        int64_t task_cnvals = 0 ;
        #endif

        //----------------------------------------------------------------------
        // C=A'*B, C<M>=A'*B, or C<!M>=A'*B via dot products
        //----------------------------------------------------------------------

        for (int64_t j = kB_start ; j < kB_end ; j++)
        {

            //------------------------------------------------------------------
            // get C(:,j)
            //------------------------------------------------------------------

            // C is held by column; column j starts at offset j*cvlen.
            const int64_t pC_start = j * cvlen ;

            //------------------------------------------------------------------
            // get B(:,j)
            //------------------------------------------------------------------

            #if GB_B_IS_SPARSE
            // B is sparse (never hypersparse)
            const int64_t pB_start = Bp [j] ;
            const int64_t pB_end = Bp [j+1] ;
            const int64_t bjnz = pB_end - pB_start ;
            if (bjnz == 0)
            {
                // no work to do if B(:,j) is empty, except to clear Cb
                memset (&Cb [pC_start + kA_start], 0, kA_end - kA_start) ;
                continue ;
            }
            #if GB_A_IS_SPARSE
            // Both A and B are sparse; get first and last in B(:,j)
            const int64_t ib_first = Bi [pB_start] ;
            const int64_t ib_last = Bi [pB_end-1] ;
            #endif
            #else
            // B is bitmap or full
            const int64_t pB_start = j * vlen ;
            #endif

            //------------------------------------------------------------------
            // C(:,j)<#M(:,j)> = A'*B(:,j), or C(:,j) = A'*B(:,j) if no mask
            //------------------------------------------------------------------

            for (int64_t i = kA_start ; i < kA_end ; i++)
            {

                //--------------------------------------------------------------
                // get C(i,j), M(i,j), and clear the C(i,j) bitmap
                //--------------------------------------------------------------

                int64_t pC = pC_start + i ;     // C is bitmap

                #if defined ( GB_ANY_SPECIALIZED )
                // M is bitmap and structural; Mask_comp true
                Cb [pC] = 0 ;
                if (!Mb [pC])
                #elif defined ( GB_MASK_IS_PRESENT )
                bool mij ;
                if (M_is_bitmap)
                {
                    // M is bitmap
                    mij = Mb [pC] && GB_mcast (Mx, pC, msize) ;
                }
                else if (M_is_full)
                {
                    // M is full
                    mij = GB_mcast (Mx, pC, msize) ;
                }
                else // M is sparse or hyper
                {
                    // M has been scattered into the C bitmap
                    // (entries of M appear as Cb [pC] > 1)
                    mij = (Cb [pC] > 1) ;
                }
                Cb [pC] = 0 ;
                // mij ^ Mask_comp: entry allowed by M, or by !M if complemented
                if (mij ^ Mask_comp)
                #elif GB_C_IS_FULL
                // C is full; nothing to do
                #else
                // M is not present
                Cb [pC] = 0 ;
                #endif
                {

                    //----------------------------------------------------------
                    // the mask allows C(i,j) to be computed
                    //----------------------------------------------------------

                    #if GB_A_IS_SPARSE
                    // A is sparse
                    int64_t pA = Ap [i] ;
                    const int64_t pA_end = Ap [i+1] ;
                    const int64_t ainz = pA_end - pA ;
                    #if (!GB_C_IS_FULL)
                    if (ainz > 0)       // skip this test if C is full
                    #endif
                    #else
                    // A is bitmap or full
                    #ifdef GB_A_NOT_TRANSPOSED
                    // A(i,:) starts at position i
                    const int64_t pA = i ;
                    #else
                    // A(:,i) starts at position i * vlen
                    const int64_t pA = i * vlen ;
                    #endif
                    #endif
                    {
                        // C(i,j) = A(:,i)'*B(:,j) or A(i,:)*B(:,j)
                        bool cij_exists = false ;
                        GB_CIJ_DECLARE (cij) ;
                        #include "GB_AxB_dot_cij.c"
                    }
                }
            }
        }
        #if (!GB_C_IS_FULL)
        cnvals += task_cnvals ;
        #endif
    }
}
#endif

#undef GB_A_IS_SPARSE
#undef GB_A_IS_HYPER
#undef GB_A_IS_BITMAP
#undef GB_A_IS_FULL
#undef GB_B_IS_SPARSE
#undef GB_B_IS_HYPER
#undef GB_B_IS_BITMAP
#undef GB_B_IS_FULL
#undef GB_DOT_ALWAYS_SAVE_CIJ
#undef GB_DOT_SAVE_CIJ
|
gramschmidt.c | /**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/* gramschmidt.c: this file is part of PolyBench/C */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
#include "gramschmidt.h"
/* Array initialization. */
/* Fill A with deterministic values in [10, 110) and zero both Q and R. */
static
void init_array(int m, int n,
                DATA_TYPE POLYBENCH_2D(A, M, N, m, n),
                DATA_TYPE POLYBENCH_2D(R, N, N, n, n),
                DATA_TYPE POLYBENCH_2D(Q, M, N, m, n))
{
  int row, col;

  /* A[row][col] = ((row*col mod m)/m)*100 + 10; Q starts at zero. */
  for (row = 0; row < m; row++)
    for (col = 0; col < n; col++) {
      A[row][col] = (((DATA_TYPE) ((row * col) % m) / m) * 100) + 10;
      Q[row][col] = 0.0;
    }

  /* R (n x n) starts at zero. */
  for (row = 0; row < n; row++)
    for (col = 0; col < n; col++)
      R[row][col] = 0.0;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump the live-out arrays R and Q (DCE prevention / correctness check),
   inserting a line break every 20 printed values. */
static
void print_array(int m, int n,
                 DATA_TYPE POLYBENCH_2D(A, M, N, m, n),
                 DATA_TYPE POLYBENCH_2D(R, N, N, n, n),
                 DATA_TYPE POLYBENCH_2D(Q, M, N, m, n))
{
  int row, col;

  POLYBENCH_DUMP_START;

  /* R is n x n. */
  POLYBENCH_DUMP_BEGIN("R");
  for (row = 0; row < n; row++)
    for (col = 0; col < n; col++) {
      if ((row * n + col) % 20 == 0)
        fprintf (POLYBENCH_DUMP_TARGET, "\n");
      fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, R[row][col]);
    }
  POLYBENCH_DUMP_END("R");

  /* Q is m x n. */
  POLYBENCH_DUMP_BEGIN("Q");
  for (row = 0; row < m; row++)
    for (col = 0; col < n; col++) {
      if ((row * n + col) % 20 == 0)
        fprintf (POLYBENCH_DUMP_TARGET, "\n");
      fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, Q[row][col]);
    }
  POLYBENCH_DUMP_END("Q");

  POLYBENCH_DUMP_FINISH;
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* QR Decomposition with Modified Gram Schmidt:
http://www.inf.ethz.ch/personal/gander/ */
/* QR decomposition A = Q*R by Modified Gram-Schmidt, processed column by
   column: for each column k, compute its norm (R[k][k]), normalize it into
   Q(:,k), then orthogonalize all later columns of A against Q(:,k). */
static
void kernel_gramschmidt(int m, int n,
                        DATA_TYPE POLYBENCH_2D(A, M, N, m, n),
                        DATA_TYPE POLYBENCH_2D(R, N, N, n, n),
                        DATA_TYPE POLYBENCH_2D(Q, M, N, m, n))
{
  int i, j, k;

  DATA_TYPE nrm;

  for (k = 0; k < _PB_N; k++)
    {
      /* nrm = ||A(:,k)||^2, accumulated in parallel via a reduction. */
      nrm = SCALAR_VAL(0.0);
#pragma omp parallel for default(shared) private(i) firstprivate(m, k, A) reduction(+ : nrm)
      for (i = 0; i < _PB_M; i++)
        nrm += A[i][k] * A[i][k];
      R[k][k] = SQRT_FUN(nrm);
      /* Q(:,k) = A(:,k) / R[k][k].
         NOTE(review): divides by zero if column k of A is all zeros --
         presumably excluded by the benchmark's initialization; confirm. */
#pragma omp parallel for default(shared) private(i) firstprivate(m, k, A, R)
      for (i = 0; i < _PB_M; i++)
        Q[i][k] = A[i][k] / R[k][k];
      /* For each later column j: R[k][j] = Q(:,k)'*A(:,j), then subtract the
         projection.  Iterations write disjoint columns, so parallelizing
         over j is race-free.  firstprivate on the array parameters copies
         the handles, not the data -- TODO confirm that is the intent. */
#pragma omp parallel for default(shared) private(j, i) firstprivate(k, n, m, Q)
      for (j = k + 1; j < _PB_N; j++)
        {
          R[k][j] = SCALAR_VAL(0.0);
          for (i = 0; i < _PB_M; i++)
            R[k][j] += Q[i][k] * A[i][j];
          for (i = 0; i < _PB_M; i++)
            A[i][j] = A[i][j] - Q[i][k] * R[k][j];
        }
    }
}
/* Benchmark driver: allocate, initialize, time the kernel, dump live-out
   data (to defeat dead-code elimination), and free. */
int main(int argc, char** argv)
{
  /* Retrieve problem size (M, N are compile-time dataset constants). */
  int m = M;
  int n = N;

  /* Variable declaration/allocation (PolyBench-managed heap arrays). */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, M, N, m, n);
  POLYBENCH_2D_ARRAY_DECL(R, DATA_TYPE, N, N, n, n);
  POLYBENCH_2D_ARRAY_DECL(Q, DATA_TYPE, M, N, m, n);

  /* Initialize array(s). */
  init_array (m, n,
              POLYBENCH_ARRAY(A),
              POLYBENCH_ARRAY(R),
              POLYBENCH_ARRAY(Q));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel (the call and return are included in the timing). */
  kernel_gramschmidt (m, n,
                      POLYBENCH_ARRAY(A),
                      POLYBENCH_ARRAY(R),
                      POLYBENCH_ARRAY(Q));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(m, n, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(R), POLYBENCH_ARRAY(Q)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(R);
  POLYBENCH_FREE_ARRAY(Q);

  return 0;
}
|
fisher.c | /** @file fisher.c
** @brief Fisher - Declaration
** @author David Novotny
**/
/*
Copyright (C) 2013 David Novotny and Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page fisher Fisher Vector encoding (FV)
@author David Novotny
@author Andrea Vedaldi
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@ref fisher.h implements the Fisher Vectors (FV) image representation
@cite{perronnin06fisher} @cite{perronnin10improving}. A FV is a
statistic capturing the distribution of a set of vectors, usually a
set of local image descriptors.
@ref fisher-starting demonstrates how to use the C API to compute the
FV representation of an image. For further details refer to:
- @subpage fisher-fundamentals - Fisher Vector definition.
- @subpage fisher-derivation - Deriving the Fisher Vectors as a Fisher Kernel.
- @subpage fisher-kernel - The Fisher Kernel in general.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section fisher-starting Getting started
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The Fisher Vector encoding of a set of features is obtained by using
the function ::vl_fisher_encode. Note that the function requires a
@ref gmm "Gaussian Mixture Model" (GMM) of the encoded feature
distribution. In the following code, the result of the coding process
is stored in the @c enc array and the improved fisher vector
normalization is used.
@code
float * means ;
float * covariances ;
float * priors ;
float * posteriors ;
float * enc;
// create a GMM object and cluster input data to get means, covariances
// and priors of the estimated mixture
gmm = vl_gmm_new (VL_TYPE_FLOAT) ;
vl_gmm_cluster (gmm, data, dimension, numData, numClusters);
// allocate space for the encoding
enc = vl_malloc(sizeof(float) * 2 * dimension * numClusters);
// run fisher encoding
vl_fisher_encode
(enc, VL_F_TYPE,
vl_gmm_get_means(gmm), dimension, numClusters,
vl_gmm_get_covariances(gmm),
vl_gmm_get_priors(gmm),
dataToEncode, numDataToEncode,
VL_FISHER_FLAG_IMPROVED
) ;
@endcode
The performance of the standard Fisher Vector can be significantly
improved @cite{perronnin10improving} by using appropriate @ref
fisher-normalization normalizations. These are controlled by the @c
flag parameter of ::vl_fisher_encode.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page fisher-fundamentals Fisher vector fundamentals
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
This page describes the *Fisher Vector* (FV) of
@cite{perronnin06fisher} @cite{perronnin10improving}. See @ref fisher
for an overview of the C API and @ref fisher-kernel for its relation
to the more general notion of Fisher kernel.
The FV is an image representation obtained by pooling local image
features. It is frequently used as a global image descriptor in visual
classification.
While the FV can be @ref fisher-kernel "derived" as a special,
approximate, and improved case of the general Fisher Kernel framework,
it is easy to describe directly. Let $I = (\bx_1,\dots,\bx_N)$ be a
set of $D$ dimensional feature vectors (e.g. SIFT descriptors)
extracted from an image. Let
$\Theta=(\mu_k,\Sigma_k,\pi_k:k=1,\dots,K)$ be the parameters of a
@ref gmm "Gaussian Mixture Model" fitting the distribution of
descriptors. The GMM associates each vector $\bx_i$ to a mode $k$ in
the mixture with a strength given by the posterior probability:
\[
q_{ik} =
\frac
{\exp\left[-\frac{1}{2}(\bx_i - \mu_k)^T \Sigma_k^{-1} (\bx_i - \mu_k)\right]}
{\sum_{t=1}^K \exp\left[-\frac{1}{2}(\bx_i - \mu_t)^T \Sigma_t^{-1} (\bx_i - \mu_t)\right]}.
\]
For each mode $k$, consider the mean and covariance deviation vectors
@f{align*}
u_{jk} &=
{1 \over {N \sqrt{\pi_k}}}
\sum_{i=1}^{N}
q_{ik} \frac{x_{ji} - \mu_{jk}}{\sigma_{jk}},
\\
v_{jk} &=
{1 \over {N \sqrt{2 \pi_k}}}
\sum_{i=1}^{N}
q_{ik} \left[ \left(\frac{x_{ji} - \mu_{jk}}{\sigma_{jk}}\right)^2 - 1 \right].
@f}
where $j=1,2,\dots,D$ spans the vector dimensions. The FV of image $I$
is the stacking of the vectors $\bu_k$ and then of the vectors
$\bv_k$ for each of the $K$ modes in the Gaussian mixtures:
\[
\Phi(I) = \begin{bmatrix} \vdots \\ \bu_k \\ \vdots \\ \bv_k \\ \vdots \end{bmatrix}.
\]
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section fisher-normalization Normalization and improved Fisher vectors
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The *improved* Fisher Vector @cite{perronnin10improving} (IFV) improves the
classification performance of the representation by using two ideas:
1. *Non-linear additive kernel.* The Hellinger's kernel (or
Bhattacharya coefficient) can be used instead of the linear one at
no cost by signed squared rooting. This is obtained by applying the
function $|z| \sign z$ to each dimension of the vector $\Phi(I)$.
Other @ref homkermap "additive kernels" can also be used at an
increased space or time cost.
2. *Normalization.* Before using the representation in a linear model
(e.g. a @ref svm "support vector machine"), the vector $\Phi(I)$ is
further normalized by the $l^2$ norm (note that the standard Fisher
vector is normalized by the number of encoded feature vectors).
After square-rooting and normalization, the IFV is often used in a
linear classifier such as an @ref svm "SVM".
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section fisher-fast Faster computations
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
In practice, several data to cluster assignments $q_{ik}$ are likely
to be very small or even negligible. The *fast* version of the FV sets
to zero all but the largest assignment for each input feature $\bx_i$.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page fisher-derivation Fisher vector derivation
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The FV of @cite{perronnin06fisher} is a special case of the @ref
fisher-kernel "Fisher kernel" construction. It is designed to encode
local image features in a format that is suitable for learning and
comparison with simple metrics such as the Euclidean. In this
construction, an image is modeled as a collection of $D$-dimensional
feature vectors $I=(\bx_1,\dots,\bx_n)$ generated by a GMM with $K$
components $\Theta=(\mu_k,\Sigma_k,\pi_k:k=1,\dots,K)$. The covariance
matrices are assumed to be diagonal, i.e. $\Sigma_k = \diag
\bsigma_k^2$, $\bsigma_k \in \real^D_+$.
The generative model of *one* feature vector $\bx$ is given by the GMM
density function:
\[
p(\bx|\Theta) =
\sum_{k=1}^K \pi_k p(\bx|\Theta_k),
\quad
p(\bx|\Theta_k)
=
\frac{1}{(2\pi)^\frac{D}{2} (\det \Sigma_k)^{\frac{1}{2}}}
\exp
\left[
-\frac{1}{2}
(\bx - \mu_k)^\top \Sigma_k^{-1} (\bx - \mu_k)
\right]
\]
where $\Theta_k = (\mu_k,\Sigma_k)$. The Fisher Vector requires
computing the derivative of the log-likelihood function with respect
to the various model parameters. Consider in particular the parameters
$\Theta_k$ of a mode. Due to the exponent in the Gaussian density
function, the derivative can be written as
\[
\nabla_{\Theta_k} p(\bx|\Theta_k) =
p(\bx|\Theta_k)
g(\bx|\Theta_k)
\]
for a simple vector function $g$. The derivative of the log-likelihood
function is then
\[
\nabla_{\Theta_k} \log p(\bx|\Theta)
=
\frac{\pi_k p(\bx|\Theta_k)}{\sum_{t=1}^K \pi_t p(\bx|\Theta_t)}
g(\bx|\Theta_k)
=
q_k(\bx) g(\bx|\Theta_k)
\]
where $q_k(\bx)$ is the soft-assignment of the point $\bx$ to the mode
$k$. We make the approximation that $q_k(\bx)\approx 1$ if $\bx$ is
sampled from mode $k$ and $\approx 0$ otherwise
@cite{perronnin06fisher}. Hence one gets:
\[
E_{\bx \sim p(\bx|\Theta)}
[
\nabla_{\Theta_k} \log p(\bx|\Theta)
\nabla_{\Theta_t} \log p(\bx|\Theta)^\top
]
\approx
\begin{cases}
\pi_k E_{\bx \sim p(\bx|\Theta_k)} [ g(\bx|\Theta_k) g(\bx|\Theta_k)^\top], & t = k, \\
0, & t\not=k.
\end{cases}
\]
Thus under this approximation there is no correlation between the
parameters of the various Gaussian modes.
The function $g$ can be further broken down as the stacking of the
derivative w.r.t. the mean and the diagonal covariance.
\[
g(\bx|\Theta_k)
=
\begin{bmatrix}
g(\bx|\mu_k) \\
g(\bx|\bsigma_k)
\end{bmatrix},
\quad
[g(\bx|\mu_k)]_j
=
\frac{x_j - \mu_{jk}}{\sigma_{jk}^2},
\quad
[g(\bx|\bsigma_k^2)]_j
=
\frac{1}{2\sigma_{jk}^2}
\left(
\left(\frac{x_j - \mu_{jk}}{\sigma_{jk}}\right)^2
-
1
\right)
\]
Thus the covariance of the model (Fisher information) is diagonal and
the diagonal entries are given by
\[
H_{\mu_{jk}} = \pi_k E[g(\bx|\mu_{jk})g(\bx|\mu_{jk})]
= \frac{\pi_k}{\sigma_{jk}^2},
\quad
H_{\sigma_{jk}^2} = \frac{\pi_k}{2 \sigma_{jk}^4}.
\]
where in the calculation it was used the fact that the fourth moment
of the standard Gaussian distribution is 3. Multiplying the inverse
square root of the matrix $H$ by the derivative of the log-likelihood
function results in the Fisher vector encoding of one image feature
$\bx$:
\[
\Phi_{\mu_{jk}}(\bx) = H_{\mu_{jk}}^{-\frac{1}{2}} q_k(\bx) g(\bx|\mu_{jk})
= q_k(\bx) \frac{x_j - \mu_{jk}}{\sqrt{\pi_k}\sigma_{jk}},
\qquad
\Phi_{\sigma^2_{jk}}(\bx) =
\frac{q_k(\bx)}{\sqrt{2 \pi_k}}
\left(
\left(\frac{x_j - \mu_{jk}}{\sigma_{jk}}\right)^2
-
1
\right)
\]
Assuming that features are sampled i.i.d. from the GMM results in the
formulas given in @ref fisher-fundamentals (note the normalization
factor). Note that:
* The Fisher components relative to the prior probabilities $\pi_k$
have been ignored. This is because they have little effect on the
representation @cite{perronnin10improving}.
* Technically, the derivation of the Fisher Vector for multiple image
features requires the number of features to be the same in both
images. Ultimately, however, the representation can be computed by
using any number of features.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page fisher-kernel Fisher kernel
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
This page discusses the Fisher Kernels (FK) of
@cite{jaakkola98exploiting} and shows how the FV of
@cite{perronnin06fisher} can be derived from it as a special case. The
FK induces a similarity measures between data points $\bx$ and $\bx'$
from a parametric generative model $p(\bx|\Theta)$ of the data. The
parameter $\Theta$ of the model is selected to fit the a-priori
distribution of the data, and is usually the Maximum Likelihood (MLE)
estimate obtained from a set of training examples. Once the generative
model is learned, each particular datum $\bx$ is represented by
looking at how it affects the MLE parameter estimate. This effect is
measured by computing the gradient of the log-likelihood term
corresponding to $\bx$:
\[
\hat\Phi(\bx) = \nabla_\Theta \log p(\bx|\Theta)
\]
The vectors $\hat\Phi(\bx)$ should be appropriately scaled before they
can be meaningfully compared. This is obtained by *whitening* the data
by multiplying the vectors by the inverse of the square root of their
*covariance matrix*. The covariance matrix can be obtained from the
generative model $p(\bx|\Theta)$ itself. Since $\Theta$ is the ML
parameter and $\hat\Phi(\bx)$ is the gradient of the log-likelihood
function, its expected value $E[\hat\Phi(\bx)]$ is zero. Thus, since
the vectors are already centered, their covariance matrix is simply:
\[
H = E_{\bx \sim p(\bx|\Theta)} [\hat\Phi(\bx) \hat\Phi(\bx)^\top]
\]
Note that $H$ is also the *Fisher information matrix* of the
model. The final FV encoding $\Phi(\bx)$ is given by the whitened
gradient of the log-likelihood function, i.e.:
\[
\Phi(\bx) = H^{-\frac{1}{2}} \nabla_\Theta \log p(\bx|\Theta).
\]
Taking the inner product of two such vectors yields the *Fisher
kernel*:
\[
K(\bx,\bx')
= \langle \Phi(\bx),\Phi(\bx') \rangle
= \nabla_\Theta \log p(\bx|\Theta)^\top H^{-1} \nabla_\Theta \log p(\bx'|\Theta).
\]
**/
#include "fisher.h"
#include "gmm.h"
#include "mathop.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef VL_FISHER_INSTANTIATING
/*
  Core Fisher Vector encoder, instantiated for float and double via the
  TYPE/SFX macros. Writes the 2*dimension*numClusters encoding into enc:
  the first dimension*numClusters entries hold the mean derivatives (uk),
  the second half the variance derivatives (vk). Returns the number of
  (cluster, datum) pairs actually accumulated (soft assignments below
  1e-6 are skipped).
*/
static vl_size
VL_XCAT(_vl_fisher_encode_, SFX)
(TYPE * enc,
TYPE const * means, vl_size dimension, vl_size numClusters,
TYPE const * covariances,
TYPE const * priors,
TYPE const * data, vl_size numData,
int flags)
{
vl_size dim;
vl_index i_cl, i_d;
vl_size numTerms = 0 ;
TYPE * posteriors ;
TYPE * sqrtInvSigma;
assert(numClusters >= 1) ;
assert(dimension >= 1) ;
posteriors = vl_malloc(sizeof(TYPE) * numClusters * numData);
sqrtInvSigma = vl_malloc(sizeof(TYPE) * dimension * numClusters);
memset(enc, 0, sizeof(TYPE) * 2 * dimension * numClusters) ;
/* precompute 1/sigma for every (cluster, dimension) pair */
for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
for(dim = 0; dim < dimension; dim++) {
sqrtInvSigma[i_cl*dimension + dim] = sqrt(1.0 / covariances[i_cl*dimension + dim]);
}
}
/* soft assignments q_k(x) of each datum to each GMM mode */
VL_XCAT(vl_get_gmm_data_posteriors_, SFX)(posteriors, numClusters, numData,
priors,
means, dimension,
covariances,
data) ;
/* sparsify posterior assignments with the FAST option */
if (flags & VL_FISHER_FLAG_FAST) {
for(i_d = 0; i_d < (signed)numData; i_d++) {
/* find largest posterior assignment for datum i_d */
vl_index best = 0 ;
TYPE bestValue = posteriors[i_d * numClusters] ;
for (i_cl = 1 ; i_cl < (signed)numClusters; ++ i_cl) {
TYPE p = posteriors[i_cl + i_d * numClusters] ;
if (p > bestValue) {
bestValue = p ;
best = i_cl ;
}
}
/* make all posterior assignments zero but the best one */
for (i_cl = 0 ; i_cl < (signed)numClusters; ++ i_cl) {
posteriors[i_cl + i_d * numClusters] =
(TYPE)(i_cl == best) ;
}
}
}
/* clusters are independent; numTerms is combined via the reduction */
#if defined(_OPENMP)
#pragma omp parallel for default(shared) private(i_cl, i_d, dim) num_threads(vl_get_max_threads()) reduction(+:numTerms)
#endif
for(i_cl = 0; i_cl < (signed)numClusters; ++ i_cl) {
TYPE uprefix;
TYPE vprefix;
/* uk: mean-derivative slot; vk: variance-derivative slot of this cluster */
TYPE * uk = enc + i_cl*dimension ;
TYPE * vk = enc + i_cl*dimension + numClusters * dimension ;
/*
If the GMM component is degenerate and has a null prior, then it
must have null posterior as well. Hence it is safe to skip it. In
practice, we skip over it even if the prior is very small; if by
any chance a feature is assigned to such a mode, then its weight
would be very high due to the division by priors[i_cl] below.
*/
if (priors[i_cl] < 1e-6) { continue ; }
for(i_d = 0; i_d < (signed)numData; i_d++) {
TYPE p = posteriors[i_cl + i_d * numClusters] ;
if (p < 1e-6) continue ;
numTerms += 1;
for(dim = 0; dim < dimension; dim++) {
TYPE diff = data[i_d*dimension + dim] - means[i_cl*dimension + dim] ;
diff *= sqrtInvSigma[i_cl*dimension + dim] ;
*(uk + dim) += p * diff ;
*(vk + dim) += p * (diff * diff - 1);
}
}
/* apply the 1/(N*sqrt(pi_k)) and 1/(N*sqrt(2*pi_k)) normalizations */
if (numData > 0) {
uprefix = 1/(numData*sqrt(priors[i_cl]));
vprefix = 1/(numData*sqrt(2*priors[i_cl]));
for(dim = 0; dim < dimension; dim++) {
*(uk + dim) = *(uk + dim) * uprefix;
*(vk + dim) = *(vk + dim) * vprefix;
}
}
}
vl_free(posteriors);
vl_free(sqrtInvSigma) ;
/* signed square-rooting of each component (part of the improved FV) */
if (flags & VL_FISHER_FLAG_SQUARE_ROOT) {
for(dim = 0; dim < 2 * dimension * numClusters ; dim++) {
TYPE z = enc [dim] ;
if (z >= 0) {
enc[dim] = VL_XCAT(vl_sqrt_, SFX)(z) ;
} else {
enc[dim] = - VL_XCAT(vl_sqrt_, SFX)(- z) ;
}
}
}
/* global L2 normalization of the encoding */
if (flags & VL_FISHER_FLAG_NORMALIZED) {
TYPE n = 0 ;
for(dim = 0 ; dim < 2 * dimension * numClusters ; dim++) {
TYPE z = enc [dim] ;
n += z * z ;
}
n = VL_XCAT(vl_sqrt_, SFX)(n) ;
n = VL_MAX(n, 1e-12) ;
for(dim = 0 ; dim < 2 * dimension * numClusters ; dim++) {
enc[dim] /= n ;
}
}
return numTerms ;
}
#else
/* not VL_FISHER_INSTANTIATING */
#ifndef __DOXYGEN__
#define FLT VL_TYPE_FLOAT
#define TYPE float
#define SFX f
#define VL_FISHER_INSTANTIATING
#include "fisher.c"
#define FLT VL_TYPE_DOUBLE
#define TYPE double
#define SFX d
#define VL_FISHER_INSTANTIATING
#include "fisher.c"
#endif
/* not VL_FISHER_INSTANTIATING */
#endif
/* ================================================================ */
#ifndef VL_FISHER_INSTANTIATING
/** @brief Fisher vector encoding of a set of vectors.
** @param dataType the type of the input data (::VL_TYPE_DOUBLE or ::VL_TYPE_FLOAT).
** @param enc Fisher vector (output).
** @param means Gaussian mixture means.
** @param dimension dimension of the data.
** @param numClusters number of Gaussians mixture components.
** @param covariances Gaussian mixture diagonal covariances.
** @param priors Gaussian mixture prior probabilities.
** @param data vectors to encode.
** @param numData number of vectors to encode.
** @param flags options.
** @return number of averaging operations.
**
** @a means and @a covariances have @a dimension rows and @a
** numCluster columns. @a priors is a vector of size @a
** numCluster. @a data has @a dimension rows and @a numData
** columns. @a enc is a vector of size equal to twice the product of
** @a dimension and @a numClusters. All these vectors and matrices
** have the same class, as specified by @a dataType, and must be
** stored in column-major format.
**
** @a flag can be used to control several options:
** ::VL_FISHER_FLAG_SQUARE_ROOT, ::VL_FISHER_FLAG_NORMALIZED,
** ::VL_FISHER_FLAG_IMPROVED, and ::VL_FISHER_FLAG_FAST.
**
** The function returns the number of averaging operations actually
** performed. The upper bound is the number of input features by the
** number of GMM modes; however, assignments are usually fairly
** sparse, so this number is often much smaller. In particular, with
** the ::VL_FISHER_FLAG_FAST, is equal to the number of input
** features. This information can be used for diagnostic purposes.
**
** @sa @ref fisher
**/
VL_EXPORT vl_size
vl_fisher_encode
(void * enc, vl_type dataType,
 void const * means, vl_size dimension, vl_size numClusters,
 void const * covariances,
 void const * priors,
 void const * data, vl_size numData,
 int flags
 )
{
  /* Dispatch to the type-specialized encoder. */
  switch(dataType) {
    case VL_TYPE_FLOAT:
      return _vl_fisher_encode_f
      ((float *) enc,
       (float const *) means, dimension, numClusters,
       (float const *) covariances,
       (float const *) priors,
       (float const *) data, numData,
       flags);
    case VL_TYPE_DOUBLE:
      return _vl_fisher_encode_d
      ((double *) enc,
       (double const *) means, dimension, numClusters,
       (double const *) covariances,
       (double const *) priors,
       (double const *) data, numData,
       flags);
    default:
      /* unsupported data type: programming error */
      abort();
  }
}
/* not VL_FISHER_INSTANTIATING */
#endif
#undef SFX
#undef TYPE
#undef FLT
#undef VL_FISHER_INSTANTIATING
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImages() compares one or more pixel channels of an image to a
% reconstructed image and returns the difference image.
%
% The format of the CompareImages method is:
%
% Image *CompareImages(const Image *image,const Image *reconstruct_image,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
register ssize_t
i;
size_t
channels;
channels=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) != 0)
channels++;
}
return(channels == 0 ? (size_t) 1 : channels);
}
/*
  CompareImages() computes the `metric' distortion between `image' and
  `reconstruct_image' (stored in *distortion) and returns an annotated
  difference image: pixels within the fuzz tolerance are painted with the
  lowlight color, differing pixels with the highlight color, and the
  overlay is composited onto an extent-padded copy of `image'.  Returns
  NULL on failure.
*/
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
const MetricType metric,double *distortion,ExceptionInfo *exception)
{
CacheView
*highlight_view,
*image_view,
*reconstruct_view;
double
fuzz;
const char
*artifact;
Image
*difference_image,
*highlight_image;
MagickBooleanType
status;
PixelInfo
highlight,
lowlight;
RectangleInfo
geometry;
size_t
columns,
rows;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
assert(distortion != (double *) NULL);
*distortion=0.0;
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* Compute the numeric distortion first; bail out if that fails. */
status=GetImageDistortion(image,reconstruct_image,metric,distortion,
exception);
if (status == MagickFalse)
return((Image *) NULL);
/* Use the union of both geometries so neither image is cropped. */
columns=MagickMax(image->columns,reconstruct_image->columns);
rows=MagickMax(image->rows,reconstruct_image->rows);
SetGeometry(image,&geometry);
geometry.width=columns;
geometry.height=rows;
difference_image=ExtentImage(image,&geometry,exception);
if (difference_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (highlight_image == (Image *) NULL)
{
difference_image=DestroyImage(difference_image);
return((Image *) NULL);
}
status=SetImageStorageClass(highlight_image,DirectClass,exception);
if (status == MagickFalse)
{
difference_image=DestroyImage(difference_image);
highlight_image=DestroyImage(highlight_image);
return((Image *) NULL);
}
(void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
/* Default colors; "highlight-color"/"lowlight-color" artifacts override. */
(void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
artifact=GetImageArtifact(image,"highlight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
(void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
artifact=GetImageArtifact(image,"lowlight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
/*
Generate difference image.
*/
status=MagickTrue;
fuzz=GetFuzzyColorDistance(image,reconstruct_image);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,highlight_image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p,
*magick_restrict q;
register Quantum
*magick_restrict r;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
(r == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
MagickStatusType
difference;
register ssize_t
i;
/* masked pixels never count as different */
if (GetPixelReadMask(image,p) == 0)
{
SetPixelViaPixelInfo(highlight_image,&lowlight,r);
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
r+=GetPixelChannels(highlight_image);
continue;
}
difference=MagickFalse;
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
/* one channel beyond the fuzz tolerance marks the pixel as different */
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
if ((distance*distance) > fuzz)
{
difference=MagickTrue;
break;
}
}
if (difference == MagickFalse)
SetPixelViaPixelInfo(highlight_image,&lowlight,r);
else
SetPixelViaPixelInfo(highlight_image,&highlight,r);
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
r+=GetPixelChannels(highlight_image);
}
sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
highlight_view=DestroyCacheView(highlight_view);
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* overlay the highlight annotations onto the padded source image */
(void) CompositeImage(difference_image,highlight_image,image->compose,
MagickTrue,0,0,exception);
(void) SetImageAlphaChannel(difference_image,OffAlphaChannel,exception);
highlight_image=DestroyImage(highlight_image);
if (status == MagickFalse)
difference_image=DestroyImage(difference_image);
return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortion() compares one or more pixel channels of an image to a
% reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageDistortion method is:
%
% MagickBooleanType GetImageDistortion(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetAbsoluteDistortion() counts, per channel, the pixels whose squared
  alpha-weighted difference exceeds the fuzz tolerance; the composite
  channel counts pixels where any channel differed.  Results are
  accumulated into distortion[0..MaxPixelChannels].
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
double
fuzz;
MagickBooleanType
status;
size_t
columns,
rows;
ssize_t
y;
/*
Compute the absolute difference in pixels between two images.
*/
status=MagickTrue;
fuzz=GetFuzzyColorDistance(image,reconstruct_image);
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
j,
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/* per-row accumulator, merged under the critical section below */
(void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
MagickBooleanType
difference;
register ssize_t
i;
/* masked pixels do not contribute */
if (GetPixelReadMask(image,p) == 0)
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
difference=MagickFalse;
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
if ((distance*distance) > fuzz)
{
channel_distortion[i]++;
difference=MagickTrue;
}
}
if (difference != MagickFalse)
channel_distortion[CompositePixelChannel]++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetAbsoluteError)
#endif
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]+=channel_distortion[j];
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
  GetFuzzDistortion() accumulates, per channel, the mean squared
  alpha-weighted, QuantumScale-normalized difference between
  corresponding pixels; the composite channel is reduced to the RMS over
  the contributing channels.

  Fix: the null check for the reconstruct pixels cast to (Quantum *)
  instead of (const Quantum *); made consistent with the sibling
  distortion routines.
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* per-row accumulator, merged under the critical section below */
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /* masked pixels do not contribute */
      if (GetPixelReadMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
          channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Average over the pixel count; the composite channel becomes the RMS
    over the contributing channels.
  */
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]/=((double) columns*rows);
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]);
  return(status);
}
/*
  GetMeanAbsoluteDistortion() accumulates, per channel, the mean of the
  absolute alpha-weighted, QuantumScale-normalized differences between
  corresponding pixels; the composite channel is the mean over the
  contributing channels.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
MagickBooleanType
status;
register ssize_t
j;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/* per-row accumulator, merged under the critical section below */
(void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
register ssize_t
i;
/* masked pixels do not contribute */
if (GetPixelReadMask(image,p) == 0)
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
distance=QuantumScale*fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
channel,q));
channel_distortion[i]+=distance;
channel_distortion[CompositePixelChannel]+=distance;
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]+=channel_distortion[j];
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* average over the pixel count and the number of contributing channels */
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]/=((double) columns*rows);
distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
return(status);
}
/*
  GetMeanErrorPerPixel() accumulates per-channel absolute differences
  into `distortion' and also updates the image's error statistics
  (mean_error_per_pixel, normalized_mean_error,
  normalized_maximum_error), which is why `image' is non-const here.
  This routine is single-threaded, unlike its siblings.
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
MagickBooleanType
status;
double
area,
maximum_error,
mean_error;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
area=0.0;
maximum_error=0.0;
mean_error=0.0;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
for (y=0; y < (ssize_t) rows; y++)
{
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
register ssize_t
i;
/* masked pixels do not contribute */
if (GetPixelReadMask(image,p) == 0)
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
distance=fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q));
distortion[i]+=distance;
distortion[CompositePixelChannel]+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
/* area counts (pixel, channel) samples, not pixels */
area++;
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* NOTE(review): if every pixel is masked, area stays 0.0 and these
   divisions yield inf/NaN -- confirm callers guarantee unmasked pixels */
image->error.mean_error_per_pixel=distortion[CompositePixelChannel]/area;
image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error/area;
image->error.normalized_maximum_error=QuantumScale*maximum_error;
return(status);
}
/*
  GetMeanSquaredDistortion() accumulates, per channel, the mean squared
  alpha-weighted, QuantumScale-normalized difference between
  corresponding pixels; the composite channel is the mean over the
  contributing channels.

  Fix: the final composite-channel division now casts
  GetImageChannels() to double, matching the sibling distortion
  routines and making the floating-point division explicit.
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* per-row accumulator, merged under the critical section below */
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /* masked pixels do not contribute */
      if (GetPixelReadMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
          channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* average over the pixel count and the number of contributing channels */
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]/=((double) columns*rows);
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
/*
  GetNormalizedCrossCorrelationDistortion() computes, per channel, the
  normalized cross correlation between the two images: the mean-removed
  channel products are averaged over the pixel area and then divided by
  the product of the channels' standard deviations.  The composite
  channel is the RMS over the per-channel correlations.  Single-threaded
  because it reports progress via the image's progress monitor.
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
const Image *image,const Image *reconstruct_image,double *distortion,
ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"
CacheView
*image_view,
*reconstruct_view;
ChannelStatistics
*image_statistics,
*reconstruct_statistics;
double
area;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
size_t
columns,
rows;
ssize_t
y;
/*
Normalize to account for variation due to lighting and exposure condition.
*/
image_statistics=GetImageStatistics(image,exception);
reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
if ((image_statistics == (ChannelStatistics *) NULL) ||
(reconstruct_statistics == (ChannelStatistics *) NULL))
{
if (image_statistics != (ChannelStatistics *) NULL)
image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
image_statistics);
if (reconstruct_statistics != (ChannelStatistics *) NULL)
reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
reconstruct_statistics);
return(MagickFalse);
}
status=MagickTrue;
progress=0;
for (i=0; i <= MaxPixelChannels; i++)
distortion[i]=0.0;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
/* area is the reciprocal pixel count, folded into each product below */
area=1.0/((double) columns*rows);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
for (y=0; y < (ssize_t) rows; y++)
{
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
/* masked pixels do not contribute */
if (GetPixelReadMask(image,p) == 0)
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*(image->alpha_trait != UndefinedPixelTrait ?
GetPixelAlpha(image,p) : OpaqueAlpha);
Da=QuantumScale*(reconstruct_image->alpha_trait != UndefinedPixelTrait ?
GetPixelAlpha(reconstruct_image,q) : OpaqueAlpha);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
/* the alpha channel itself is not alpha-weighted */
if (channel == AlphaPixelChannel)
{
distortion[i]+=area*QuantumScale*(p[i]-
image_statistics[channel].mean)*(GetPixelChannel(
reconstruct_image,channel,q)-
reconstruct_statistics[channel].mean);
}
else
{
distortion[i]+=area*QuantumScale*(Sa*p[i]-
image_statistics[channel].mean)*(Da*GetPixelChannel(
reconstruct_image,channel,q)-
reconstruct_statistics[channel].mean);
}
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,SimilarityImageTag,progress++,rows);
if (proceed == MagickFalse)
{
status=MagickFalse;
break;
}
}
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/*
Divide by the standard deviation.
*/
distortion[CompositePixelChannel]=0.0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
gamma;
PixelChannel channel=GetPixelChannelChannel(image,i);
gamma=image_statistics[channel].standard_deviation*
reconstruct_statistics[channel].standard_deviation;
/* PerceptibleReciprocal() guards against a zero standard deviation */
gamma=PerceptibleReciprocal(gamma);
distortion[i]=QuantumRange*gamma*distortion[i];
distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
}
distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
GetImageChannels(image));
/*
Free resources.
*/
reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
reconstruct_statistics);
image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
image_statistics);
return(status);
}
/*
  GetPeakAbsoluteDistortion() records, per channel, the largest
  alpha-weighted absolute difference between the two images; the
  composite slot holds the overall maximum.  Virtual cache views let the
  scan cover the union of both image geometries.
*/
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /*
    Iterate over the larger of the two row/column extents.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Per-thread maxima; merged into the shared result under the
      critical section below.
    */
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /*
        Masked pixels do not participate in the metric.
      */
      if (GetPixelReadMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /*
          Track the largest normalized channel difference seen so far.
        */
        distance=QuantumScale*fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
          channel,q));
        if (distance > channel_distortion[i])
          channel_distortion[i]=distance;
        if (distance > channel_distortion[CompositePixelChannel])
          channel_distortion[CompositePixelChannel]=distance;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      if (channel_distortion[j] > distortion[j])
        distortion[j]=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  MagickLog10() returns log10 of the magnitude of x, clamping arguments
  near zero so the result is log10(Log10Epsilon) instead of -infinity.
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon  (1.0e-11)

  const double magnitude=fabs(x);

  if (magnitude < Log10Epsilon)
    return(log10(Log10Epsilon));
  return(log10(magnitude));
}
/*
  GetPeakSignalToNoiseRatio() derives PSNR from the per-channel mean
  squared error: 20*log10(1/RMSE), with the peak value scaled to 1.0.
*/
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    channel;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
    distortion[channel]=20.0*MagickLog10(1.0/sqrt(distortion[channel]));
  return(status);
}
/*
  GetPerceptualHashDistortion() accumulates, per channel, the squared
  differences between the perceptual-hash moments of the two images in
  both the sRGB and HCLp colorspaces; the composite slot receives the
  sum over all channels.
*/
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *image_phash,
    *reconstruct_phash;

  ssize_t
    channel;

  /*
    Compute perceptual hash in the sRGB colorspace.
  */
  image_phash=GetImagePerceptualHash(image,exception);
  if (image_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      image_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(image_phash);
      return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4)
#endif
  for (channel=0; channel < MaxPixelChannels; channel++)
  {
    double
      difference;

    register ssize_t
      i;

    difference=0.0;
    for (i=0; i < MaximumNumberOfImageMoments; i++)
    {
      double
        alpha,
        beta;

      alpha=image_phash[channel].srgb_hu_phash[i];
      beta=reconstruct_phash[channel].srgb_hu_phash[i];
      difference+=(beta-alpha)*(beta-alpha);
    }
    /*
      Each iteration owns a distinct 'channel' slot; only the shared
      composite accumulator needs the critical section.
    */
    distortion[channel]+=difference;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
    distortion[CompositePixelChannel]+=difference;
  }
  /*
    Compute perceptual hash in the HCLP colorspace.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4)
#endif
  for (channel=0; channel < MaxPixelChannels; channel++)
  {
    double
      difference;

    register ssize_t
      i;

    difference=0.0;
    for (i=0; i < MaximumNumberOfImageMoments; i++)
    {
      double
        alpha,
        beta;

      alpha=image_phash[channel].hclp_hu_phash[i];
      beta=reconstruct_phash[channel].hclp_hu_phash[i];
      difference+=(beta-alpha)*(beta-alpha);
    }
    distortion[channel]+=difference;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
    distortion[CompositePixelChannel]+=difference;
  }
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  image_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(image_phash);
  return(MagickTrue);
}
/*
  GetRootMeanSquaredDistortion() reports the per-channel RMSE: the
  square root of the mean squared distortion.
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    channel;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
    distortion[channel]=sqrt(distortion[channel]);
  return(status);
}
/*
  GetImageDistortion() dispatches to the metric-specific helper and
  returns the composite distortion through *distortion; the result is
  also recorded on the image as the "distortion" property.
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.  The helpers fill one slot per pixel channel
    plus the composite slot.
  */
  length=MaxPixelChannels+1;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    /*
      NCC doubles as the default metric.
    */
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  *distortion=channel_distortion[CompositePixelChannel];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortions() compares the pixel channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageDistortions method is:
%
% double *GetImageDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageDistortions() returns a newly allocated vector of
  (MaxPixelChannels+1) doubles holding the per-channel distortion for
  the requested metric (composite value in the CompositePixelChannel
  slot), or NULL when the underlying measurement fails.  The caller
  owns the returned array and must relinquish it.
*/
MagickExport double *GetImageDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      /*
        Bug fix: this case previously dispatched to
        GetRootMeanSquaredDistortion(), silently returning RMSE instead
        of the perceptual-hash distortion (GetImageDistortion() already
        dispatched this metric correctly).
      */
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() compares the pixels of two images and returns immediately
% if any pixel is not identical.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(const Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  IsImagesEqual() walks both images in lock step and returns MagickTrue
  only if every updatable channel of every unmasked pixel agrees to
  within MagickEpsilon; it bails out on the first mismatch.
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  /*
    Scan the union of both geometries.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      /*
        Masked pixels are skipped entirely.
      */
      if (GetPixelReadMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        /*
          First channel difference of MagickEpsilon or more ends the
          comparison.
        */
        if (distance >= MagickEpsilon)
          break;
      }
      if (i < (ssize_t) GetPixelChannels(image))
        break;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (x < (ssize_t) columns)
      break;
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Equal only if every row completed without an early break.
  */
  return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r M e t r i c %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorMetric() measures the difference between colors at each pixel
% location of two images. A value other than 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the SetImageColorMetric method is:
%
% MagickBooleanType SetImageColorMetric(Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetImageColorMetric() accumulates per-channel absolute differences
  between the two images and stores three normalized error measures on
  image->error (mean error per pixel, normalized mean error, normalized
  maximum error).  Returns MagickTrue when the images match exactly
  (mean error per pixel is zero).

  Fix: the averages previously divided by 'area' directly, producing
  NaNs when no channels were sampled (zero-sized or fully masked
  images); PerceptibleReciprocal() yields 0.0 for a zero denominator.
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area,
    gamma,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  /*
    Scan the union of both geometries.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelReadMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        /*
          'area' counts sampled channels, not pixels.
        */
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Guard against division by zero when nothing was sampled.
  */
  gamma=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel*gamma);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error*gamma);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image against the image and returns
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% const MetricType metric,const double similarity_threshold,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o metric: the metric.
%
% o similarity_threshold: minimum distortion for (sub)image match.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetSimilarityMetric() crops the tile of 'image' at (x_offset,y_offset)
  with the reference geometry and returns its distortion under the given
  metric; 0.0 is returned when the crop or the measurement fails.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *tile_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  tile_image=CropImage(image,&geometry,exception);
  if (tile_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  status=GetImageDistortion(tile_image,reference,metric,&distortion,exception);
  tile_image=DestroyImage(tile_image);
  if (status == MagickFalse)
    return(0.0);
  return(distortion);
}
/*
  SimilarityImage() slides the reference over every candidate offset of
  the image, records the best-matching offset and its distortion in
  *offset / *similarity_metric, and returns a similarity map whose
  intensity encodes match quality (white = exact match).
*/
MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  /*
    One map pixel per candidate offset.
    NOTE(review): image->columns-reference->columns+1 underflows when
    the reference is larger than the image -- confirm callers guarantee
    the reference fits inside the image.
  */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(similarity_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
    exception);
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    shared(progress,status,similarity_metric) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    /*
      Stop searching once any thread reaches the requested threshold.
    */
    if (*similarity_metric <= similarity_threshold)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      /*
        Distortion of the reference against the tile at (x,y).
      */
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      /*
        NOTE(review): the critical section guards only the next
        if-statement; the best-match update below executes outside it --
        confirm this apparent race is intended.
      */
      if ((metric == NormalizedCrossCorrelationErrorMetric) ||
          (metric == UndefinedErrorMetric))
        similarity=1.0-similarity;
      if (similarity < *similarity_metric)
        {
          offset->x=x;
          offset->y=y;
          *similarity_metric=similarity;
        }
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      if (GetPixelReadMask(similarity_image,q) == 0)
        {
          SetPixelBackgoundColor(similarity_image,q);
          q+=GetPixelChannels(similarity_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (similarity_traits == UndefinedPixelTrait) ||
            ((similarity_traits & UpdatePixelTrait) == 0))
          continue;
        /*
          Encode similarity as intensity: exact match maps to white.
        */
        SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
          QuantumRange*similarity),q);
      }
      q+=GetPixelChannels(similarity_image);
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SimilarityImage)
#endif
        proceed=SetImageProgress(image,SimilarityImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
|
DRB071-targetparallelfor-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
use of omp target: len is not mapped. It should be firstprivate within target.
*/
#include <stdio.h>
/*
  Fills a[i]=i and then increments each element in a second parallel
  loop, finally printing the array.  Each iteration writes a distinct
  element, so both worksharing loops are race-free.
  NOTE(review): printf is called without #include <stdio.h>; the code
  relies on an implicit declaration (invalid since C99) -- confirm the
  intended compile mode.
*/
int main(int argc, char* argv[])
{
  int i;
  int len = 1000;
  /* Variable-length array sized by 'len'. */
  int a[len];

#pragma omp parallel for private(i )
  for (i=0; i<len; i++)
    a[i]= i;

#pragma omp parallel for private(i )
  for (i=0;i< len;i++)
    a[i]=a[i]+1;

  for (i=0; i<len; i++)
    printf("%d", a[i]);
  return 0;
}
|
9076.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Fill A with the deterministic ramp A[i][j] = (i+j)/nj. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  // printf("Initializing Array\n");
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
        A[i][j] = ((DATA_TYPE) (i + j) / nj);
      }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      /* Break the dump into lines of 20 values to keep it diffable. */
      if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_conv2d(int ni,
                   int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

#pragma scop
  /* 3x3 stencil over the interior of A; the borders of B are never
     written by this kernel. */
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    /* NOTE(review): 'schedule' is not a valid clause on the
       'distribute' construct (OpenMP specifies dist_schedule there);
       some compilers accept or ignore it -- confirm against the target
       toolchain. */
#pragma omp target teams distribute schedule(static, 4)
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
              + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
              + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
  // printf("Kernel computation complete !!\n");
}
/*
  Driver: allocates A and B, initializes A, times kernel_conv2d and
  prints B to prevent dead-code elimination.
  NOTE(review): kernel_conv2d writes only the interior of B, so the
  border values printed by print_array are uninitialized.
*/
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
|
backprop.c | /*
******************************************************************
* HISTORY
* 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University
* Prepared for 15-681, Fall 1994.
* Modified by Shuai Che
******************************************************************
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "backprop.h"
#include <math.h>
//#define OPEN
#define ABS(x) (((x) > 0.0) ? (x) : (-(x)))
#define fastcopy(to,from,len)\
{\
register char *_to,*_from;\
register int _i,_l;\
_to = (char *)(to);\
_from = (char *)(from);\
_l = (len);\
for (_i = 0; _i < _l; _i++) *_to++ = *_from++;\
}
/*** Return random number between 0.0 and 1.0 ***/
float drnd()
{
  float numerator;

  numerator = (float) rand();
  return (numerator / (float) BIGRND);
}
/*** Return random number between -1.0 and 1.0 ***/
float dpn1()
{
  float unit;

  /* Map a uniform [0,1] draw onto [-1,1]. */
  unit = drnd();
  return ((unit * 2.0) - 1.0);
}
/*** The squashing function. Currently, it's a sigmoid. ***/
/* Logistic sigmoid activation: 1/(1+e^-x).  K&R definition; the
   commented-out lines are an old polynomial approximation of exp kept
   for reference ('m' is unused while they stay disabled). */
float squash(x)
float x;
{
  float m;
  //x = -x;
  //m = 1 + x + x*x/2 + x*x*x/6 + x*x*x*x/24 + x*x*x*x*x/120;
  //return(1.0 / (1.0 + m));
  return (1.0 / (1.0 + exp(-x)));
}
/*** Allocate 1d array of floats ***/
/* Allocate an n-element float array; returns NULL (after printing a
   diagnostic) on failure.  'new' is a legal identifier here -- this
   file is C, not C++. */
float *alloc_1d_dbl(n)
int n;
{
  float *new;

  new = (float *) malloc ((unsigned) (n * sizeof (float)));
  if (new == NULL) {
    printf("ALLOC_1D_DBL: Couldn't allocate array of floats\n");
    return (NULL);
  }
  return (new);
}
/*** Allocate 2d array of floats ***/
/*** Allocate 2d array of floats ***/
/* Allocate an m x n float matrix as an array of row pointers.
   Returns NULL on failure.  Fix: row allocations are now checked --
   previously a failed alloc_1d_dbl() left a NULL row in the matrix
   (later dereferenced) and leaked every prior allocation. */
float **alloc_2d_dbl(m, n)
int m, n;
{
  int i, j;
  float **new;

  new = (float **) malloc ((unsigned) (m * sizeof (float *)));
  if (new == NULL) {
    printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n");
    return (NULL);
  }

  for (i = 0; i < m; i++) {
    new[i] = alloc_1d_dbl(n);
    if (new[i] == NULL) {
      /* Unwind: release the rows already allocated and the pointer
         array itself so the caller does not leak on failure. */
      for (j = 0; j < i; j++) free((char *) new[j]);
      free((char *) new);
      return (NULL);
    }
  }

  return (new);
}
/* Fill an (m+1) x (n+1) weight matrix with uniform random values in
   [0,1].  Bounds are inclusive because index 0 holds the bias
   (threshold) weight.  Pre-ANSI definition; return type defaults to
   int. */
bpnn_randomize_weights(w, m, n)
float **w;
int m, n;
{
  int i, j;

  for (i = 0; i <= m; i++) {
    for (j = 0; j <= n; j++) {
      w[i][j] = (float) rand()/RAND_MAX;
      /* Alternative symmetric init kept for reference: */
      // w[i][j] = dpn1();
    }
  }
}
/* Set every entry of an (m+1)-element row to the constant 0.1 (the
   random variant is commented out).  Implicit int return. */
bpnn_randomize_row(w, m)
float *w;
int m;
{
  int i;

  for (i = 0; i <= m; i++) {
    //w[i] = (float) rand()/RAND_MAX;
    w[i] = 0.1;
  }
}
/* Zero an (m+1) x (n+1) weight matrix (inclusive bounds cover the bias
   row/column).  Implicit int return. */
bpnn_zero_weights(w, m, n)
float **w;
int m, n;
{
  int i, j;

  for (i = 0; i <= m; i++) {
    for (j = 0; j <= n; j++) {
      w[i][j] = 0.0;
    }
  }
}
/* Seed the C library PRNG used by the weight initializers.  'seed' is
   declared without a type and defaults to int (pre-ANSI style). */
void bpnn_initialize(seed)
{
  printf("Random number generator seed: %d\n", seed);
  srand(seed);
}
/* Allocate a BPNN with the given layer sizes.  Unit arrays are sized
   +1 for the bias unit; weight matrices are (rows+1) x (cols+1).
   NOTE(review): the member allocations are not checked -- a failed
   alloc_1d_dbl/alloc_2d_dbl leaves NULL members behind. */
BPNN *bpnn_internal_create(n_in, n_hidden, n_out)
int n_in, n_hidden, n_out;
{
  BPNN *newnet;

  newnet = (BPNN *) malloc (sizeof (BPNN));
  if (newnet == NULL) {
    printf("BPNN_CREATE: Couldn't allocate neural network\n");
    return (NULL);
  }

  newnet->input_n = n_in;
  newnet->hidden_n = n_hidden;
  newnet->output_n = n_out;
  newnet->input_units = alloc_1d_dbl(n_in + 1);
  newnet->hidden_units = alloc_1d_dbl(n_hidden + 1);
  newnet->output_units = alloc_1d_dbl(n_out + 1);

  newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1);
  newnet->output_delta = alloc_1d_dbl(n_out + 1);
  newnet->target = alloc_1d_dbl(n_out + 1);

  newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
  newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);

  newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
  newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);

  return (newnet);
}
/*
 * Release every array owned by net, then net itself.  The row counts
 * are taken from the stored layer sizes, mirroring the allocations in
 * bpnn_internal_create (inclusive bounds: bias row included).
 * Modernized from the obsolete K&R definition; logic unchanged.
 */
void bpnn_free(BPNN *net)
{
  int n1, n2, i;

  n1 = net->input_n;
  n2 = net->hidden_n;

  free((char *) net->input_units);
  free((char *) net->hidden_units);
  free((char *) net->output_units);

  free((char *) net->hidden_delta);
  free((char *) net->output_delta);
  free((char *) net->target);

  /* input->hidden weight matrices: (n1+1) rows each */
  for (i = 0; i <= n1; i++) {
    free((char *) net->input_weights[i]);
    free((char *) net->input_prev_weights[i]);
  }
  free((char *) net->input_weights);
  free((char *) net->input_prev_weights);

  /* hidden->output weight matrices: (n2+1) rows each */
  for (i = 0; i <= n2; i++) {
    free((char *) net->hidden_weights[i]);
    free((char *) net->hidden_prev_weights[i]);
  }
  free((char *) net->hidden_weights);
  free((char *) net->hidden_prev_weights);

  free((char *) net);
}
/*** Creates a new fully-connected network from scratch,
     with the given numbers of input, hidden, and output units.
     Threshold units are automatically included.  All weights are
     randomly initialized.

     Space is also allocated for temporary storage (momentum weights,
     error computations, etc).
***/
BPNN *bpnn_create(int n_in, int n_hidden, int n_out)
{
  BPNN *newnet;

  newnet = bpnn_internal_create(n_in, n_hidden, n_out);
  /* fix: the original dereferenced a possibly-NULL net below */
  if (newnet == NULL) {
    return (NULL);
  }

#ifdef INITZERO
  bpnn_zero_weights(newnet->input_weights, n_in, n_hidden);
#else
  bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden);
#endif
  bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out);
  bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden);
  bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out);
  bpnn_randomize_row(newnet->target, n_out);
  return (newnet);
}
/*
 * Forward-propagate layer l1 (n1 units + bias) through weights conn into
 * layer l2 (n2 units): l2[j] = squash(sum_k conn[k][j] * l1[k]).
 * l1[0] is forced to 1.0 as the threshold unit.
 * Modernized from the obsolete K&R definition.
 */
void bpnn_layerforward(float *l1, float *l2, float **conn, int n1, int n2)
{
  float sum;
  int j, k;

  /*** Set up thresholding unit ***/
  l1[0] = 1.0;
#ifdef OPEN
  omp_set_num_threads(NUM_THREAD);
  /*
   * Fix: 'sum' is a per-j-iteration accumulator, not a value combined
   * across iterations, so it must be private; the original
   * reduction(+:sum) clause only worked by accident (each thread's
   * private copy is reassigned at the top of every iteration).
   */
#pragma omp parallel for shared(conn, n1, n2, l1) private(k, j, sum) schedule(static)
#endif
  /*** For each unit in second layer ***/
  for (j = 1; j <= n2; j++) {
    /*** Compute weighted sum of its inputs ***/
    sum = 0.0;
    for (k = 0; k <= n1; k++) {
      sum += conn[k][j] * l1[k];
    }
    l2[j] = squash(sum);
  }
}
//extern "C"
/*
 * Compute output-layer error terms: delta[j] = o*(1-o)*(t-o) for each
 * output unit j in 1..nj (index 0 is the unused bias slot), and store
 * the sum of their absolute values in *err.
 * Modernized from the obsolete K&R definition; the file-local ABS macro
 * is inlined so the function is self-contained.
 */
void bpnn_output_error(float *delta, float *target, float *output, int nj, float *err)
{
  int j;
  float o, t, errsum;

  errsum = 0.0;
  for (j = 1; j <= nj; j++) {
    o = output[j];
    t = target[j];
    delta[j] = o * (1.0 - o) * (t - o);
    errsum += (delta[j] >= 0.0f) ? delta[j] : -delta[j];
  }
  *err = errsum;
}
/*
 * Back-propagate output deltas to the hidden layer:
 *   delta_h[j] = h*(1-h) * sum_k delta_o[k] * who[j][k]
 * for hidden units j in 1..nh (index 0 is the bias slot), accumulating
 * the sum of absolute deltas into *err.
 * Modernized from the obsolete K&R definition; the file-local ABS macro
 * is inlined so the function is self-contained.
 */
void bpnn_hidden_error(float *delta_h, int nh, float *delta_o, int no,
                       float **who, float *hidden, float *err)
{
  int j, k;
  float h, sum, errsum;

  errsum = 0.0;
  for (j = 1; j <= nh; j++) {
    h = hidden[j];
    sum = 0.0;
    for (k = 1; k <= no; k++) {
      sum += delta_o[k] * who[j][k];
    }
    delta_h[j] = h * (1.0 - h) * sum;
    errsum += (delta_h[j] >= 0.0f) ? delta_h[j] : -delta_h[j];
  }
  *err = errsum;
}
/*
 * Gradient step with momentum:
 *   w[k][j]   += eta*delta[j]*ly[k] + momentum*oldw[k][j]
 *   oldw[k][j] = that same update (stored for the next momentum term)
 * ly[0] is forced to 1.0 (threshold unit).
 * Fix: the original overwrote the eta and momentum parameters with the
 * hard-coded constant 0.3, silently ignoring the values callers (e.g.
 * bpnn_train) pass in; the parameters are now honored.
 * Modernized from the obsolete K&R definition.
 */
void bpnn_adjust_weights(float *delta, int ndelta, float *ly, int nly,
                         float **w, float **oldw, float eta, float momentum)
{
  float new_dw;
  int k, j;

  ly[0] = 1.0;
#ifdef OPEN
  omp_set_num_threads(NUM_THREAD);
#pragma omp parallel for \
    shared(oldw, w, delta) \
    private(j, k, new_dw) \
    firstprivate(ndelta, nly, momentum, eta)
#endif
  for (j = 1; j <= ndelta; j++) {
    for (k = 0; k <= nly; k++) {
      new_dw = ((eta * delta[j] * ly[k]) + (momentum * oldw[k][j]));
      w[k][j] += new_dw;
      oldw[k][j] = new_dw;
    }
  }
}
/*
 * Run one forward pass: input layer -> hidden layer -> output layer,
 * using the network's current weights.  No error terms are computed.
 * Modernized from the obsolete K&R definition; logic unchanged.
 */
void bpnn_feedforward(BPNN *net)
{
  int in, hid, out;

  in = net->input_n;
  hid = net->hidden_n;
  out = net->output_n;

  /*** Feed forward input activations. ***/
  bpnn_layerforward(net->input_units, net->hidden_units,
                    net->input_weights, in, hid);
  bpnn_layerforward(net->hidden_units, net->output_units,
                    net->hidden_weights, hid, out);
}
/*
 * One full training step: forward pass, output/hidden error
 * computation, then weight updates with learning rate eta and momentum.
 * The summed output-layer and hidden-layer errors are returned through
 * *eo and *eh.  Modernized from the obsolete K&R definition.
 */
void bpnn_train(BPNN *net, float eta, float momentum, float *eo, float *eh)
{
  int in, hid, out;
  float out_err, hid_err;

  in = net->input_n;
  hid = net->hidden_n;
  out = net->output_n;

  /*** Feed forward input activations. ***/
  bpnn_layerforward(net->input_units, net->hidden_units,
                    net->input_weights, in, hid);
  bpnn_layerforward(net->hidden_units, net->output_units,
                    net->hidden_weights, hid, out);

  /*** Compute error on output and hidden units. ***/
  bpnn_output_error(net->output_delta, net->target, net->output_units,
                    out, &out_err);
  bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out,
                    net->hidden_weights, net->hidden_units, &hid_err);
  *eo = out_err;
  *eh = hid_err;

  /*** Adjust input and hidden weights. ***/
  bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid,
                      net->hidden_weights, net->hidden_prev_weights, eta, momentum);
  bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in,
                      net->input_weights, net->input_prev_weights, eta, momentum);
}
/*
 * Serialize the network to 'filename': three int layer sizes, then the
 * input->hidden weight matrix, then the hidden->output weight matrix
 * (row-major, floats).  This layout matches what bpnn_read expects.
 *
 * Fixes over the original:
 *  - the three header ints were written with
 *    fwrite(&n, sizeof(char), sizeof(char), ...), i.e. one byte each,
 *    while bpnn_read reads sizeof(int) — the files were unreadable;
 *  - the weight fwrite passed a byte count as the element count
 *    (nmemb already multiplied by sizeof(float)), reading 4x past the
 *    staging buffer;
 *  - fopen failure is now checked;
 *  - the malloc/fastcopy staging buffer is gone: each weight row is
 *    already a contiguous float array, so rows are written directly.
 */
void bpnn_save(BPNN *net, char *filename)
{
  int n1, n2, n3, i;
  float **w;
  FILE *pFile;

  pFile = fopen(filename, "w+");
  if (pFile == NULL) {
    printf("BPNN_SAVE: Cannot create '%s'\n", filename);
    return;
  }

  n1 = net->input_n;  n2 = net->hidden_n;  n3 = net->output_n;
  printf("Saving %dx%dx%d network to '%s'\n", n1, n2, n3, filename);

  /* header: the three layer sizes as full ints */
  fwrite(&n1, sizeof(int), 1, pFile);
  fwrite(&n2, sizeof(int), 1, pFile);
  fwrite(&n3, sizeof(int), 1, pFile);

  /* input->hidden weights: (n1+1) rows of (n2+1) floats */
  w = net->input_weights;
  for (i = 0; i <= n1; i++) {
    fwrite(w[i], sizeof(float), (size_t)(n2 + 1), pFile);
  }

  /* hidden->output weights: (n2+1) rows of (n3+1) floats */
  w = net->hidden_weights;
  for (i = 0; i <= n2; i++) {
    fwrite(w[i], sizeof(float), (size_t)(n3 + 1), pFile);
  }

  fclose(pFile);
  return;
}
/*
 * Load a network written by bpnn_save: three int layer sizes followed by
 * the input and hidden weight matrices (row-major floats).  Returns NULL
 * if the file cannot be opened or the network cannot be allocated.
 * Modernized from the obsolete K&R definition; the local formerly named
 * "new" is renamed for C++ compatibility, and the malloc/fastcopy
 * staging buffer is gone — each weight row is contiguous, so rows are
 * read directly into place.
 * NOTE(review): read() results are still unchecked, as in the original;
 * a truncated file yields uninitialized weights.
 */
BPNN *bpnn_read(char *filename)
{
  BPNN *net;
  int fd, n1, n2, n3, i;

  if ((fd = open(filename, 0, 0644)) == -1) {
    return (NULL);
  }

  printf("Reading '%s'\n", filename);  //fflush(stdout);

  read(fd, (char *) &n1, sizeof(int));
  read(fd, (char *) &n2, sizeof(int));
  read(fd, (char *) &n3, sizeof(int));
  net = bpnn_internal_create(n1, n2, n3);
  if (net == NULL) {            /* fix: original dereferenced NULL below */
    close(fd);
    return (NULL);
  }

  printf("'%s' contains a %dx%dx%d network\n", filename, n1, n2, n3);
  printf("Reading input weights...");  //fflush(stdout);

  for (i = 0; i <= n1; i++) {
    read(fd, (char *) net->input_weights[i], (n2 + 1) * sizeof(float));
  }

  printf("Done\nReading hidden weights...");  //fflush(stdout);

  for (i = 0; i <= n2; i++) {
    read(fd, (char *) net->hidden_weights[i], (n3 + 1) * sizeof(float));
  }
  close(fd);

  printf("Done\n");  //fflush(stdout);

  bpnn_zero_weights(net->input_prev_weights, n1, n2);
  bpnn_zero_weights(net->hidden_prev_weights, n2, n3);

  return (net);
}
|
clustering.c | /* * Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University
* Berlin, 14195 Berlin, Germany.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define NO_IMPORT_ARRAY
#include <clustering.h>
#include <assert.h>
/*
 * Euclidean distance between two n-dimensional float vectors.
 * Squared differences are accumulated in double precision before the
 * final sqrt.  buffer_a/buffer_b are unused; they exist only so the
 * signature matches the metric function-pointer type used by c_assign.
 */
float euclidean_distance(float *SKP_restrict a, float *SKP_restrict b, size_t n, float *buffer_a, float *buffer_b)
{
    double acc = 0.0;
    size_t k;

    for (k = 0; k < n; ++k) {
        const float diff = a[k] - b[k];
        acc += diff * diff;
    }
    return sqrt(acc);
}
/*
 * Minimum RMSD between two conformations of n/3 atoms stored atom-major
 * (x,y,z per atom; assumes n is a multiple of 3 — TODO confirm with
 * callers).  The inputs are copied into the caller-provided buffers
 * because centering mutates its operand; a and b are left untouched.
 */
float minRMSD_distance(float *SKP_restrict a, float *SKP_restrict b, size_t n, float *SKP_restrict buffer_a, float *SKP_restrict buffer_b)
{
    float msd;
    float trace_a, trace_b;

    /* work on copies: inplace_center_and_trace_atom_major modifies its input */
    memcpy(buffer_a, a, n*sizeof(float));
    memcpy(buffer_b, b, n*sizeof(float));

    inplace_center_and_trace_atom_major(buffer_a, &trace_a, 1, n/3);
    inplace_center_and_trace_atom_major(buffer_b, &trace_b, 1, n/3);
    /* mean-squared deviation after optimal superposition */
    msd = msd_atom_major(n/3, n/3, buffer_a, buffer_b, trace_a, trace_b, 0, NULL);
    return sqrt(msd);
}
/*
 * Assign each of N_frames frames (rows of 'chunk', each of length dim,
 * C-contiguous) to the index of its nearest center under the selected
 * metric ("euclidean" or "minRMSD"), writing that index into dtraj[i].
 * Returns ASSIGN_SUCCESS, ASSIGN_ERR_INVALID_METRIC, or
 * ASSIGN_ERR_NO_MEMORY.  Scratch buffers are allocated only for
 * minRMSD, which needs to center copies of its inputs.
 * Fix: "&centers" in the distance call had been mangled into the HTML
 * entity "&cent;ers" (encoding damage), which did not compile.
 */
int c_assign(float *chunk, float *centers, npy_int32 *dtraj, char* metric, Py_ssize_t N_frames, Py_ssize_t N_centers, Py_ssize_t dim) {
    int ret;
    float d, mindist;
    size_t argmin;
    float *buffer_a, *buffer_b;
    float (*distance)(float*, float*, size_t, float*, float*);

    buffer_a = NULL; buffer_b = NULL;
    ret = ASSIGN_SUCCESS;

    /* init metric */
    if(strcmp(metric,"euclidean")==0) {
        distance = euclidean_distance;
    } else if(strcmp(metric,"minRMSD")==0) {
        distance = minRMSD_distance;
        buffer_a = malloc(dim*sizeof(float));
        buffer_b = malloc(dim*sizeof(float));
        if(!buffer_a || !buffer_b) {
            ret = ASSIGN_ERR_NO_MEMORY; goto error;
        }
    } else {
        ret = ASSIGN_ERR_INVALID_METRIC;
        goto error;
    }

    /* do the assignment */
    {
        Py_ssize_t i,j;
        // #pragma omp for private(j, argmin, mindist)
        for(i = 0; i < N_frames; ++i) {
            mindist = FLT_MAX;
            argmin = -1;  /* overwritten on the first finite distance */
            for(j = 0; j < N_centers; ++j) {
                d = distance(&chunk[i*dim], &centers[j*dim], dim, buffer_a, buffer_b);
                if(d<mindist) { mindist = d; argmin = j; }
            }
            dtraj[i] = argmin;
        }
    }
error:
    /* free(NULL) is a no-op, so this is safe on every path */
    free(buffer_a);
    free(buffer_b);
    return ret;
}
/*
 * Python entry point: assign(chunk, centers, dtraj, metric).
 * Validates the numpy arrays, then delegates to c_assign.  Returns None
 * on success, NULL with an exception set on error.
 * Fixes over the original:
 *  - the dtraj length check compared np_chunk's first dimension against
 *    itself (always equal), so a mismatched dtraj was never detected;
 *    it now checks np_dtraj;
 *  - np_centers is initialized to NULL and the reference created by
 *    PyArray_ContiguousFromAny is released on every exit path (it was
 *    previously leaked).
 */
PyObject *assign(PyObject *self, PyObject *args) {
  PyObject *py_centers, *py_res;
  PyArrayObject *np_chunk, *np_centers, *np_dtraj;
  Py_ssize_t N_centers, N_frames, dim;
  float *chunk;
  float *centers;
  npy_int32 *dtraj;
  char *metric;

  py_centers = NULL; py_res = NULL;
  np_chunk = NULL; np_dtraj = NULL; np_centers = NULL;
  centers = NULL; metric=""; chunk = NULL; dtraj = NULL;

  if (!PyArg_ParseTuple(args, "O!OO!s", &PyArray_Type, &np_chunk, &py_centers, &PyArray_Type, &np_dtraj, &metric)) goto error; /* ref:borr. */

  /* import chunk */
  if(PyArray_TYPE(np_chunk)!=NPY_FLOAT32) { PyErr_SetString(PyExc_ValueError, "dtype of \"chunk\" isn\'t float (32)."); goto error; };
  if(!PyArray_ISCARRAY_RO(np_chunk) ) { PyErr_SetString(PyExc_ValueError, "\"chunk\" isn\'t C-style contiguous or isn\'t behaved."); goto error; };
  if(PyArray_NDIM(np_chunk)!=2) { PyErr_SetString(PyExc_ValueError, "Number of dimensions of \"chunk\" isn\'t 2."); goto error; };
  N_frames = np_chunk->dimensions[0];
  dim = np_chunk->dimensions[1];
  if(dim==0) {
    PyErr_SetString(PyExc_ValueError, "chunk dimension must be larger than zero.");
    goto error;
  }
  chunk = PyArray_DATA(np_chunk);

  /* import dtraj */
  if(PyArray_TYPE(np_dtraj)!=NPY_INT32) { PyErr_SetString(PyExc_ValueError, "dtype of \"dtraj\" isn\'t int (32)."); goto error; };
  if(!PyArray_ISBEHAVED_RO(np_dtraj) ) { PyErr_SetString(PyExc_ValueError, "\"dtraj\" isn\'t behaved."); goto error; };
  if(PyArray_NDIM(np_dtraj)!=1) { PyErr_SetString(PyExc_ValueError, "Number of dimensions of \"dtraj\" isn\'t 1."); goto error; };
  if(np_dtraj->dimensions[0]!=N_frames) {
    PyErr_SetString(PyExc_ValueError, "Size of \"dtraj\" differs from number of frames in \"chunk\".");
    goto error;
  }
  dtraj = (npy_int32*)PyArray_DATA(np_dtraj);

  /* import list of cluster centers (new reference, released below) */
  np_centers = (PyArrayObject*)PyArray_ContiguousFromAny(py_centers, NPY_FLOAT32, 2, 2);
  if(!np_centers) {
    PyErr_SetString(PyExc_ValueError, "Could not convert \"centers\" to two-dimensional C-contiguous behaved ndarray of float (32).");
    goto error;
  }
  N_centers = np_centers->dimensions[0];
  if(N_centers==0) {
    PyErr_SetString(PyExc_ValueError, "centers must contain at least one element.");
    goto error;
  }
  if(np_centers->dimensions[1]!=dim) {
    PyErr_SetString(PyExc_ValueError, "Dimension of cluster centers doesn\'t match dimension of frames.");
    goto error;
  }
  centers = (float*)PyArray_DATA(np_centers);

  /* do the assignment */
  switch(c_assign(chunk, centers, dtraj, metric, N_frames, N_centers, dim)) {
    case ASSIGN_ERR_INVALID_METRIC:
      PyErr_SetString(PyExc_ValueError, "metric must be one of \"euclidean\" or \"minRMSD\".");
      goto error;
    case ASSIGN_ERR_NO_MEMORY:
      PyErr_NoMemory();
      goto error;
  }

  py_res = Py_BuildValue(""); /* =None */
  /* fall through */
error:
  Py_XDECREF(np_centers);
  return py_res;
}
|
GB_unop__identity_int16_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int16_fp32
// op(A') function: GB_unop_tran__identity_int16_fp32
// C type: int16_t
// A type: float
// cast: int16_t cij = GB_cast_to_int16_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = GB_cast_to_int16_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = GB_cast_to_int16_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = identity (cast (Ax)): float entries cast (via double) to int16.
// Auto-generated; only comments are added here.
GrB_Info GB_unop_apply__identity_int16_fp32
(
int16_t *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// full case: no bitmap, all anz entries are present
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op with no typecast: a single parallel memcpy suffices
GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
// cast goes through double, matching the GB_CAST definition above
int16_t z = GB_cast_to_int16_t ((double) (aij)) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
int16_t z = GB_cast_to_int16_t ((double) (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): transpose with typecast.  The loop body lives
// in the shared template GB_unop_transpose.c, which is instantiated with
// this file's GB_CAST_OP and related macros.  Auto-generated; only
// comments are added here.
GrB_Info GB_unop_tran__identity_int16_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
2068.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Fill A with the deterministic pattern (row + col) / nj so runs are
   reproducible without any RNG. */
static
void init_array (int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int row, col;

  for (row = 0; row < ni; row++) {
    for (col = 0; col < nj; col++) {
      A[row][col] = ((DATA_TYPE) (row + col) / nj);
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Every element is written to stderr; a newline is emitted every 20
   values to keep the dump readable. */
static
void print_array(int ni, int nj,
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int row, col;

  for (row = 0; row < ni; row++) {
    for (col = 0; col < nj; col++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[row][col]);
      if ((row * NJ + col) % 20 == 0)
        fprintf(stderr, "\n");
    }
  }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
   including the call and return. */
/*
 * 3x3 convolution over the interior of A into B.
 * Fix: the original had a bare "#pragma omp" (which is invalid/no-op)
 * above the outer loop and a "#pragma omp target teams distribute" on
 * the inner loop, which would launch a fresh target region for every
 * row.  The independent rows are now parallelized with a single host
 * parallel-for.
 * NOTE(review): the border rows/columns of B are never written here,
 * yet print_array prints all of B — confirm this is intended.
 */
static
void kernel_conv2d(int ni,
int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
#pragma omp parallel for private(j)
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
        + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
        + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
}
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation (polybench-managed heap arrays). */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel: B = 3x3 convolution of A (interior points only). */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
|
GB_unaryop__ainv_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp32_fp32
// op(A') function: GB_tran__ainv_fp32_fp32
// C type: float
// A type: float
// cast: float cij = (float) aij
// unaryop: cij = -aij
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = -Ax [p] for all anz entries, via the GB_CAST_OP macro defined
// above (cast is float->float, so only the negation matters).
// Auto-generated; only comments are added here.
GrB_Info GB_unop__ainv_fp32_fp32
(
float *restrict Cx,
const float *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -(A'): transpose and negate.  The loop body lives in the shared
// template GB_unaryop_transpose.c (phase 2), instantiated with this
// file's GB_CAST_OP.  Auto-generated; only comments are added here.
GrB_Info GB_tran__ainv_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_taskyield2.c | <ompts:test>
<ompts:testdescription>Test taskyield directive. First generate a set of tasks and pause it immediately. Then we resume half of them and check whether they are scheduled by different threads</ompts:testdescription>
<ompts:ompversion>3.0</ompts:ompversion>
<ompts:directive>omp taskyield</ompts:directive>
<ompts:dependences>omp taskwait</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
#include "timer.h"
/* Returns 1 (pass) if the parent task finished after the long-running
   child, i.e. taskyield let another task run on the yielding thread. */
int <ompts:testcode:functionname>omp_taskyield</ompts:testcode:functionname>(FILE * logFile){
<ompts:orphan:vars>
int go=0;
double main_task_finish_time=0;
double long_child_finish_time=0;
</ompts:orphan:vars>
#pragma omp parallel num_threads(2)
{
#pragma omp single nowait
{ <ompts:orphan>
#pragma omp task
{ fprintf(logFile," %lf create parent task by thread %d\n",timer(),omp_get_thread_num());
#pragma omp task shared(go)
{
fprintf(logFile," %lf create small task by thread %d\n",timer(),omp_get_thread_num());
/* NOTE(review): busy-wait on shared 'go' with no atomic/flush — this
   relies on the runtime making the sibling task's store visible;
   confirm against the OpenMP memory model. */
while (go<1)
{
}// end of while
my_sleep (SLEEPTIME);
fprintf(logFile," %lf finish small task by thread %d\n",timer(),omp_get_thread_num());
}
#pragma omp task shared(go)
{
fprintf(logFile," %lf create big task by thread %d\n",timer(),omp_get_thread_num());
go=1;
my_sleep(SLEEPTIME_LONG);
long_child_finish_time=timer();
fprintf(logFile," %lf finish big task by thread %d\n",timer(),omp_get_thread_num());
}
/* the directive under test: without it the parent may finish first */
<ompts:check>#pragma omp taskyield</ompts:check>
my_sleep (SLEEPTIME);
main_task_finish_time=timer();
fprintf(logFile," %lf finish parent task by thread %d\n",timer(),omp_get_thread_num());
}/* end of omp main task */
</ompts:orphan>
} /* end of single */
} /* end of parallel */
return (main_task_finish_time>long_child_finish_time);
}
</ompts:testcode>
</ompts:test>
|
aula2809.c | // omp_schedule.cpp
// compile with: /openmp
//#include <windows.h>
#include <stdio.h>
#include <omp.h>
#include <unistd.h>
#define NUM_THREADS 4
#define STATIC_CHUNK 5
#define DYNAMIC_CHUNK 5
#define NUM_LOOPS 20
#define sleep_EVERY_N 3
/*
 * Demonstrates how OpenMP loop schedules distribute iterations: each
 * loop records which thread executed each iteration, and the table at
 * the end shows the assignment per schedule kind.  sleep(0) is a
 * deliberate hiccup every few iterations so dynamic/guided schedules
 * have something to rebalance around.
 */
int main( )
{
    int byStatic1[NUM_LOOPS];
    int byStaticN[NUM_LOOPS];
    int byDynamic1[NUM_LOOPS];
    int byDynamicN[NUM_LOOPS];
    int byGuided[NUM_LOOPS];

    omp_set_num_threads(NUM_THREADS);

    #pragma omp parallel
    {
        /* static, chunk 1: strict round-robin */
        #pragma omp for schedule(static, 1)
        for (int i = 0 ; i < NUM_LOOPS ; ++i)
        {
            if ((i % sleep_EVERY_N) == 0)
                sleep(0);
            byStatic1[i] = omp_get_thread_num( );
        }

        /* static, larger chunk: contiguous blocks per thread */
        #pragma omp for schedule(static, STATIC_CHUNK)
        for (int i = 0 ; i < NUM_LOOPS ; ++i)
        {
            if ((i % sleep_EVERY_N) == 0)
                sleep(0);
            byStaticN[i] = omp_get_thread_num( );
        }

        /* dynamic, chunk 1: first-come first-served per iteration */
        #pragma omp for schedule(dynamic, 1)
        for (int i = 0 ; i < NUM_LOOPS ; ++i)
        {
            if ((i % sleep_EVERY_N) == 0)
                sleep(0);
            byDynamic1[i] = omp_get_thread_num( );
        }

        /* dynamic, larger chunk */
        #pragma omp for schedule(dynamic, DYNAMIC_CHUNK)
        for (int i = 0 ; i < NUM_LOOPS ; ++i)
        {
            if ((i % sleep_EVERY_N) == 0)
                sleep(0);
            byDynamicN[i] = omp_get_thread_num( );
        }

        /* guided: chunk size shrinks as iterations run out */
        #pragma omp for schedule(guided)
        for (int i = 0 ; i < NUM_LOOPS ; ++i)
        {
            if ((i % sleep_EVERY_N) == 0)
                sleep(0);
            byGuided[i] = omp_get_thread_num( );
        }
    }

    printf("------------------------------------------------\n");
    printf("| static | static | dynamic | dynamic | guided |\n");
    printf("| 1 | %d | 1 | %d | |\n",
           STATIC_CHUNK, DYNAMIC_CHUNK);
    printf("------------------------------------------------\n");
    for (int i=0; i<NUM_LOOPS; ++i)
    {
        printf("| %d | %d | %d | %d |"
               " %d |\n",
               byStatic1[i], byStaticN[i],
               byDynamic1[i], byDynamicN[i], byGuided[i]);
    }
    printf("------------------------------------------------\n");
}
|
functions.h | #ifndef __FUNCTIONS_H__
#define __FUNCTIONS_H__
#include "../scaling/scaling.h"
#include "../summation/summation.h"
#include "../contraction/contraction.h"
namespace CTF {
/**
* @defgroup CTF_func CTF functions
* \brief user-defined function interface
* @addtogroup CTF_func
* @{
*/
class Idx_Tensor;
/**
* \brief custom scalar function on tensor: e.g. A["ij"] = f(A["ij"]),
*        applied element-wise and in place
*/
template<typename dtype=double>
class Endomorphism : public CTF_int::endomorphism {
public:
/**
* \brief function signature for element-wise operation a=f(a)
*/
//dtype (*f)(dtype);
std::function<void(dtype&)> f;

/**
* \brief constructor takes function pointer
* \param[in] f_ scalar function: (type) -> (type), mutates its argument
*/
Endomorphism(std::function<void(dtype&)> f_){ f = f_; }
/**
* \brief default constructor; leaves f empty, so assign it before use
*/
Endomorphism(){}

/**
* \brief apply function f to value stored at a
* \param[in,out] a pointer to operand that will be cast to dtype
* is set to result of applying f on value at a
*/
void apply_f(char * a) const { f(((dtype*)a)[0]); }
};
/**
* \brief custom function f : X -> Y to be applied to tensor elements:
*        e.g. B["ij"] = f(A["ij"])
*/
template<typename dtype_A=double, typename dtype_B=dtype_A>
class Univar_Function : public CTF_int::univar_function {
public:
/**
* \brief function signature for element-wise operation, compute b=f(a)
*/
//dtype_B (*f)(dtype_A);
std::function<dtype_B(dtype_A)> f;

/**
* \brief constructor takes function pointers to compute B=f(A));
* \param[in] f_ function (type_A)->(type_B) applied per element
*/
Univar_Function(std::function<dtype_B(dtype_A)> f_){ f = f_; }

/**
* \brief apply function f to value stored at a; b is overwritten
* \param[in] a pointer to operand that will be cast to dtype
* \param[in,out] b result &f(*a) of applying f on value of (different type) on a
*/
void apply_f(char const * a, char * b) const { ((dtype_B*)b)[0]=f(((dtype_A*)a)[0]); }

/**
* \brief compute b = b+f(a), using sr_B's addition
* \param[in] a pointer to operand that will be cast to dtype
* \param[in,out] b result &f(*a) of applying f on value of (different type) on a
* \param[in] sr_B algebraic structure for b, needed to do add
*/
void acc_f(char const * a, char * b, CTF_int::algstrct const * sr_B) const {
dtype_B tb=f(((dtype_A*)a)[0]);
sr_B->add(b, (char const *)&tb, b);
}

};
/**
* \brief custom function f : (X * Y) -> X applied on two tensors as summation:
*        e.g. B["ij"] = f(A["ij"],B["ij"]); f mutates its second argument
*/
template<typename dtype_A=double, typename dtype_B=dtype_A>
class Univar_Transform : public CTF_int::univar_function {
public:
/**
* \brief function signature for element-wise operation, compute b=f(a,b)
*/
//void (*f)(dtype_A, dtype_B &);
std::function<void(dtype_A, dtype_B &)> f;

/**
* \brief constructor takes function pointers to compute B=f(A));
* \param[in] f_ function (type_A, type_B&) updating its second argument
*/
Univar_Transform(std::function<void(dtype_A, dtype_B &)> f_){ f = f_; }

/**
* \brief apply function f to value stored at a, for an accumulator, this is the same as acc_f below
* \param[in] a pointer to operand that will be cast to dtype
* \param[in,out] b result &f(*a) of applying f on value of (different type) on a
*/
void apply_f(char const * a, char * b) const { acc_f(a,b,NULL); }

/**
* \brief compute f(a,b); b is updated directly by f, so no algstrct add is needed
* \param[in] a pointer to the accumulated operand
* \param[in,out] b value that is accumulated to
* \param[in] sr_B algebraic structure for b, here is ignored
*/
void acc_f(char const * a, char * b, CTF_int::algstrct const * sr_B) const {
f(((dtype_A*)a)[0], ((dtype_B*)b)[0]);
}

/** \brief marks this op as accumulating into its second operand */
bool is_accumulator() const { return true; }
};
/**
* \brief custom bivariate function on two tensors:
* e.g. C["ij"] = f(A["ik"],B["kj"])
*/
template<typename dtype_A=double, typename dtype_B=dtype_A, typename dtype_C=dtype_A>
class Bivar_Function : public CTF_int::bivar_function {
public:
/**
* \brief function signature for element-wise operation, compute c=f(a,b)
*/
//dtype_C (*f)(dtype_A, dtype_B);
std::function<dtype_C (dtype_A, dtype_B)> f;

/**
* \brief constructor takes function pointers to compute C=f(A,B);
*        the function is marked non-commutative
* \param[in] f_ bivariate function (type_A,type_B)->(type_C)
*/
Bivar_Function(std::function<dtype_C (dtype_A, dtype_B)> f_)
: CTF_int::bivar_function(){
f=f_; commutative=0;
}

/**
* \brief constructor takes function pointers to compute C=f(A,B);
* \param[in] f_ bivariate function (type_A,type_B)->(type_C)
* \param[in] is_comm whether function is commutative
*/
Bivar_Function(std::function<dtype_C (dtype_A, dtype_B)> f_,
bool is_comm)
: CTF_int::bivar_function(is_comm){
f=f_;
}

/**
* \brief default constructor sets function pointer to NULL
*/
Bivar_Function();

/**
* \brief compute c = f(a,b); c is overwritten (no accumulation here)
* \param[in] a pointer to operand that will be cast to dtype
* \param[in] b pointer to operand that will be cast to dtype
* \param[in,out] c result f(*a,*b) stored into c
*/
void apply_f(char const * a, char const * b, char * c) const {
((dtype_C*)c)[0] = f(((dtype_A const*)a)[0],((dtype_B const*)b)[0]);
}

/**
* \brief compute c = c + f(a,b) using sr_C's addition
* \param[in] a pointer to operand that will be cast to dtype
* \param[in] b pointer to operand that will be cast to dtype
* \param[in,out] c result c+f(*a,*b) accumulated into c
* \param[in] sr_C algebraic structure for c, needed to do add
*/
void acc_f(char const * a, char const * b, char * c, CTF_int::algstrct const * sr_C) const {
dtype_C tmp;
tmp = f(((dtype_A const*)a)[0],((dtype_B const*)b)[0]);
sr_C->add(c, (char const *)&tmp, c);
}
// FIXME: below kernels replicate code from src/interface/semiring.h
/**
 * \brief C += f(A,B) where A is m-by-k CSR (1-based indices), B is dense
 *        k-by-n (column-major), C is dense m-by-n (column-major);
 *        accumulation uses sr_C->add.
 * Fix: the original put "#pragma omp parallel for" on both the row and
 * column loops, creating a nested parallel region for every row; the two
 * perfectly-nested independent loops are now collapsed into one parallel
 * region instead.  Each (row_A, col_B) pair writes a distinct C entry,
 * so no synchronization is needed.
 */
void csrmm(int m,
int n,
int k,
dtype_A const * A,
int const * JA,
int const * IA,
int64_t nnz_A,
dtype_B const * B,
dtype_C * C,
CTF_int::algstrct const * sr_C) const {
//TAU_FSTART(3type_csrmm);
#ifdef _OPENMP
#pragma omp parallel for collapse(2)
#endif
for (int row_A=0; row_A<m; row_A++){
for (int col_B=0; col_B<n; col_B++){
for (int i_A=IA[row_A]-1; i_A<IA[row_A+1]-1; i_A++){
int col_A = JA[i_A]-1;   // convert 1-based column index
dtype_C tmp = f(A[i_A],B[col_B*k+col_A]);
sr_C->add((char const *)&C[col_B*m+row_A],(char const*)&tmp,(char *)&C[col_B*m+row_A]);
}
}
}
//TAU_FSTOP(3type_csrmm);
}
/**
 * \brief C += f(A,B) where A (m-by-k) and B (k-by-n) are both CSR with
 *        1-based indices and C is dense m-by-n (column-major).
 * Parallelized over rows of A only: each thread owns row_A and therefore
 * a distinct set of C entries C[col_B*m+row_A], so the sr_C->add calls
 * do not race.
 */
void csrmultd
(int m,
int n,
int k,
dtype_A const * A,
int const * JA,
int const * IA,
int nnz_A,
dtype_B const * B,
int const * JB,
int const * IB,
int nnz_B,
dtype_C * C,
CTF_int::algstrct const * sr_C) const {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int row_A=0; row_A<m; row_A++){
for (int i_A=IA[row_A]-1; i_A<IA[row_A+1]-1; i_A++){
int row_B = JA[i_A]-1; //=col_A
for (int i_B=IB[row_B]-1; i_B<IB[row_B+1]-1; i_B++){
int col_B = JB[i_B]-1;
dtype_C tmp = f(A[i_A],B[i_B]);
sr_C->add((char const*)&C[col_B*m+row_A],(char const*)&tmp,(char *)&C[col_B*m+row_A]);
}
}
}
}
// Sparse-sparse multiply: computes the product A*B (with f as the elementwise
// multiply and sr_C as the addition) in CSR form, then folds it into the
// existing C_CSR. All CSR index arrays (IA/JA/IB/JB/IC/JC) are 1-based.
void csrmultcsr
(int m,
int n,
int k,
dtype_A const * A,
int const * JA,
int const * IA,
int nnz_A,
dtype_B const * B,
int const * JB,
int const * IB,
int nnz_B,
char *& C_CSR,
CTF_int::algstrct const * sr_C) const {
// Pass 1: count nonzeros per row of the product to build row offsets IC.
int * IC = (int*)CTF_int::alloc(sizeof(int)*(m+1));
int * has_col = (int*)CTF_int::alloc(sizeof(int)*n);
IC[0] = 1;
for (int i=0; i<m; i++){
memset(has_col, 0, sizeof(int)*n);
IC[i+1] = IC[i];
// marks in has_col the columns present in row i of the product
CTF_int::CSR_Matrix::compute_has_col(JA, IA, JB, IB, i, has_col);
for (int j=0; j<n; j++){
IC[i+1] += has_col[j];
}
}
// Allocate the product with the exact nonzero count; IC is copied into the
// matrix's own IA storage and the temporary is released.
CTF_int::CSR_Matrix C(IC[m]-1, m, n, sr_C);
dtype_C * vC = (dtype_C*)C.vals();
int * JC = C.JA();
memcpy(C.IA(), IC, sizeof(int)*(m+1));
CTF_int::cdealloc(IC);
IC = C.IA();
// Pass 2: per row, lay out sorted column indices in JC and record each
// column's destination slot in rev_col, then accumulate values into vC.
int64_t * rev_col = (int64_t*)CTF_int::alloc(sizeof(int64_t)*n);
for (int i=0; i<m; i++){
memset(has_col, 0, sizeof(int)*n);
CTF_int::CSR_Matrix::compute_has_col(JA, IA, JB, IB, i, has_col);
int vs = 0;
for (int j=0; j<n; j++){
if (has_col[j]){
JC[IC[i]+vs-1] = j+1;
rev_col[j] = IC[i]+vs-1;
vs++;
}
}
// reuse has_col as a "seen" flag: first hit on a column assigns, later hits add
memset(has_col, 0, sizeof(int)*n);
for (int j=0; j<IA[i+1]-IA[i]; j++){
int row_B = JA[IA[i]+j-1]-1;
int idx_A = IA[i]+j-1;
for (int l=0; l<IB[row_B+1]-IB[row_B]; l++){
int idx_B = IB[row_B]+l-1;
if (has_col[JB[idx_B]-1]){
dtype_C tmp = f(A[idx_A],B[idx_B]);
sr_C->add((char const *)&vC[rev_col[JB[idx_B]-1]], (char const *)&tmp, (char *)&vC[rev_col[JB[idx_B]-1]]);
} else {
vC[rev_col[JB[idx_B]-1]] = f(A[idx_A],B[idx_B]);
}
has_col[JB[idx_B]-1] = 1;
}
}
}
// Merge with any pre-existing C: adopt the new matrix if C was empty,
// otherwise csr_add combines both and the temporary product is freed.
CTF_int::CSR_Matrix C_in(C_CSR);
if (C_CSR == NULL || C_in.nnz() == 0){
C_CSR = C.all_data;
} else {
char * ans = CTF_int::CSR_Matrix::csr_add(C_CSR, C.all_data, sr_C);
CTF_int::cdealloc(C.all_data);
C_CSR = ans;
}
CTF_int::cdealloc(has_col);
CTF_int::cdealloc(rev_col);
}
void ccsrmm(int m,
int n,
int k,
char const * A,
int const * JA,
int const * IA,
int64_t nnz_A,
char const * B,
char * C,
CTF_int::algstrct const * sr_C) const {
csrmm(m,n,k,(dtype_A const *)A,JA,IA,nnz_A,(dtype_B const *)B, (dtype_C *)C, sr_C);
}
void ccsrmultd
(int m,
int n,
int k,
char const * A,
int const * JA,
int const * IA,
int nnz_A,
char const * B,
int const * JB,
int const * IB,
int nnz_B,
char * C,
CTF_int::algstrct const * sr_C) const {
csrmultd(m,n,k,(dtype_A const *)A,JA,IA,nnz_A,(dtype_B const *)B,JB,IB,nnz_B,(dtype_C *)C,sr_C);
}
void ccsrmultcsr
(int m,
int n,
int k,
char const * A,
int const * JA,
int const * IA,
int nnz_A,
char const * B,
int const * JB,
int const * IB,
int nnz_B,
char *& C_CSR,
CTF_int::algstrct const * sr_C) const {
csrmultcsr(m,n,k,(dtype_A const *)A,JA,IA,nnz_A,(dtype_B const *)B, JB, IB, nnz_B, C_CSR, sr_C);
}
};
/**
* \brief custom function f : (X * Y * Z) -> Z applied on three tensors as contraction:
* e.g. f(A["ij"],B["ij"],C["ij"])
*/
template<typename dtype_A=double, typename dtype_B=dtype_A, typename dtype_C=dtype_A>
class Bivar_Transform : public CTF_int::bivar_function {
  public:
    /** \brief elementwise transform f(a, b, c&), mutating its third argument in place */
    std::function<void(dtype_A, dtype_B, dtype_C &)> f;

    /**
     * \brief construct from a transform that accumulates into its third argument
     * \param[in] f_ function (type_A, type_B, type_C&) applied elementwise
     */
    Bivar_Transform(std::function<void(dtype_A, dtype_B, dtype_C &)> f_)
      : CTF_int::bivar_function(), f(f_) {}

    /**
     * \brief construct from a transform, specifying commutativity
     * \param[in] f_ function (type_A, type_B, type_C&) applied elementwise
     * \param[in] is_comm whether the transform is commutative in its first two arguments
     */
    Bivar_Transform(std::function<void(dtype_A, dtype_B, dtype_C &)> f_,
                    bool is_comm)
      : CTF_int::bivar_function(is_comm), f(f_) {}

    /**
     * \brief apply f(*a, *b, *c), updating the value at c in place
     * \param[in] a pointer to first operand
     * \param[in] b pointer to second operand
     * \param[in,out] c value that is accumulated to
     * \param[in] sr_B algebraic structure for b; unused here
     */
    void acc_f(char const * a, char const * b, char * c, CTF_int::algstrct const * sr_B) const {
      f(*(dtype_A*)a, *(dtype_B*)b, *(dtype_C*)c);
    }

    /**
     * \brief same as acc_f, since this functor is itself an accumulator
     * \param[in] a pointer to first operand
     * \param[in] b pointer to second operand
     * \param[in,out] c value that f writes/accumulates into
     */
    void apply_f(char const * a, char const * b, char * c) const { acc_f(a, b, c, NULL); }

    bool is_accumulator() const { return true; }
};
/**
 * \brief tagged wrapper holding either a unary (B=f(A)) or binary (C=f(A,B))
 *        elementwise function; owns the wrapped functor object.
 */
template<typename dtype_A=double, typename dtype_B=dtype_A, typename dtype_C=dtype_A>
class Function {
  public:
    bool is_univar;                                     // true iff constructed from a unary function
    Univar_Function<dtype_A, dtype_B> * univar;         // owned; non-NULL iff is_univar
    bool is_bivar;                                      // true iff constructed from a binary function
    Bivar_Function<dtype_A, dtype_B, dtype_C> * bivar;  // owned; non-NULL iff is_bivar

    /**
     * \brief wrap a unary elementwise function B = f(A)
     * \param[in] f_ function (type_A)->(type_B)
     */
    Function(std::function<dtype_B(dtype_A)> f_){
      is_univar = true;
      is_bivar = false;
      univar = new Univar_Function<dtype_A, dtype_B>(f_);
      bivar = NULL; // keep the unused branch well-defined (was uninitialized)
    }

    /**
     * \brief wrap a binary elementwise function C = f(A,B)
     * \param[in] f_ function (type_A,type_B)->(type_C)
     * \param[in] is_comm whether f is commutative
     */
    Function(std::function<dtype_C(dtype_A,dtype_B)> f_, bool is_comm=false){
      is_univar = false;
      is_bivar = true;
      bivar = new Bivar_Function<dtype_A, dtype_B, dtype_C>(f_,is_comm);
      univar = NULL; // keep the unused branch well-defined (was uninitialized)
    }

    // this class owns raw pointers; implicit copy would double-delete them
    Function(Function const &) = delete;
    Function & operator=(Function const &) = delete;

    /** \brief apply the wrapped unary function to term A; requires is_univar */
    CTF_int::Unifun_Term operator()(CTF_int::Term const & A) const {
      assert(is_univar);
      return univar->operator()(A);
    }

    /** \brief apply the wrapped binary function to terms (A,B); requires is_bivar */
    CTF_int::Bifun_Term operator()(CTF_int::Term const & A, CTF_int::Term const & B) const {
      assert(is_bivar);
      return bivar->operator()(A,B);
    }

    operator Univar_Function<dtype_A, dtype_B>() const {
      assert(is_univar);
      return *univar;
    }

    operator Bivar_Function<dtype_A, dtype_B, dtype_C>() const {
      assert(is_bivar);
      return *bivar;
    }

    ~Function(){
      if (is_univar) delete univar;
      if (is_bivar) delete bivar;
    }
};
template<typename dtype_A=double, typename dtype_B=dtype_A, typename dtype_C=dtype_A>
class Transform {
public:
bool is_endo;
Endomorphism<dtype_A> * endo;
bool is_univar;
Univar_Transform<dtype_A, dtype_B> * univar;
bool is_bivar;
Bivar_Transform<dtype_A, dtype_B, dtype_C> * bivar;
Transform(std::function<void(dtype_A&)> f_){
is_endo = true;
is_univar = false;
is_bivar = false;
endo = new Endomorphism<dtype_A>(f_);
}
Transform(std::function<void(dtype_A, dtype_B&)> f_){
is_endo = false;
is_univar = true;
is_bivar = false;
univar = new Univar_Transform<dtype_A, dtype_B>(f_);
}
Transform(std::function<void(dtype_A, dtype_B, dtype_C&)> f_){
is_endo = false;
is_univar = false;
is_bivar = true;
bivar = new Bivar_Transform<dtype_A, dtype_B, dtype_C>(f_);
}
~Transform(){
if (is_endo) delete endo;
if (is_univar) delete univar;
if (is_bivar) delete bivar;
}
void operator()(CTF_int::Term const & A) const {
assert(is_endo);
endo->operator()(A);
}
void operator()(CTF_int::Term const & A, CTF_int::Term const & B) const {
assert(is_univar);
univar->operator()(A,B);
}
void operator()(CTF_int::Term const & A, CTF_int::Term const & B, CTF_int::Term const & C) const {
assert(is_bivar);
bivar->operator()(A,B,C);
}
operator Bivar_Transform<dtype_A, dtype_B, dtype_C>(){
assert(is_bivar);
return *bivar;
}
operator Univar_Transform<dtype_A, dtype_B>(){
assert(is_univar);
return *univar;
}
operator Endomorphism<dtype_A>(){
assert(is_endo);
return *endo;
}
bool is_accumulator() const { return true; }
};
/**
* @}
*/
}
#endif
|
WaveFunctionComponent.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
// Miguel Morales, moralessilva2@llnl.gov, Lawrence Livermore National Laboratory
// Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
// Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
// Raymond Clay III, j.k.rofling@gmail.com, Lawrence Livermore National Laboratory
// Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
//////////////////////////////////////////////////////////////////////////////////////
#ifndef QMCPLUSPLUS_WAVEFUNCTIONCOMPONENT_H
#define QMCPLUSPLUS_WAVEFUNCTIONCOMPONENT_H
#include "Message/Communicate.h"
#include "Configuration.h"
#include "Particle/ParticleSet.h"
#include "Particle/VirtualParticleSet.h"
#include "Particle/DistanceTableData.h"
#include "OhmmsData/RecordProperty.h"
#include "QMCWaveFunctions/OrbitalSetTraits.h"
#include "Particle/MCWalkerConfiguration.h"
#include "type_traits/template_types.hpp"
#ifdef QMC_CUDA
#include "type_traits/CUDATypes.h"
#endif
/**@file WaveFunctionComponent.h
*@brief Declaration of WaveFunctionComponent
*/
namespace qmcplusplus
{
#ifdef QMC_CUDA
/// Descriptor of one non-local pseudopotential evaluation job (CUDA path).
struct NLjob
{
  int walker;        ///< walker index
  int elec;          ///< electron index
  int numQuadPoints; ///< number of quadrature points
  NLjob(int walker_in, int elec_in, int nqp)
    : walker(walker_in), elec(elec_in), numQuadPoints(nqp)
  {}
};
#endif
///forward declaration of WaveFunctionComponent
class WaveFunctionComponent;
///forward declaration of DiffWaveFunctionComponent
class DiffWaveFunctionComponent;
typedef WaveFunctionComponent* WaveFunctionComponentPtr;
typedef DiffWaveFunctionComponent* DiffWaveFunctionComponentPtr;
/**@defgroup WaveFunctionComponent group
* @brief Classes which constitute a many-body trial wave function
*
* A many-body trial wave function is
* \f[
\Psi(\{ {\bf R}\}) = \prod_i \psi_{i}(\{ {\bf R}\}),
* \f]
* where \f$\Psi\f$s are represented by
* the derived classes from WaveFunctionComponent.
*/
/** @ingroup WaveFunctionComponent
* @brief An abstract class for a component of a many-body trial wave function
*
* mw_ prefix is a function name signature indicating it is for handling a batch of WaveFunctionComponent objects
* which are required to be base class pointers of the same derived class type.
* all the mw_ routines must be implemented in a way either stateless or maintains states of every walker.
*/
struct WaveFunctionComponent : public QMCTraits
{
/** enum for a update mode */
enum
{
ORB_PBYP_RATIO, /*!< particle-by-particle ratio only */
ORB_PBYP_ALL, /*!< particle-by-particle, update Value-Gradient-Laplacian */
ORB_PBYP_PARTIAL, /*!< particle-by-particle, update Value and Grdient */
ORB_WALKER, /*!< walker update */
ORB_ALLWALKER /*!< all walkers update */
};
typedef ParticleAttrib<ValueType> ValueVectorType;
typedef ParticleAttrib<GradType> GradVectorType;
typedef ParticleSet::Walker_t Walker_t;
typedef Walker_t::WFBuffer_t WFBufferType;
typedef Walker_t::Buffer_t BufferType;
typedef OrbitalSetTraits<RealType>::ValueMatrix_t RealMatrix_t;
typedef OrbitalSetTraits<ValueType>::ValueMatrix_t ValueMatrix_t;
typedef OrbitalSetTraits<ValueType>::GradMatrix_t GradMatrix_t;
typedef OrbitalSetTraits<ValueType>::HessType HessType;
typedef OrbitalSetTraits<ValueType>::HessVector_t HessVector_t;
// the value type for log(psi)
using LogValueType = std::complex<QTFull::RealType>;
// the value type for psi(r')/psi(r)
using PsiValueType = QTFull::ValueType;
/** flag to set the optimization mode */
bool IsOptimizing;
/** boolean to set optimization
*
* If true, this object is actively modified during optimization
*/
bool Optimizable;
/** true, if this component is fermionic */
bool is_fermionic;
/** current update mode */
int UpdateMode;
/** current \f$\log\phi \f$
*/
LogValueType LogValue;
/** Pointer to the differential WaveFunctionComponent of this object
*
* If dPsi=0, this WaveFunctionComponent is constant with respect to the optimizable variables
*/
DiffWaveFunctionComponentPtr dPsi;
/** A vector for \f$ \frac{\partial \nabla \log\phi}{\partial \alpha} \f$
*/
GradVectorType dLogPsi;
/** A vector for \f$ \frac{\partial \nabla^2 \log\phi}{\partial \alpha} \f$
*/
ValueVectorType d2LogPsi;
/** Name of the class derived from WaveFunctionComponent
*/
std::string ClassName;
///list of variables this WaveFunctionComponent handles
opt_variables_type myVars;
///Bytes in WFBuffer
size_t Bytes_in_WFBuffer;
/// default constructor
WaveFunctionComponent();
//WaveFunctionComponent(const WaveFunctionComponent& old);
///default destructor
virtual ~WaveFunctionComponent() {}
inline void setOptimizable(bool optimizeit) { Optimizable = optimizeit; }
///assign a differential WaveFunctionComponent
virtual void setDiffOrbital(DiffWaveFunctionComponentPtr d);
///assembles the full value
PsiValueType getValue() const
{
// convert the stored log(psi) back to a full value (see LogToValue helper)
return LogToValue<PsiValueType>::convert(LogValue);
}
/** check in optimizable parameters
* @param active a super set of optimizable variables
*
* Add the paramemters this WaveFunctionComponent manage to active.
*/
virtual void checkInVariables(opt_variables_type& active) = 0;
/** check out optimizable variables
*
* Update myVars index map
*/
virtual void checkOutVariables(const opt_variables_type& active) = 0;
/** reset the parameters during optimizations
*/
virtual void resetParameters(const opt_variables_type& active) = 0;
/** print the state, e.g., optimizables */
virtual void reportStatus(std::ostream& os) = 0;
/** reset properties, e.g., distance tables, for a new target ParticleSet
* @param P ParticleSet
*/
virtual void resetTargetParticleSet(ParticleSet& P) = 0;
/** evaluate the value of the WaveFunctionComponent from scratch
* @param P active ParticleSet
* @param G Gradients, \f$\nabla\ln\Psi\f$
* @param L Laplacians, \f$\nabla^2\ln\Psi\f$
* @return the log value
*
* Mainly for walker-by-walker move. The initial stage of particle-by-particle
* move also uses this.
*/
virtual LogValueType evaluateLog(ParticleSet& P,
ParticleSet::ParticleGradient_t& G,
ParticleSet::ParticleLaplacian_t& L) = 0;
/** evaluate from scratch the same type WaveFunctionComponent of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param G_list the list of Gradients pointers in a walker batch, \f$\nabla\ln\Psi\f$
* @param L_list the list of Laplacians pointers in a walker batch, \f$\nabla^2\ln\Psi\f$
* @@param values the log WF values of walkers in a batch
*/
virtual void mw_evaluateLog(const std::vector<WaveFunctionComponent*>& WFC_list,
                            const std::vector<ParticleSet*>& P_list,
                            const std::vector<ParticleSet::ParticleGradient_t*>& G_list,
                            const std::vector<ParticleSet::ParticleLaplacian_t*>& L_list)
{
  // default: dispatch walker-by-walker; signed bound avoids the
  // signed/unsigned comparison in the OpenMP loop condition
  const int num_wfc = static_cast<int>(WFC_list.size());
#pragma omp parallel for
  for (int iw = 0; iw < num_wfc; iw++)
    WFC_list[iw]->evaluateLog(*P_list[iw], *G_list[iw], *L_list[iw]);
}
/** recompute the value of the WaveFunctionComponents which require critical accuracy.
* needed for Slater Determinants but not needed for most types of WaveFunctionComponents
*/
virtual void recompute(ParticleSet& P) {}
// virtual void evaluateHessian(ParticleSet& P, IndexType iat, HessType& grad_grad_psi)
// {
// APP_ABORT("WaveFunctionComponent::evaluateHessian is not implemented");
// }
virtual void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi_all)
{
// default aborts: components that support Hessian evaluation must override
APP_ABORT("WaveFunctionComponent::evaluateHessian is not implemented in " + ClassName + " class.");
}
/** return the current gradient for the iat-th particle
* @param P quantum particle set
* @param iat particle index
* @return the gradient of the iat-th particle
*/
virtual GradType evalGrad(ParticleSet& P, int iat)
{
  // abort message previously referred to a nonexistent "evalGradient"
  APP_ABORT("WaveFunctionComponent::evalGrad is not implemented in " + ClassName + " class.");
  return GradType();
}
/** compute the current gradients for the iat-th particle of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param grad_now the list of gradients in a walker batch, \f$\nabla\ln\Psi\f$
*/
virtual void mw_evalGrad(const std::vector<WaveFunctionComponent*>& WFC_list,
                         const std::vector<ParticleSet*>& P_list,
                         int iat,
                         std::vector<GradType>& grad_now)
{
  // default: dispatch walker-by-walker; signed bound avoids sign-compare in the OpenMP loop
  const int num_wfc = static_cast<int>(WFC_list.size());
#pragma omp parallel for
  for (int iw = 0; iw < num_wfc; iw++)
    grad_now[iw] = WFC_list[iw]->evalGrad(*P_list[iw], iat);
}
/** compute the current gradients for the iat-th particle of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param grad_now the list of gradients in a walker batch, \f$\nabla\ln\Psi\f$
*/
virtual void mw_evalGrad(const std::vector<std::reference_wrapper<WaveFunctionComponent>>& WFC_list,
                         const std::vector<std::reference_wrapper<ParticleSet>>& P_list,
                         int iat,
                         std::vector<GradType>& grad_now)
{
  // default: dispatch walker-by-walker; signed bound avoids sign-compare in the OpenMP loop
  const int num_wfc = static_cast<int>(WFC_list.size());
#pragma omp parallel for
  for (int iw = 0; iw < num_wfc; iw++)
    grad_now[iw] = WFC_list[iw].get().evalGrad(P_list[iw].get(), iat);
}
/** return the logarithmic gradient for the iat-th particle
* of the source particleset
* @param P quantum particle set
* @param iat particle index
* @return the gradient of the iat-th particle
*/
virtual GradType evalGradSource(ParticleSet& P, ParticleSet& source, int iat)
{
// unit_test_hamiltonian calls this function incorrectly; do not abort for now
// APP_ABORT("WaveFunctionComponent::evalGradSource is not implemented");
// default: no source-particle gradient contribution
return GradType();
}
/** Adds the gradient w.r.t. the iat-th particle of the
* source particleset (ions) of the logarithmic gradient
* and laplacian w.r.t. the target paritlceset (electrons).
* @param P quantum particle set (electrons)
* @param source classical particle set (ions)
* @param iat particle index of source (ion)
* @param the ion gradient of the electron gradient
* @param the ion gradient of the electron laplacian.
* @return the log gradient of psi w.r.t. the source particle iat
*/
virtual GradType evalGradSource(ParticleSet& P,
ParticleSet& source,
int iat,
TinyVector<ParticleSet::ParticleGradient_t, OHMMS_DIM>& grad_grad,
TinyVector<ParticleSet::ParticleLaplacian_t, OHMMS_DIM>& lapl_grad)
{
// default: leaves grad_grad/lapl_grad untouched and reports a zero gradient
return GradType();
}
/** evaluate the ratio of the new to old WaveFunctionComponent value and the new gradient
* @param P the active ParticleSet
* @param iat the index of a particle
* @param grad_iat Gradient for the active particle
*/
virtual ValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
// default aborts: components used in pbyp drift moves must override
APP_ABORT("WaveFunctionComponent::ratioGrad is not implemented in " + ClassName + " class.");
return ValueType();
}
/** compute the ratio of the new to old WaveFunctionComponent value and the new gradient of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$
* @param grad_now the list of new gradients in a walker batch, \f$\nabla\ln\Psi\f$
*/
virtual void mw_ratioGrad(const std::vector<WaveFunctionComponent*>& WFC_list,
                          const std::vector<ParticleSet*>& P_list,
                          int iat,
                          std::vector<PsiValueType>& ratios,
                          std::vector<GradType>& grad_new)
{
  // default: dispatch walker-by-walker; signed bound avoids sign-compare in the OpenMP loop
  const int num_wfc = static_cast<int>(WFC_list.size());
#pragma omp parallel for
  for (int iw = 0; iw < num_wfc; iw++)
    ratios[iw] = WFC_list[iw]->ratioGrad(*P_list[iw], iat, grad_new[iw]);
}
/** compute the ratio of the new to old WaveFunctionComponent value and the new gradient of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$
* @param grad_now the list of new gradients in a walker batch, \f$\nabla\ln\Psi\f$
*/
virtual void mw_ratioGrad(const RefVector<WaveFunctionComponent>& WFC_list,
const RefVector<ParticleSet>& P_list,
int iat,
std::vector<PsiValueType>& ratios,
std::vector<GradType>& grad_new)
{
// NOTE(review): the OpenMP pragma is deliberately commented out here,
// unlike the pointer-list overload above — confirm whether this is intended
//#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
ratios[iw] = WFC_list[iw].get().ratioGrad(P_list[iw], iat, grad_new[iw]);
}
/** a move for iat-th particle is accepted. Update the current content.
* @param P target ParticleSet
* @param iat index of the particle whose new position was proposed
*/
virtual void acceptMove(ParticleSet& P, int iat) = 0;
/** moves of the iat-th particle on some walkers in a batch is accepted. Update the current content.
* Note that all the lists only include accepted walkers.
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
*/
virtual void mw_acceptMove(const std::vector<WaveFunctionComponent*>& WFC_list,
                           const std::vector<ParticleSet*>& P_list,
                           int iat)
{
  // default: dispatch walker-by-walker; signed bound avoids sign-compare in the OpenMP loop
  const int num_wfc = static_cast<int>(WFC_list.size());
#pragma omp parallel for
  for (int iw = 0; iw < num_wfc; iw++)
    WFC_list[iw]->acceptMove(*P_list[iw], iat);
}
/** complete all the delayed updates, must be called after each substep or step during pbyp move
*/
virtual void completeUpdates() {}
/** complete all the delayed updates for all the walkers in a batch
* must be called after each substep or step during pbyp move
*/
virtual void mw_completeUpdates(const std::vector<WaveFunctionComponent*>& WFC_list)
{
  // default: dispatch walker-by-walker; signed bound avoids sign-compare in the OpenMP loop
  const int num_wfc = static_cast<int>(WFC_list.size());
#pragma omp parallel for
  for (int iw = 0; iw < num_wfc; iw++)
    WFC_list[iw]->completeUpdates();
}
/** If a move for iat-th particle is rejected, restore to the content.
* @param iat index of the particle whose new position was proposed
*
* Ye: hopefully we can gradually move away from restore
*/
virtual void restore(int iat) = 0;
/** If a move for iat-th particle on some walkers in a batch is rejected, restore their contents
* Note that all the lists only include rejected walkers.
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param iat index of the particle whose new position was proposed
*
* Ye: hopefully we can gradually move away from restore
*/
virtual void mw_restore(const std::vector<WaveFunctionComponent*>& WFC_list, int iat)
{
// NOTE(review): serial on purpose? the OpenMP pragma is commented out — confirm
//#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
WFC_list[iw]->restore(iat);
}
/** evaluate the ratio of the new to old WaveFunctionComponent value
* @param P the active ParticleSet
* @param iat the index of a particle
* @return \f$ \psi( \{ {\bf R}^{'} \} )/ \psi( \{ {\bf R}\})\f$
*
* Specialized for particle-by-particle move
*/
virtual ValueType ratio(ParticleSet& P, int iat) = 0;
/** compute the ratio of the new to old WaveFunctionComponent value of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$
*/
virtual void mw_calcRatio(const std::vector<WaveFunctionComponent*>& WFC_list,
                          const std::vector<ParticleSet*>& P_list,
                          int iat,
                          std::vector<PsiValueType>& ratios)
{
  // default: dispatch walker-by-walker; signed bound avoids sign-compare in the OpenMP loop
  const int num_wfc = static_cast<int>(WFC_list.size());
#pragma omp parallel for
  for (int iw = 0; iw < num_wfc; iw++)
    ratios[iw] = WFC_list[iw]->ratio(*P_list[iw], iat);
}
/** compute the ratio of the new to old WaveFunctionComponent value of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$
*/
virtual void mw_calcRatio(const RefVector<WaveFunctionComponent>& WFC_list,
const RefVector<ParticleSet>& P_list,
int iat,
std::vector<PsiValueType>& ratios)
{
// NOTE(review): serial on purpose? the OpenMP pragma is commented out — confirm
//#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
ratios[iw] = WFC_list[iw].get().ratio(P_list[iw], iat);
}
/** For particle-by-particle move. Requests space in the buffer
* based on the data type sizes of the objects in this class.
* @param P particle set
* @param buf Anonymous storage
*/
virtual void registerData(ParticleSet& P, WFBufferType& buf) = 0;
/** For particle-by-particle move. Requests space in the buffer
* based on the data type sizes of the objects in this class.
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param buf_list Anonymous storage
*/
virtual void mw_registerData(const std::vector<WaveFunctionComponent*>& WFC_list,
const std::vector<ParticleSet*>& P_list,
const std::vector<WFBufferType*>& buf_list)
{
// We can't make this static but we can use a lambda with no capture to
// restrict access to *this scope
auto registerComponentData = [](WaveFunctionComponent& wfc, ParticleSet& pset, WFBufferType& wfb) {
wfc.registerData(pset, wfb);
};
// serial loop: registration advances each walker's buffer cursor in order
for (int iw = 0; iw < WFC_list.size(); iw++)
registerComponentData(*(WFC_list[iw]), *(P_list[iw]), *(buf_list[iw]));
}
/** For particle-by-particle move. Put the objects of this class
* in the walker buffer or forward the memory cursor.
* @param P particle set
* @param buf Anonymous storage
* @param fromscratch request recomputing the precision critical
* pieces of wavefunction from scratch
* @return log value of the wavefunction.
*/
virtual LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false) = 0;
/** For particle-by-particle move. Put the objects of this class
* in the walker buffer or forward the memory cursor.
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param buf_list Anonymous storage
* @@param values the log WF values of walkers in a batch
* @param fromscratch request recomputing the precision critical
* pieces of wavefunction from scratch
*/
virtual void mw_updateBuffer(const RefVector<WaveFunctionComponent>& WFC_list,
                             const RefVector<ParticleSet>& P_list,
                             const RefVector<WFBufferType>& buf_list,
                             bool fromscratch = false)
{
  // default: dispatch walker-by-walker; signed bound avoids sign-compare in the OpenMP loop
  const int num_wfc = static_cast<int>(WFC_list.size());
#pragma omp parallel for
  for (int iw = 0; iw < num_wfc; iw++)
    WFC_list[iw].get().updateBuffer(P_list[iw], buf_list[iw], fromscratch);
}
/** For particle-by-particle move. Copy data or attach memory
* from a walker buffer to the objects of this class.
* The log value, P.G and P.L contribution from the objects
* of this class are also added.
* @param P particle set
* @param buf Anonymous storage
*/
virtual void copyFromBuffer(ParticleSet& P, WFBufferType& buf) = 0;
/** For particle-by-particle move. Copy data or attach memory
* from a walker buffer to the objects of this class.
* @param P particle set
* @param buf Anonymous storage
*/
virtual void mw_copyFromBuffer(const RefVector<WaveFunctionComponent>& wfc_list,
                               const RefVector<ParticleSet>& p_list,
                               const RefVector<WFBufferType>& buf_list)
{
  // default: dispatch walker-by-walker; signed bound avoids sign-compare in the OpenMP loop
  const int num_wfc = static_cast<int>(wfc_list.size());
#pragma omp parallel for
  for (int iw = 0; iw < num_wfc; iw++)
    wfc_list[iw].get().copyFromBuffer(p_list[iw], buf_list[iw]);
}
/** make clone
* @param tqp target Quantum ParticleSet
* @param deepcopy if true, make a deep copy
*
* If not true, return a proxy class
*/
virtual WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const;
/** Intended as a handle to break
*
*
*/
//virtual WaveFunctionComponentPtr makeThrScope(std::vector<std::pair<int,int>>& ptcl_group_indexes) const = 0;
/** Return the Chiesa kinetic energy correction
*/
virtual RealType KECorrection();
/** Compute derivatives of the wavefunction with respect to the optimizable
* parameters.
* @param P particle set
* @param optvars optimizable parameters
* @param dlogpsi array of derivatives of the log of the wavefunction
* @param dhpsioverpsi array of derivatives of the Laplacian of the wavefunction divided by the wavefunction.
* Note that this does not use the Laplacian of the log of the wavefunction, as in evaluateLog.
* Also the factor of -1/2 from the kinetic energy must be included here. The 1/m
* factor is applied in TrialWaveFunction.
*/
virtual void evaluateDerivatives(ParticleSet& P,
const opt_variables_type& optvars,
std::vector<ValueType>& dlogpsi,
std::vector<ValueType>& dhpsioverpsi);
/** Compute derivatives of the wavefunction with respect to the optimizable
* parameters
* @param P particle set
* @param optvars optimizable parameters
* @param dlogpsi array of derivatives of the log of the wavefunction
* Note: this function differs from the evaluateDerivatives function in the way that it only computes
* the derivative of the log of the wavefunction.
*/
virtual void evaluateDerivativesWF(ParticleSet& P,
const opt_variables_type& optvars,
std::vector<ValueType>& dlogpsi);
virtual void multiplyDerivsByOrbR(std::vector<ValueType>& dlogpsi)
{
// scale this component's parameter derivatives by Re(value recovered from LogValue)
RealType myrat = std::real(LogToValue<PsiValueType>::convert(LogValue));
// myVars.where maps the local parameter index to its slot in the global dlogpsi
for (int j = 0; j < myVars.size(); j++)
{
int loc = myVars.where(j);
dlogpsi[loc] *= myrat;
}
}
/** Calculates the derivatives of \f$ \grad(\textrm{log}(\psif)) \f$ with respect to
the optimizable parameters, and the dot product of this is then
performed with the passed-in G_in gradient vector. This object is then
returned as dgradlogpsi.
*/
virtual void evaluateGradDerivatives(const ParticleSet::ParticleGradient_t& G_in, std::vector<ValueType>& dgradlogpsi)
{
// default aborts: components supporting gradient-parameter derivatives must override
APP_ABORT("Need specialization of WaveFunctionComponent::evaluateGradDerivatives in " + ClassName + " class.\n");
}
virtual void finalizeOptimization() {}
/** evaluate the ratios of one virtual move with respect to all the particles
* @param P reference particleset
* @param ratios \f$ ratios[i]=\{{\bf R}\}\rightarrow {r_0,\cdots,r_i^p=pos,\cdots,r_{N-1}}\f$
*/
virtual void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios);
/** evaluate ratios to evaluate the non-local PP
* @param VP VirtualParticleSet
* @param ratios ratios with new positions VP.R[k] the VP.refPtcl
*/
virtual void evaluateRatios(VirtualParticleSet& VP, std::vector<ValueType>& ratios);
/** evaluate ratios to evaluate the non-local PP
* @param VP VirtualParticleSet
* @param ratios ratios with new positions VP.R[k] the VP.refPtcl
* @param dratios \f$\partial_{\alpha}(\ln \Psi ({\bf R}^{\prime}) - \ln \Psi ({\bf R})) \f$
*/
virtual void evaluateDerivRatios(VirtualParticleSet& VP,
const opt_variables_type& optvars,
std::vector<ValueType>& ratios,
Matrix<ValueType>& dratios);
/////////////////////////////////////////////////////
// Functions for vectorized evaluation and updates //
/////////////////////////////////////////////////////
#ifdef QMC_CUDA
using CTS = CUDAGlobalTypes;
virtual void freeGPUmem() {}
virtual void recompute(MCWalkerConfiguration& W, bool firstTime) {}
virtual void reserve(PointerPool<gpu::device_vector<CTS::ValueType>>& pool, int kblocksize) {}
/** Evaluate the log of the WF for all walkers
* @param walkers vector of all walkers
* @param logPsi output vector of log(psi)
*/
virtual void addLog(MCWalkerConfiguration& W, std::vector<RealType>& logPsi)
{
// default GPU path: hard abort; CUDA-capable components must override
APP_ABORT("Need specialization of WaveFunctionComponent::addLog for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
/** Evaluate the wave-function ratio w.r.t. moving particle iat
* for all walkers
* @param walkers vector of all walkers
* @param iat particle which is moving
* @param psi_ratios output vector with psi_new/psi_old
*/
virtual void ratio(MCWalkerConfiguration& W, int iat, std::vector<ValueType>& psi_ratios)
{
// default GPU path: hard abort; CUDA-capable components must override
APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
// Returns the WF ratio and gradient w.r.t. iat for each walker
// in the respective vectors
virtual void ratio(MCWalkerConfiguration& W, int iat, std::vector<ValueType>& psi_ratios, std::vector<GradType>& grad)
{
// default GPU path: hard abort; CUDA-capable components must override
APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
virtual void ratio(MCWalkerConfiguration& W,
int iat,
std::vector<ValueType>& psi_ratios,
std::vector<GradType>& grad,
std::vector<ValueType>& lapl)
{
// default GPU path: hard abort; CUDA-capable components must override
APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
virtual void calcRatio(MCWalkerConfiguration& W,
int iat,
std::vector<ValueType>& psi_ratios,
std::vector<GradType>& grad,
std::vector<ValueType>& lapl)
{
// default GPU path: hard abort; CUDA-capable components must override
APP_ABORT("Need specialization of WaveFunctionComponent::calcRatio for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
/** Add ratio/gradient/Laplacian contributions for particle iat across all walkers.
* NOTE(review): `k` appears to be a block/step index used by the delayed-update
* path — confirm against the batched-update callers.
* Base-class stub: always aborts. GPU-capable components must override.
*/
virtual void addRatio(MCWalkerConfiguration& W,
int iat,
int k,
std::vector<ValueType>& psi_ratios,
std::vector<GradType>& grad,
std::vector<ValueType>& lapl)
{
APP_ABORT("Need specialization of WaveFunctionComponent::addRatio for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
/** Per-walker ratio/gradient/Laplacian for an explicit list of walkers,
* particle indices (iatList), and proposed positions (rNew).
* Base-class stub: always aborts. GPU-capable components must override.
*/
virtual void ratio(std::vector<Walker_t*>& walkers,
std::vector<int>& iatList,
std::vector<PosType>& rNew,
std::vector<ValueType>& psi_ratios,
std::vector<GradType>& grad,
std::vector<ValueType>& lapl)
{
APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
/// Per-walker gradient w.r.t. particle iat; presumably accumulates into `grad`
/// (confirm in overrides). Base-class stub: unconditionally aborts.
virtual void addGradient(MCWalkerConfiguration& W, int iat, std::vector<GradType>& grad)
{
  const std::string msg = "Need specialization of WaveFunctionComponent::addGradient for " + ClassName +
                          ".\n Required CUDA functionality not implemented. Contact developers.\n";
  APP_ABORT(msg);
}
/** Compute per-walker gradient w.r.t. particle iat.
* NOTE(review): `k` appears to be a delayed-update block index — confirm.
* Base-class stub: always aborts. GPU-capable components must override.
*/
virtual void calcGradient(MCWalkerConfiguration& W, int iat, int k, std::vector<GradType>& grad)
{
APP_ABORT("Need specialization of WaveFunctionComponent::calcGradient for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
/** Collect gradient and Laplacian values for all walkers into grads/lapl
* (exact accumulation semantics are defined by overrides).
* Base-class stub: always aborts. GPU-capable components must override.
*/
virtual void gradLapl(MCWalkerConfiguration& W, GradMatrix_t& grads, ValueMatrix_t& lapl)
{
APP_ABORT("Need specialization of WaveFunctionComponent::gradLapl for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
/** Determinant look-ahead evaluation for the delayed-update scheme.
* NOTE(review): parameters look like k = block index, kd = delay depth,
* nw = number of walkers — confirm against implementing classes.
* Base-class stub: always aborts. GPU-capable components must override.
*/
virtual void det_lookahead(MCWalkerConfiguration& W,
std::vector<ValueType>& psi_ratios,
std::vector<GradType>& grad,
std::vector<ValueType>& lapl,
int iat,
int k,
int kd,
int nw)
{
APP_ABORT("Need specialization of WaveFunctionComponent::det_lookahead for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
/** Finalize a proposed move of particle iat for the listed walkers.
* NOTE(review): `acc` presumably flags per-walker acceptance and `k` a
* delayed-update block index — confirm against callers.
* Base-class stub: always aborts. GPU-capable components must override.
*/
virtual void update(MCWalkerConfiguration* W, std::vector<Walker_t*>& walkers, int iat, std::vector<bool>* acc, int k)
{
APP_ABORT("Need specialization of WaveFunctionComponent::update for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
/** Update internal state for the listed walkers and particle indices.
* Base-class stub: always aborts. GPU-capable components must override.
*/
virtual void update(const std::vector<Walker_t*>& walkers, const std::vector<int>& iatList)
{
APP_ABORT("Need specialization of WaveFunctionComponent::update for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
/** Non-local pseudopotential ratios for a batch of quadrature points.
* @param W walker configuration holding all walkers
* @param jobList batch descriptors for the non-local evaluations
* @param quadPoints quadrature-point positions
* @param psi_ratios output psi_new/psi_old per quadrature point
*
* Base-class stub: always aborts. GPU-capable components must override.
*/
virtual void NLratios(MCWalkerConfiguration& W,
std::vector<NLjob>& jobList,
std::vector<PosType>& quadPoints,
std::vector<ValueType>& psi_ratios)
{
// Fixed: message previously said "NLRatios", which does not match the actual
// method name (NLratios) and made the abort harder to grep for.
APP_ABORT("Need specialization of WaveFunctionComponent::NLratios for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
/** Non-local pseudopotential ratio evaluation, device-pointer variant.
* All list arguments live in GPU memory (gpu::device_vector).
* Base-class stub: always aborts. GPU-capable components must override.
*/
virtual void NLratios(MCWalkerConfiguration& W,
gpu::device_vector<CUDA_PRECISION*>& Rlist,
gpu::device_vector<int*>& ElecList,
gpu::device_vector<int>& NumCoreElecs,
gpu::device_vector<CUDA_PRECISION*>& QuadPosList,
gpu::device_vector<CUDA_PRECISION*>& RatioList,
int numQuadPoints)
{
// Fixed: message previously said "NLRatios", which does not match the actual
// method name (NLratios) and made the abort harder to grep for.
APP_ABORT("Need specialization of WaveFunctionComponent::NLratios for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
/** Parameter derivatives for wave-function optimization across all walkers.
* @param W walker configuration holding all walkers
* @param optvars optimizable variational parameters
* @param dgrad_logpsi output d(log psi)/d(param) per walker
* @param dhpsi_over_psi output d(H psi / psi)/d(param) per walker
*
* Base-class stub: always aborts. GPU-capable components must override.
*/
virtual void evaluateDerivatives(MCWalkerConfiguration& W,
const opt_variables_type& optvars,
RealMatrix_t& dgrad_logpsi,
RealMatrix_t& dhpsi_over_psi)
{
APP_ABORT("Need specialization of WaveFunctionComponent::evaluateDerivatives for " + ClassName +
".\n Required CUDA functionality not implemented. Contact developers.\n");
}
#endif
};
} // namespace qmcplusplus
#endif
|
memcpy.c | /*
* Copyright (c) 2020, 2021, Matija Skala <mskala@gmx.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the author nor the
* names of contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Matija Skala <mskala@gmx.com> ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Matija Skala <mskala@gmx.com> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <stdint.h>
#include <immintrin.h>
#include "cpu_features.h"
/* Portable byte-by-byte fallback copy; used directly on CPUs without SSE2
 * and for sub-16-byte tails in the vectorized paths. Returns dest. */
static void *memcpy_naive(void *restrict dest, const void *restrict src, size_t n) {
    char *d = dest;
    const char *s = src;
    while (n--)
        *d++ = *s++;
    return dest;
}
/* SSE2-accelerated copy. Strategy by size:
 *   n < 128    : 16-byte vectorizable chunks, then a scalar tail.
 *   n >= 2 MiB : non-temporal (streaming) 128-byte stores that bypass the
 *                cache, after 128-byte-aligning the destination.
 *   otherwise  : unrolled 128/64/32/16-byte groups of 16-byte chunks,
 *                finishing with one overlapping 16-byte chunk.
 * `f` is the running offset of bytes already copied; `n` counts what remains.
 * Returns dest. */
__attribute__((__target__("sse2")))
static void *memcpy_sse2(void *restrict dest, const void *restrict src, size_t n) {
    char *const d = dest;
    const char *const s = src;
    size_t f = 0;
    if (n < 128) {
        /* Small copy: drain in 64/32/16-byte groups, byte-copy the tail. */
        while (n >= 64) {
            for (int i = 0; i < 4; i++) {
#pragma omp simd
                for (int j = 0; j < 16; j++)
                    d[f+j] = s[f+j];
                f += 16;
                n -= 16;
            }
        }
        while (n >= 32) {
            for (int i = 0; i < 2; i++) {
#pragma omp simd
                for (int j = 0; j < 16; j++)
                    d[f+j] = s[f+j];
                f += 16;
                n -= 16;
            }
        }
        while (n >= 16) {
#pragma omp simd
            for (int j = 0; j < 16; j++)
                d[f+j] = s[f+j];
            f += 16;
            n -= 16;
        }
        memcpy_naive(d + f, s + f, n);
        return dest;
    }
    if (n >= 1 << 21) {
        /* Huge copy (>= 2 MiB): non-temporal stores keep the copied data from
         * evicting the working set. First copy 128 bytes the ordinary way... */
        for (int i = 0; i < 8; i++) {
#pragma omp simd
            for (int j = 0; j < 16; j++)
                d[f+j] = s[f+j];
            f += 16;
            n -= 16;
        }
        /* ...then rewind f so that d+f is 128-byte aligned, as required for
         * efficient streaming. The rewound span is simply copied a second
         * time; src and dest do not overlap (restrict), so that is harmless. */
        size_t m = (size_t)d % 128;
        f -= m;
        n += m;
        while (n >= 256) {
            __m128i chunk1 = _mm_loadu_si128((const __m128i*)(s+f)+0);
            __m128i chunk2 = _mm_loadu_si128((const __m128i*)(s+f)+1);
            __m128i chunk3 = _mm_loadu_si128((const __m128i*)(s+f)+2);
            __m128i chunk4 = _mm_loadu_si128((const __m128i*)(s+f)+3);
            __m128i chunk5 = _mm_loadu_si128((const __m128i*)(s+f)+4);
            __m128i chunk6 = _mm_loadu_si128((const __m128i*)(s+f)+5);
            __m128i chunk7 = _mm_loadu_si128((const __m128i*)(s+f)+6);
            __m128i chunk8 = _mm_loadu_si128((const __m128i*)(s+f)+7);
            /* BUG FIX: prefetch 256 bytes ahead of the current read position.
             * Previously this prefetched the constant address s+256 on every
             * iteration, so the prefetch never advanced with the copy. */
            _mm_prefetch(s+f+256, _MM_HINT_NTA);
            _mm_stream_si128((__m128i*)(d+f)+0, chunk1);
            _mm_stream_si128((__m128i*)(d+f)+1, chunk2);
            _mm_stream_si128((__m128i*)(d+f)+2, chunk3);
            _mm_stream_si128((__m128i*)(d+f)+3, chunk4);
            _mm_stream_si128((__m128i*)(d+f)+4, chunk5);
            _mm_stream_si128((__m128i*)(d+f)+5, chunk6);
            _mm_stream_si128((__m128i*)(d+f)+6, chunk7);
            _mm_stream_si128((__m128i*)(d+f)+7, chunk8);
            f += 128;
            n -= 128;
        }
        _mm_sfence(); /* order the streaming stores before returning */
    }
    /* Medium copy / tail of the streaming path: unrolled 16-byte chunks. */
    while (n >= 128) {
        for (int i = 0; i < 8; i++) {
#pragma omp simd
            for (int j = 0; j < 16; j++)
                d[f+j] = s[f+j];
            f += 16;
            n -= 16;
        }
    }
    while (n >= 64) {
        for (int i = 0; i < 4; i++) {
#pragma omp simd
            for (int j = 0; j < 16; j++)
                d[f+j] = s[f+j];
            f += 16;
            n -= 16;
        }
    }
    while (n >= 32) {
        for (int i = 0; i < 2; i++) {
#pragma omp simd
            for (int j = 0; j < 16; j++)
                d[f+j] = s[f+j];
            f += 16;
            n -= 16;
        }
    }
    while (n >= 16) {
#pragma omp simd
        for (int j = 0; j < 16; j++)
            d[f+j] = s[f+j];
        f += 16;
        n -= 16;
    }
    if (n) {
        /* Final 1-15 bytes: back up and redo one full, overlapping 16-byte
         * chunk ending exactly at the end of the buffer. Safe because this
         * point is only reached when the total copy was >= 128 bytes. */
        f += n - 16;
#pragma omp simd
        for (int j = 0; j < 16; j++)
            d[f+j] = s[f+j];
    }
    return dest;
}
static void *memcpy_auto(void *restrict dest, const void *restrict src, size_t n);

/* Active implementation; starts at the resolver and is rebound on first use. */
static void *(*memcpy_impl)(void *restrict dest, const void *restrict src, size_t n) = memcpy_auto;

/* One-shot resolver: picks the best implementation for this CPU, caches it
 * in memcpy_impl, and forwards the triggering call to it. */
static void *memcpy_auto(void *restrict dest, const void *restrict src, size_t n) {
    memcpy_impl = has_sse2() ? memcpy_sse2 : memcpy_naive;
    return memcpy_impl(dest, src, n);
}
/* Public memcpy entry point: dispatches through memcpy_impl, which after the
 * first call points at the CPU-appropriate implementation.
 * NOTE(review): the first-call rebinding of memcpy_impl is unsynchronized;
 * presumably benign (all callers converge to the same pointer) — confirm. */
void *memcpy(void *restrict dest, const void *restrict src, size_t n) {
return memcpy_impl(dest, src, n);
}
|
ast-dump-openmp-target-teams-distribute.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // single loop; the FileCheck lines below encode exact line/column numbers, so never insert or reflow lines in this file
#pragma omp target teams distribute
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) { // two-level loop nest without a collapse clause (line/column positions are load-bearing for FileCheck)
#pragma omp target teams distribute
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) { // explicit collapse(1) on a two-level nest (line/column positions are load-bearing for FileCheck)
#pragma omp target teams distribute collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) { // collapse(2) covering the whole two-level nest (line/column positions are load-bearing for FileCheck)
#pragma omp target teams distribute collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) { // collapse(2) on a three-level nest: innermost loop stays outside the collapsed region (line/column positions are load-bearing for FileCheck)
#pragma omp target teams distribute collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-teams-distribute.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeDirective {{.*}} <line:4:1, col:36>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeDirective {{.*}} <line:10:1, col:36>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeDirective {{.*}} <line:17:1, col:48>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:37, col:47>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:46> 'int'
// CHECK-NEXT: | | |-value: Int 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:46> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeDirective {{.*}} <line:24:1, col:48>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:37, col:47>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:46> 'int'
// CHECK-NEXT: | | |-value: Int 2
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:46> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTargetTeamsDistributeDirective {{.*}} <line:31:1, col:48>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:37, col:47>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:46> 'int'
// CHECK-NEXT: | |-value: Int 2
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:46> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
drx_c.c |
#include <geometry/basic_c.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_vector.h>
#include <math.h>
#include <stdio.h>
// author: Andrew Liew <liew@arch.ethz.ch>
// copyright: Copyright 2018, BLOCK Research Group - ETH Zurich
// license: MIT License
// email: liew@arch.ethz.ch
// Dynamic relaxation solver.
//
// Iteratively relaxes the nodal coordinates X under axial edge forces
// (and, optionally, beam bending forces) until the mean residual force
// per node drops below `tol`, or `steps` iterations have run.  Results
// are written in place into X, V and S.  (rows, cols, vals) hold the
// transposed connectivity matrix Ct in COO form; M holds per-node
// masses used for the explicit velocity update.
//
// Fixed in this revision:
//  * ind_c / ind_t were accepted but never used: the tension-only and
//    compression-only cut-off loops indexed f/fx/fy/fz with the loop
//    counter instead of the edge numbers stored in ind_t / ind_c, so
//    the wrong edges were zeroed.
//  * All GSL vectors/matrices allocated on entry are now freed before
//    returning (they previously leaked on every call).
//  * nb == 0 (no beams) no longer creates a zero-length VLA or a
//    zero-row gsl_matrix (which would trip the GSL error handler).
void drx_solver_c(
    double tol,     // Tolerance value.
    int steps,      // Maximum number of steps.
    int summary,    // Print summary at end (1:yes or 0:no).
    int m,          // Number of elements.
    int n,          // Number of nodes.
    int *u,         // Element start node.
    int *v,         // Element end node.
    double *X,      // Nodal co-ordinates.
    double *f0,     // Initial edge forces.
    double *l0,     // Initial edge lengths.
    double *k0,     // Initial edge axial stiffnesses.
    int *ind_c,     // Indices of compression only edges.
    int *ind_t,     // Indices of tension only edges.
    int ind_c_n,    // Length of ind_c.
    int ind_t_n,    // Length of ind_t.
    double *B,      // Constraint conditions Bx, By, Bz.
    double *P,      // Nodal loads Px, Py, Pz.
    double *S,      // Shear forces Sx, Sy, Sz.
    int *rows,      // Rows of Ct.
    int *cols,      // Columns of Ct.
    double *vals,   // Values of Ct.
    int nv,         // Length of rows/cols/vals.
    double *M,      // Mass matrix.
    double factor,  // Convergence factor.
    double *V,      // Nodal velocities Vx, Vy, Vz.
    int *inds,      // Indices of beam element start nodes.
    int *indi,      // Indices of beam element intermediate nodes.
    int *indf,      // Indices of beam element finish nodes beams.
    double *EIx,    // Nodal EIx flexural stiffnesses.
    double *EIy,    // Nodal EIy flexural stiffnesses.
    int beams,      // Includes beams:1 or not:0.
    int nb)         // Length of inds/indi/indf.
{
    int a;
    int b;
    int c;
    int i;
    int j;
    int k;
    int nans[nb > 0 ? nb : 1];  // per-beam NaN flags; min size 1 avoids a zero-length VLA
    int ts;
    double alpha;
    double f[m];
    double fx[m];
    double fy[m];
    double fz[m];
    double frx[n];
    double fry[n];
    double frz[n];
    double kappa;
    double l;
    double La;
    double Lb;
    double Lc;
    double LQn;
    double Lmu;
    double Lc1;
    double Lc2;
    double Mi;
    double Ms;
    double q;
    double res;
    double Rx;
    double Ry;
    double Rz;
    double Rn;
    double Uo;
    double Un;
    double xd;
    double yd;
    double zd;
    // GSL workspace (freed at the end of this function).
    gsl_vector *Xs = gsl_vector_alloc(3);
    gsl_vector *Xi = gsl_vector_alloc(3);
    gsl_vector *Xf = gsl_vector_alloc(3);
    gsl_vector *Qa = gsl_vector_alloc(3);
    gsl_vector *Qb = gsl_vector_alloc(3);
    gsl_vector *Qc = gsl_vector_alloc(3);
    gsl_vector *Qn = gsl_vector_alloc(3);
    gsl_vector *mu = gsl_vector_alloc(3);
    gsl_vector *ex = gsl_vector_alloc(3);
    gsl_vector *ey = gsl_vector_alloc(3);
    gsl_vector *ez = gsl_vector_alloc(3);
    gsl_vector *K  = gsl_vector_alloc(3);
    gsl_vector *Kx = gsl_vector_alloc(3);
    gsl_vector *Ky = gsl_vector_alloc(3);
    gsl_vector *Mc = gsl_vector_alloc(3);
    gsl_vector *ua = gsl_vector_alloc(3);
    gsl_vector *ub = gsl_vector_alloc(3);
    gsl_vector *c1 = gsl_vector_alloc(3);
    gsl_vector *c2 = gsl_vector_alloc(3);
    // Guard against nb == 0: gsl_matrix_alloc requires positive dimensions.
    gsl_matrix *Sa = gsl_matrix_alloc(nb > 0 ? nb : 1, 3);
    gsl_matrix *Sb = gsl_matrix_alloc(nb > 0 ? nb : 1, 3);
    ts = 0;
    Uo = 0.;
    res = 1000. * tol;  // start above tol so the loop always runs at least once
    while (ts <= steps && res > tol)
    {
        // Axial edge forces from the current geometry.
        #pragma omp parallel for private(i,j,k,xd,yd,zd,l,q)
        for (i = 0; i < m; i++)
        {
            j = 3 * v[i];
            k = 3 * u[i];
            xd = X[j + 0] - X[k + 0];
            yd = X[j + 1] - X[k + 1];
            zd = X[j + 2] - X[k + 2];
            l = gsl_hypot3(xd, yd, zd);
            f[i] = f0[i] + k0[i] * (l - l0[i]);
            q = f[i] / l;
            fx[i] = xd * q;
            fy[i] = yd * q;
            fz[i] = zd * q;
        }
        // Tension-only edges carry no compression: zero their force
        // components when f < 0.  NOTE: index via ind_t (the previous
        // revision wrongly used the loop counter directly).
        if (ind_t_n > 0)
        {
            #pragma omp parallel for private(i,j)
            for (i = 0; i < ind_t_n; i++)
            {
                j = ind_t[i];
                if (f[j] < 0)
                {
                    fx[j] = 0;
                    fy[j] = 0;
                    fz[j] = 0;
                }
            }
        }
        // Compression-only edges carry no tension: zero when f > 0.
        if (ind_c_n > 0)
        {
            #pragma omp parallel for private(i,j)
            for (i = 0; i < ind_c_n; i++)
            {
                j = ind_c[i];
                if (f[j] > 0)
                {
                    fx[j] = 0;
                    fy[j] = 0;
                    fz[j] = 0;
                }
            }
        }
        if (beams)
        {
            // Reset nodal shear forces before accumulating beam contributions.
            #pragma omp parallel for private(i,j)
            for (i = 0; i < n; i++)
            {
                j = i * 3;
                S[j + 0] = 0.;
                S[j + 1] = 0.;
                S[j + 2] = 0.;
            }
            // For each beam triple (start, intermediate, finish) compute the
            // bending moment from the local curvature and convert it into
            // equivalent shear force vectors stored row-wise in Sa / Sb.
            for (i = 0; i < nb; i++)
            {
                a = inds[i] * 3;
                b = indi[i] * 3;
                c = indf[i] * 3;
                vector_from_pointer(&X[a], Xs);
                vector_from_pointer(&X[b], Xi);
                vector_from_pointer(&X[c], Xf);
                subtract_vectors(Xi, Xs, Qa);
                subtract_vectors(Xf, Xi, Qb);
                subtract_vectors(Xf, Xs, Qc);
                cross_vectors(Qa, Qb, Qn);
                subtract_vectors(Xf, Xs, mu);
                scale_vector(mu, 0.5);
                La  = length_vector(Qa);
                Lb  = length_vector(Qb);
                Lc  = length_vector(Qc);
                LQn = length_vector(Qn);
                Lmu = length_vector(mu);
                // Angle between the two segments (law of cosines), then
                // curvature kappa of the circumscribed circle.
                alpha = acos((gsl_pow_2(La) + gsl_pow_2(Lb) - gsl_pow_2(Lc)) / (2. * La * Lb));
                kappa = 2. * sin(alpha) / Lc;
                // Local orthonormal frame (ex, ey, ez).
                gsl_vector_memcpy(ex, Qn);
                gsl_vector_memcpy(ez, mu);
                scale_vector(ex, 1./LQn);
                scale_vector(ez, 1./Lmu);
                cross_vectors(ez, ex, ey);
                // Curvature vector, split into stiffness-weighted components.
                gsl_vector_memcpy(K, Qn);
                scale_vector(K, kappa/LQn);
                gsl_vector_memcpy(Kx, ex);
                gsl_vector_memcpy(Ky, ey);
                scale_vector(Kx, dot_vectors(K, ex));
                scale_vector(Ky, dot_vectors(K, ey));
                scale_vector(Kx, EIx[i]);
                scale_vector(Ky, EIy[i]);
                add_vectors(Kx, Ky, Mc);
                cross_vectors(Mc, Qa, ua);
                cross_vectors(Mc, Qb, ub);
                normalize_vector(ua);
                normalize_vector(ub);
                cross_vectors(Qa, ua, c1);
                cross_vectors(Qb, ub, c2);
                Lc1 = length_vector(c1);
                Lc2 = length_vector(c2);
                Ms = length_vector_squared(Mc);
                scale_vector(ua, Ms * Lc1 / (La * dot_vectors(Mc, c1)));
                scale_vector(ub, Ms * Lc2 / (Lb * dot_vectors(Mc, c2)));
                for (j = 0; j < 3; j++)
                {
                    gsl_matrix_set(Sa, i, j, gsl_vector_get(ua, j));
                    gsl_matrix_set(Sb, i, j, gsl_vector_get(ub, j));
                }
            }
            // Flag beams whose shear forces came out NaN (e.g. degenerate
            // straight or collapsed segments) so they are skipped below.
            #pragma omp parallel for private(i,j)
            for (i = 0; i < nb; i++)
            {
                nans[i] = 0;
                for (j = 0; j < 3; j++)
                {
                    if (gsl_isnan(gsl_matrix_get(Sa, i, j)) || gsl_isnan(gsl_matrix_get(Sb, i, j)))
                    {
                        nans[i] = 1;
                        break;
                    }
                }
            }
            // Scatter valid beam shear forces onto the three nodes.
            // (Serial: multiple beams may touch the same node.)
            for (i = 0; i < nb; i++)
            {
                a = inds[i] * 3;
                b = indi[i] * 3;
                c = indf[i] * 3;
                if (nans[i] == 0)
                {
                    for (j = 0; j < 3; j++)
                    {
                        S[a + j] += gsl_matrix_get(Sa, i, j);
                        S[b + j] -= (gsl_matrix_get(Sa, i, j) + gsl_matrix_get(Sb, i, j));
                        S[c + j] += gsl_matrix_get(Sb, i, j);
                    }
                }
            }
        }
        // Gather edge forces onto nodes: fr = Ct * f (COO sparse product).
        #pragma omp parallel for private(i)
        for (i = 0; i < n; i++)
        {
            frx[i] = 0;
            fry[i] = 0;
            frz[i] = 0;
        }
        for (i = 0; i < nv; i++)
        {
            frx[rows[i]] += vals[i] * fx[cols[i]];
            fry[rows[i]] += vals[i] * fy[cols[i]];
            frz[rows[i]] += vals[i] * fz[cols[i]];
        }
        // Residuals, velocity update and kinetic energy Un.
        Un = 0.;
        Rn = 0.;
        #pragma omp parallel for private(i,j,Rx,Ry,Rz,Mi) reduction(+:Un,Rn)
        for (i = 0; i < n; i++)
        {
            j = 3 * i;
            Rx = (P[j + 0] - S[j + 0] - frx[i]) * B[j + 0];
            Ry = (P[j + 1] - S[j + 1] - fry[i]) * B[j + 1];
            Rz = (P[j + 2] - S[j + 2] - frz[i]) * B[j + 2];
            Rn += gsl_hypot3(Rx, Ry, Rz);
            Mi = M[i] * factor;
            V[j + 0] += Rx / Mi;
            V[j + 1] += Ry / Mi;
            V[j + 2] += Rz / Mi;
            Un += Mi * (gsl_pow_2(V[j + 0]) + gsl_pow_2(V[j + 1]) + gsl_pow_2(V[j + 2]));
        }
        // Kinetic damping: zero velocities at a kinetic-energy peak.
        if (Un < Uo)
        {
            #pragma omp parallel for private(i,j)
            for (i = 0; i < n; i++)
            {
                j = 3 * i;
                V[j + 0] = 0.;
                V[j + 1] = 0.;
                V[j + 2] = 0.;
            }
        }
        Uo = Un;
        // Advance nodal positions.
        #pragma omp parallel for private(i,j)
        for (i = 0; i < n; i++)
        {
            j = 3 * i;
            X[j + 0] += V[j + 0];
            X[j + 1] += V[j + 1];
            X[j + 2] += V[j + 2];
        }
        res = Rn / n;  // mean residual force per node
        ts++;
    }
    if (summary == 1)
    {
        printf("Step: %i, Residual: %f\n", ts - 1, res);
    }
    // Release all GSL workspace allocated above.
    gsl_vector_free(Xs);
    gsl_vector_free(Xi);
    gsl_vector_free(Xf);
    gsl_vector_free(Qa);
    gsl_vector_free(Qb);
    gsl_vector_free(Qc);
    gsl_vector_free(Qn);
    gsl_vector_free(mu);
    gsl_vector_free(ex);
    gsl_vector_free(ey);
    gsl_vector_free(ez);
    gsl_vector_free(K);
    gsl_vector_free(Kx);
    gsl_vector_free(Ky);
    gsl_vector_free(Mc);
    gsl_vector_free(ua);
    gsl_vector_free(ub);
    gsl_vector_free(c1);
    gsl_vector_free(c2);
    gsl_matrix_free(Sa);
    gsl_matrix_free(Sb);
}
|
target_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target simd'}}
#pragma omp target simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target simd'}}
#pragma omp target simd foo
// Verifies that '#pragma omp target simd' is accepted with no clauses,
// and that attaching it to a non-for statement is diagnosed.
// NOTE: this is a clang -verify fixture; the expected-error@+N anchors
// are line-relative, so no lines may be inserted between an anchor and
// its target.
void test_no_clause() {
  int i;
#pragma omp target simd
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{statement after '#pragma omp target simd' must be a for loop}}
#pragma omp target simd
  ++i;
}
// Verifies the branch-protection rules of the '#pragma omp target simd'
// region: jumping into or out of the region and returning from it are
// errors, while branches entirely inside the region are allowed.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;
  int x[24];
#pragma omp target simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2; // forward jump within the region: allowed
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }
  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1; // label outside any region: allowed
}
// Verifies that unrecognized tokens after the directive name only
// produce an "extra tokens" warning and are otherwise ignored.
void test_invalid_clause() {
  int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
#pragma omp target simd foo bar
  for (i = 0; i < 16; ++i)
    ;
}
// Verifies that stray punctuation (';' or ',') after the directive name
// or after a clause is diagnosed as ignored extra tokens, not an error.
void test_non_identifiers() {
  int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
#pragma omp target simd;
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
#pragma omp target simd private(x);
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
#pragma omp target simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}
extern int foo();
// Exercises the 'collapse' clause parser and semantic checks: malformed
// parentheses/arguments, non-constant and non-positive arguments, and
// the requirement that the collapsed loop nest be deep enough.
// NOTE: clang -verify fixture — expected-*@+N anchors are line-relative;
// do not insert lines between an anchor and its target.
void test_collapse() {
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp target simd collapse
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd collapse(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target simd collapse()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
// A well-formed collapse(4) over a four-deep loop nest is accepted.
#pragma omp target simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target simd', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
void test_private() {
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target simd private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target simd private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target simd private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target simd private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target simd lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target simd lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target simd lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target simd firstprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target simd firstprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target simd firstprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target simd firstprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target simd firstprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target simd firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target simd lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target simd lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target simd lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
void test_safelen() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target simd safelen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target simd safelen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target simd safelen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp target simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd safelen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target simd safelen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target simd safelen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp target simd safelen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp target simd safelen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp target simd safelen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_simdlen() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target simd simdlen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target simd simdlen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target simd simdlen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp target simd simdlen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target simd simdlen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target simd simdlen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target simd simdlen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target simd simdlen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp target simd simdlen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp target simd simdlen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp target simd simdlen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_safelen_simdlen() {
int i;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp target simd simdlen(6) safelen(5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp target simd safelen(5) simdlen(6)
for (i = 0; i < 16; ++i)
;
}
|
elemwise_binary_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file elemwise_binary_op.h
* \brief Function definition of elementwise binary operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#include <mxnet/operator_util.h>
#include <mxnet/op_attr_types.h>
#include <vector>
#include <string>
#include <utility>
#include <typeinfo>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../../engine/openmp.h"
#include "elemwise_unary_op.h"
#include "../../common/utils.h"
#include "./init_op.h"
namespace mxnet {
namespace op {
/*! Gather binary operator functions into ElemwiseBinaryOp class */
class ElemwiseBinaryOp : public OpBase {
public:
/*! \brief For sparse, assume missing rvalue is 0 */
template<typename OP, int Req>
struct MissingRValueOp {
typedef OP Operation;
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0)));
}
};
/*! \brief For sparse, assume missing lvalue is 0 */
template<typename OP, int Req>
struct MissingLValueOp {
typedef OP Operation;
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i]));
}
};
private:
/*!
* \brief CSR operation requires temp space
*/
enum ResourceRequestType {
kTempSpace
};
/*!
* \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input
* CPU-Only version
*/
template<typename DType, typename OP, typename xpu>
static inline size_t FillDense(mshadow::Stream<xpu> *s,
const size_t idx_l,
const size_t idx_r,
const OpReqType req,
mshadow::Tensor<xpu, 2, DType> *out,
const size_t iter_out) {
const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
if (static_cast<size_t>(index_out_min) > iter_out) {
const DType zero_input_val = OP::Map(DType(0), DType(0));
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
Fill<false>(s, (*out)[i], req, zero_input_val);
}
}
return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int'
}
static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) {
return a1.var() == a2.var();
}
/*! \brief Minimum of three */
static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) {
return a < b ? (a < c ? a : c) : (b < c ? b : c);
}
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseNone_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
Stream<xpu> *s = ctx.get_stream<xpu>();
const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1)
/ DataType<DType>::kLanes);
const DType *ograd_dptr = inputs[0].dptr<DType>();
if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>());
} else if (req[0] != kNullOp) {
DType *lgrad_dptr = outputs[0].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr);
});
}
if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>());
} else if (req[1] != kNullOp) {
DType *rgrad_dptr = outputs[1].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr);
});
}
}
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseIn_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(outputs.size(), 2U);
DCHECK_EQ(inputs.size(), 3U);
mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>();
const DType *ograd_dptr = inputs[0].dptr<DType>();
const DType *lhs_dptr = inputs[1].dptr<DType>();
const DType *rhs_dptr = inputs[2].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
const int size = static_cast<int>(
(outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * lgrad_dptr = outputs[0].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>, xpu>::Launch(
s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
const int size = static_cast<int>(
(outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * rgrad_dptr = outputs[1].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>, xpu>::Launch(
s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
}
template<
typename xpu,
typename LOP,
typename ROP,
bool in0_ok_dense = false,
bool in1_ok_dense = false,
bool in2_ok_dense = false,
typename BackupCompute>
static inline void RspRspOpBackward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs,
BackupCompute backup_compute) {
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
// lhs grad
if (req[0] != kNullOp) {
// RspRspOp can handle dense outputs so long as OP(0, 0) == 0
RspRspOp<LOP>(
s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0],
false, false, false, false);
// lhs in-place
RspRspOp<op::mshadow_op::mul>(
s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0],
false, false, true, false);
}
// rhs grad
if (req[1] != kNullOp) {
RspRspOp<ROP>(
s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1],
false, false, false, false);
// rhs in-place
RspRspOp<op::mshadow_op::mul>(
s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1],
false, false, true, false);
}
}
template<typename xpu, typename LOP, typename ROP>
static inline void DnsCsrCsrOpBackward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
const bool supported_ops = std::is_same<mshadow_op::right, LOP>::value &&
std::is_same<mshadow_op::left, ROP>::value;
CHECK(supported_ops)
<< "Only backward for mul is supported (LOP should be right, ROP should be left)";
const NDArray& out_grad = inputs[0];
const NDArray& lhs_in = inputs[1];
const NDArray& rhs_in = inputs[2];
const NDArray& lhs_grad = outputs[0];
const NDArray& rhs_grad = outputs[1];
const bool reverse = (outputs[0].storage_type() == kCSRStorage);
if (reverse) {
DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, rhs_in, req[0], lhs_grad, false);
Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), lhs_in.data()}, {req[1]},
{rhs_grad.data()});
} else {
DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, lhs_in, req[1], rhs_grad, false);
Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), rhs_in.data()}, {req[0]},
{lhs_grad.data()});
}
}
public:
/*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename OP>
static void RspRspOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
/*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename OP>
static void RspRspOp(mshadow::Stream<gpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void CsrCsrOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void CsrCsrOp(mshadow::Stream<gpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output);
/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template<typename xpu, typename OP>
static void DnsCsrDnsOp(mshadow::Stream<xpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template<typename xpu, typename OP>
static void DnsCsrCsrOp(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
/*! \brief DNS -op- RSP binary operator for non-canonical NDArray */
template<typename xpu, typename OP>
static void DnsRspDnsOp(mshadow::Stream<xpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
public:
/*!
* \brief Rsp-op-Rsp operation which produces a dense result
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
/*!
* \brief Allow one of the binary inputs to be dense and still produce a sparse output.
* Typically used for sparse * dense = sparse.
* Note: for csr, it dispatches to fallback other than csr, csr -> csr
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool PreferSparseStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
using namespace common;
CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
const auto& lhs_stype = in_attrs->at(0);
const auto& rhs_stype = in_attrs->at(1);
auto& out_stype = out_attrs->at(0);
bool dispatched = false;
const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
DispatchMode::kFComputeEx;
if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
// dns, dns -> dns
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
if (!dispatched && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
// rsp, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
// csr, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched &&
((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage))) {
// rsp, dns -> rsp
// dns, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched &&
((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage))) {
// csr, dns -> csr
// dns, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
if (!dispatched) {
dispatched = dispatch_fallback(out_attrs, dispatch_mode);
}
return dispatched;
}
/*!
* \brief Allow one of the inputs to be dense and produce a dense output,
* for rsp inputs only support when both inputs are rsp type.
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
template<bool cpu_only, bool rsp, bool csr>
static bool PreferDenseStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
using namespace common;
CHECK_EQ(in_attrs->size(), 2);
CHECK_EQ(out_attrs->size(), 1);
const auto lhs_stype = (*in_attrs)[0];
const auto rhs_stype = (*in_attrs)[1];
bool dispatched = false;
const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask;
const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
DispatchMode::kFComputeEx;
if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
// dns, dns ... -> dns
dispatched = storage_type_assign(out_attrs, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
// rsp, rsp, ... -> rsp
dispatched = storage_type_assign(out_attrs, kRowSparseStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
if (!dispatched && csr && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
// csr, csr, ... -> csr
dispatched = storage_type_assign(out_attrs, kCSRStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) ||
(lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) {
// dense, csr -> dense / csr, dense -> dense
dispatched = storage_type_assign(out_attrs, kDefaultStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage) ||
(lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage))) {
// dense, rsp -> dense / rsp, dense -> dense
dispatched = storage_type_assign(out_attrs, kDefaultStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
if (!dispatched) {
dispatch_fallback(out_attrs, dispatch_mode);
}
return true;
}
/*!
* \brief Backward pass computing input gradient using forward inputs
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
template<typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] != kNullOp) {
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
});
});
}
}
template<typename xpu, typename OP>
static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] != kNullOp) {
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
});
});
}
}
template<typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace common;
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto rhs_stype = inputs[1].storage_type();
const auto out_stype = outputs[0].storage_type();
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
if ((ContainsOnlyStorage(inputs, kRowSparseStorage)) &&
(out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
RspRspOp<OP>(
s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false);
} else if (ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) {
// csr, csr -> csr
CsrCsrOp<OP>(s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
} else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
out_stype == kDefaultStorage) {
const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
const bool reverse = (lhs_stype == kCSRStorage);
DnsCsrDnsOp<xpu, OP>(s, attrs, ctx, dns, csr, req[0], outputs[0], reverse);
} else if (((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
out_stype == kDefaultStorage) {
const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
const bool reverse = (lhs_stype == kRowSparseStorage);
const NDArray& rsp = (reverse)? inputs[0] : inputs[1];
DnsRspDnsOp<xpu, OP>(s, attrs, ctx, dns, rsp, req[0], outputs[0], reverse);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
/*!
 * \brief FComputeEx kernel that additionally tolerates a dense lhs and/or rhs
 *        operand, gated by the lhs_may_be_dense / rhs_may_be_dense flags.
 *        Unsupported storage combinations are reported via LogUnimplementedOp.
 */
template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs,
                                const OpContext &ctx,
                                const std::vector<NDArray> &inputs,
                                const std::vector<OpReqType> &req,
                                const std::vector<NDArray> &outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  CHECK_EQ(inputs.size(), 2);
  CHECK_EQ(outputs.size(), 1);
  if (req[0] == kNullOp) return;
  const auto lhs_stype = inputs[0].storage_type();
  const auto rhs_stype = inputs[1].storage_type();
  const auto out_stype = outputs[0].storage_type();
  // Named predicates for the dispatch below.
  const bool lhs_rsp = (lhs_stype == kRowSparseStorage);
  const bool rhs_rsp = (rhs_stype == kRowSparseStorage);
  const bool lhs_dns = (lhs_stype == kDefaultStorage);
  const bool rhs_dns = (rhs_stype == kDefaultStorage);
  const bool out_rsp_or_dns =
    (out_stype == kRowSparseStorage || out_stype == kDefaultStorage);
  if (out_rsp_or_dns && lhs_may_be_dense && rhs_may_be_dense &&
      ((lhs_rsp && rhs_rsp) || (lhs_rsp && rhs_dns) || (lhs_dns && rhs_rsp))) {
    // Supported here: rsp,rsp -> rsp ; rsp,rsp -> dns ; rsp,dns -> rsp ; dns,rsp -> rsp.
    // More than one dense operand is rejected inside RspRspOp:
    //   rsp,dns -> dns and dns,rsp -> dns are NOT allowed.
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    RspRspOp<OP>(
      s, attrs, ctx, inputs[0], inputs[1],
      req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false);
  } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
    // csr, csr -> csr: handled by the plain sparse kernel.
    ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
  } else if (out_stype == kCSRStorage &&
             ((lhs_stype == kCSRStorage && rhs_dns) ||
              (lhs_dns && rhs_stype == kCSRStorage))) {
    // Mixed dense/csr operands producing a csr result; `reverse` records
    // whether the csr operand is on the left.
    const bool reverse = (lhs_stype == kCSRStorage);
    const NDArray &dense_arg = reverse ? inputs[1] : inputs[0];
    const NDArray &csr_arg = reverse ? inputs[0] : inputs[1];
    DnsCsrCsrOp<xpu, OP>(attrs, ctx, dense_arg, csr_arg, req[0], outputs[0], reverse);
  } else {
    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
  }
}
/*!
 * \brief Dense (TBlob) backward entry point for ops whose gradient does not
 *        need the forward inputs. Resolves DType from outputs[0].type_flag_
 *        and forwards to BackwardUseNone_ with LOP/ROP as the lhs/rhs
 *        gradient kernels.
 */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
/*!
 * \brief Same as BackwardUseNone, but the type switch additionally covers the
 *        vectorized half2 type (MSHADOW_TYPE_SWITCH_WITH_HALF2).
 */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
/*!
 * \brief Sparse (FComputeEx) backward for ops whose gradient does not need the
 *        forward inputs: each requested input gradient is LOP/ROP applied
 *        element-wise to the output gradient via UnaryOp::ComputeEx.
 *        inputs[0]  = output gradient
 *        outputs[0] = lhs input gradient, outputs[1] = rhs input gradient
 *        Storage combinations other than same-stype rsp/csr pass-through are
 *        reported via LogUnimplementedOp.
 */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs,
                                     const OpContext &ctx,
                                     const std::vector<NDArray> &inputs,
                                     const std::vector<OpReqType> &req,
                                     const std::vector<NDArray> &outputs) {
  CHECK_EQ(inputs.size(), 1U);   // output grad
  CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
  const auto in_stype = inputs[0].storage_type();
  const auto lhs_stype = outputs[0].storage_type();
  const auto rhs_stype = outputs[1].storage_type();
  // lhs grad
  if (req[0] != kNullOp) {
    if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      CHECK_EQ(outputs[0].storage_type(), in_stype);
      // rsp -> rsp, _. op requires 0-input returns 0-output
      DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
      UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }
  // rhs grad
  if (req[1] != kNullOp) {
    if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      // BUGFIX: validate the rhs gradient (outputs[1]), not outputs[0]. The
      // previous check was a copy-paste from the lhs branch and could abort on
      // a valid rhs-only request whose lhs grad has a different storage type.
      CHECK_EQ(outputs[1].storage_type(), in_stype);
      // rsp -> _, rsp. op requires 0-input returns 0-output
      DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
      UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }
}
/*!
 * \brief Dense (TBlob) backward entry point for ops whose gradient requires
 *        the forward inputs. Resolves DType from outputs[0].type_flag_ and
 *        forwards to BackwardUseIn_ with LOP/ROP as the lhs/rhs gradient
 *        kernels.
 */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
/*!
 * \brief Same as BackwardUseIn, but the type switch additionally covers the
 *        vectorized half2 type (MSHADOW_TYPE_SWITCH_WITH_HALF2).
 */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
/*!
 * \brief Sparse (FComputeEx) backward for ops whose gradient requires the
 *        forward inputs.
 *        inputs  = {output grad, lhs input, rhs input}
 *        outputs = {lhs input grad, rhs input grad}
 * \tparam in0_ok_dense, in1_ok_dense, in2_ok_dense forwarded to
 *         RspRspOpBackward — presumably whether the corresponding input may be
 *         treated as dense there; confirm against RspRspOpBackward.
 */
template<
typename xpu, typename LOP, typename ROP,
bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false>
static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs,
                                   const OpContext &ctx,
                                   const std::vector<NDArray> &inputs,
                                   const std::vector<OpReqType> &req,
                                   const std::vector<NDArray> &outputs) {
  using namespace common;
  CHECK_EQ(inputs.size(), 3U);
  CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
  const auto out_grad_stype = inputs[0].storage_type();
  const auto lhs_grad_stype = outputs[0].storage_type();
  const auto rhs_grad_stype = outputs[1].storage_type();
  // The two branches are mutually exclusive: the first needs all-rsp inputs
  // (so out_grad is rsp), the second needs a dense out_grad.
  if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
      (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
      (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
    // rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
    RspRspOpBackward<xpu, LOP, ROP, in0_ok_dense, in1_ok_dense, in2_ok_dense>(
      attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
  } else if (((lhs_grad_stype == kDefaultStorage && rhs_grad_stype == kCSRStorage) ||
              (lhs_grad_stype == kCSRStorage && rhs_grad_stype == kDefaultStorage)) &&
             out_grad_stype == kDefaultStorage) {
    // dns, csr, dns -> [csr, dns] / csr, dns, dns -> [dns, csr]
    DnsCsrCsrOpBackward<xpu, LOP, ROP>(attrs, ctx, inputs, req, outputs);
  } else {
    // BUGFIX: an unsupported storage combination previously fell through
    // silently, leaving requested gradients uncomputed. Report it like the
    // other *Ex paths in this class do.
    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
  }
}
}; // class ElemwiseBinaryOp
/*! \brief Binary launch: registers a 2-input / 1-output elementwise operator
 *  with standard elementwise shape/type inference, in-place {lhs->out, rhs->out}
 *  options, and inputs named "lhs"/"rhs". Compute attrs are added by the
 *  more specific macros below. */
#define MXNET_OPERATOR_REGISTER_BINARY(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(2) \
.set_num_outputs(1) \
.set_attr<nnvm::FListInputNames>("FListInputNames", \
[](const NodeAttrs& attrs) { \
return std::vector<std::string>{"lhs", "rhs"}; \
}) \
.set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \
}) \
.add_argument("lhs", "NDArray-or-Symbol", "first input") \
.add_argument("rhs", "NDArray-or-Symbol", "second input")
/*! \brief Binary launch with FComputeEx for csr and rsp available.
 *  Uses ElemwiseStorageType for storage inference and requests kTempSpace
 *  (used by the sparse CSR code path). */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseStorageType<2, 1, true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch with FComputeEx for csr and rsp available.
 *  When inputs mix sparse and dense storage, sparse output is preferred
 *  (storage inference via ElemwiseBinaryOp::PreferSparseStorageType). */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PS(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::PreferSparseStorageType) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch, dense result: sparse inputs are accepted but the
 *  output is always dense (storage inference via
 *  ElemwiseBinaryOp::SparseSparseWithDenseResult). */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::SparseSparseWithDenseResult) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)
/*! \brief Binary launch with FComputeEx, preferring dense output when inputs
 *  mix storage types (storage inference via
 *  ElemwiseBinaryOp::PreferDenseStorageType); requests kTempSpace for the
 *  sparse CSR code path. */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PD(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.