hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
6b3d6c7762bda83c46d2db39f10a06edf6038b0a.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/layers/base_data_layer.hpp"
namespace caffe {
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // Take the next ready batch; blocks until the prefetch thread delivers one.
  Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
  // Data blob: adopt the batch shape, then copy device-to-device.
  top[0]->ReshapeLike(batch->data_);
  caffe_copy(batch->data_.count(), batch->data_.gpu_data(),
             top[0]->mutable_gpu_data());
  if (this->output_labels_) {
    if (this->box_label_) {
      // Box labels: each top blob after the data blob receives one entry of
      // multi_label_, in order.
      const int num_label_tops = top.size() - 1;
      for (int i = 0; i < num_label_tops; ++i) {
        top[i + 1]->ReshapeLike(*(batch->multi_label_[i]));
        caffe_copy(batch->multi_label_[i]->count(),
                   batch->multi_label_[i]->gpu_data(),
                   top[i + 1]->mutable_gpu_data());
      }
    } else {
      // Scalar labels live in a single blob at top[1].
      top[1]->ReshapeLike(batch->label_);
      caffe_copy(batch->label_.count(), batch->label_.gpu_data(),
                 top[1]->mutable_gpu_data());
    }
  }
  // Ensure the copy is synchronous wrt the host, so that the next batch isn't
  // copied in meanwhile.
  CUDA_CHECK(hipStreamSynchronize(hipStreamDefault));
  // Return the container to the producer side for reuse.
  prefetch_free_.push(batch);
}
INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer);
} // namespace caffe
| 6b3d6c7762bda83c46d2db39f10a06edf6038b0a.cu | #include <vector>
#include "caffe/layers/base_data_layer.hpp"
namespace caffe {
// GPU forward pass for prefetching data layers: pops one prefetched batch,
// copies data (and labels, when produced) into the top blobs, and recycles
// the batch container. Blocks if no batch is ready yet.
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
// Reshape to loaded data.
top[0]->ReshapeLike(batch->data_);
// Copy the data
caffe_copy(batch->data_.count(), batch->data_.gpu_data(),
top[0]->mutable_gpu_data());
if (this->output_labels_) {
if (this->box_label_) {
// Box labels: one label blob per remaining top; multi_label_[i] -> top[i+1].
for (int i = 0; i < top.size() - 1; ++i) {
top[i+1]->ReshapeLike(*(batch->multi_label_[i]));
caffe_copy(batch->multi_label_[i]->count(), batch->multi_label_[i]->gpu_data(),
top[i+1]->mutable_gpu_data());
}
} else {
// Reshape to loaded labels.
top[1]->ReshapeLike(batch->label_);
// Copy the labels.
caffe_copy(batch->label_.count(), batch->label_.gpu_data(),
top[1]->mutable_gpu_data());
}
}
// Ensure the copy is synchronous wrt the host, so that the next batch isn't
// copied in meanwhile.
CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault));
// Hand the emptied batch back to the prefetch thread.
prefetch_free_.push(batch);
}
INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer);
} // namespace caffe
|
2e585557224d9005c64c98089681296593a28c2c.hip | // !!! This is a file automatically generated by hipify!!!
// Define graph of work + dependencies
cudaGraphCreate(&graph);
cudaGraphAddNode(graph, kernel_a, {}, ...);
cudaGraphAddNode(graph, kernel_b, { kernel_a }, ...); // Waits for a
cudaGraphAddNode(graph, kernel_c, { kernel_a }, ...); // Waits for a
cudaGraphAddNode(graph, kernel_d, { kernel_b, kernel_c }, ...); // Waits for b and c
// Instantiate graph and apply optimizations
hipGraphInstantiate(&instance, graph);
// Launch executable graph 100 times
for(int i=0; i<100; i++)
hipGraphLaunch(instance, stream);
| 2e585557224d9005c64c98089681296593a28c2c.cu | // Define graph of work + dependencies
cudaGraphCreate(&graph);
cudaGraphAddNode(graph, kernel_a, {}, ...);
cudaGraphAddNode(graph, kernel_b, { kernel_a }, ...); // Waits for a
cudaGraphAddNode(graph, kernel_c, { kernel_a }, ...); // Waits for a
cudaGraphAddNode(graph, kernel_d, { kernel_b, kernel_c }, ...); // Waits for b and c
// Instantiate graph and apply optimizations
cudaGraphInstantiate(&instance, graph);
// Launch executable graph 100 times
for(int i=0; i<100; i++)
cudaGraphLaunch(instance, stream);
|
f9573bf45702097568972ce8ddf7541831f98939.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file NeighborList_interface.cu
* @author Han Wang <han_wang@math.pku.edu.cn>
* @date Thu Nov 19 12:53:42 2009
*
* @brief Implementation of neighbor list
*
*
*/
#define DEVICE_CODE
#include "NeighborList_interface.h"
#include "Auxiliary.h"
#include "NeighborList.h"
#include <stdio.h>
#include "NonBondedInteraction.h"
#include "Reshuffle_interface.h"
/**
* these are textures for a fast reference of particle position.
*
*/
texture<CoordType, 1, hipReadModeElementType> global_texRef_neighbor_coord;
texture<TypeType, 1, hipReadModeElementType> global_texRef_neighbor_type;
void NeighborList::
clearDeviceNeighborList()
{
  // Release the device-side neighbor list buffers; no-op if never allocated.
  if (!mallocedDeviceNeighborList) {
    return;
  }
  hipFree (dnlist.data);
  hipFree (dnlist.Nneighbor);
  hipFree (dnlist.forceIndex);
  mallocedDeviceNeighborList = false;
  checkCUDAError ("NeighborList::clearDeviceNeighborList");
}
void NeighborList::
clearNonBondedForce ()
{
  // Free the device copy of the non-bonded force table, if we own one.
  if (!mallocedNonBondedForceTable) {
    return;
  }
  hipFree (nbForceTable);
  mallocedNonBondedForceTable = false;
}
// Release every device resource owned by this object: the neighbor-list
// buffers, the non-bonded force table, and the global texture bindings.
void NeighborList::
clear()
{
clearDeviceNeighborList();
clearNonBondedForce();
unbindGlobalTexture ();
}
// Unbind the coordinate/type textures bound by bindGlobalTexture().
// Safe to call repeatedly: a no-op unless the textures are currently bound.
void NeighborList::
unbindGlobalTexture ()
{
if ( initedGlobalTexture ){
hipUnbindTexture(global_texRef_neighbor_coord);
hipUnbindTexture(global_texRef_neighbor_type);
initedGlobalTexture = false;
checkCUDAError ("NeighborList::unbindGlobalTexture");
}
}
void NeighborList::
bindGlobalTexture (const MDSystem & sys)
{
  // Bind particle coordinates and types to textures so neighbor-list
  // kernels can read them through the texture cache.
  const size_t coordBytes = sizeof(CoordType) * sys.ddata.numMem;
  const size_t typeBytes  = sizeof(TypeType)  * sys.ddata.numMem;
  hipBindTexture(0, global_texRef_neighbor_coord, sys.ddata.coord, coordBytes);
  hipBindTexture(0, global_texRef_neighbor_type,  sys.ddata.type,  typeBytes);
  checkCUDAError ("NeighborList::init texture");
  initedGlobalTexture = true;
}
// Destructor: releases all device resources via clear().
NeighborList::~NeighborList()
{
clear();
}
// Round x up to the next multiple of 4 (used to align shared-memory buffer
// sizes). Returns x unchanged when it is already a multiple of 4.
// FIX: the original condition `x & 3 == 0` parses as `x & (3 == 0)` because
// == binds tighter than & in C++; that is always 0 (false), so the function
// rounded aligned values up as well (e.g. 4 -> 8). Parenthesize the mask.
static IndexType hroundUp4 (IndexType x)
{
  if ((x & 3) == 0){
    return x;
  }
  else {
    return ((x >> 2) + 1) << 2;
  }
}
// Build the device neighbor list by scanning the cell list.
// Computes the dynamic shared-memory budget (per-thread index/coord/type
// staging buffers plus, when it fits, a shared copy of the non-bonded force
// table) and launches the cell-list kernel.
void NeighborList::
buildDeviceNeighborListCellList (const MDSystem & sys,
				 const CellList & clist)
{
  dim3 cellBlockDim = clist.getCellBlockDim();
  bool sharednbForceTable (true);
  size_t buildDeviceNeighborList_DeviceCellList_sbuffSize =
      sizeof(IndexType) * hroundUp4(cellBlockDim.x) +
      sizeof(CoordType) * hroundUp4(cellBlockDim.x) +
      sizeof(TypeType)  * hroundUp4(cellBlockDim.x) +
      sizeof(IndexType) * hroundUp4(nbForceTableLength);
  // If the force table does not fit in shared memory, fall back to reading
  // it from global memory and shrink the shared buffer accordingly.
  if (buildDeviceNeighborList_DeviceCellList_sbuffSize >=
      SystemSharedBuffSize - GlobalFunctionParamSizeLimit){
    sharednbForceTable = false;
    buildDeviceNeighborList_DeviceCellList_sbuffSize =
        sizeof(IndexType) * hroundUp4(cellBlockDim.x) +
        sizeof(CoordType) * hroundUp4(cellBlockDim.x) +
        sizeof(TypeType)  * hroundUp4(cellBlockDim.x);
  }
  // FIX: hipify mangled the dynamic shared-memory argument into
  // "( buildDeviceNeighborList_DeviceCellList)_sbuffSize", which does not
  // compile; pass the size variable computed above instead.
  hipLaunchKernelGGL(buildDeviceNeighborList_DeviceCellList,
                     dim3(clist.getCellGrimDim()), dim3(cellBlockDim),
                     buildDeviceNeighborList_DeviceCellList_sbuffSize, 0,
                     sys.ddata.numAtom,
                     sys.ddata.coord,
                     sys.ddata.type,
                     sys.box,
                     clist.dclist,
                     dnlist,
                     nbForceTable,
                     NatomType,
                     sharednbForceTable,
                     err.ptr_de);
  err.check("NeighborList::buildDeviceNeighborListCellList");
  checkCUDAError ("NeighborList::buildDeviceNeighborListCellList");
}
// Build the device neighbor list by brute-force all-pair search (used when
// the box is too small for a cell list). Shared-memory sizing mirrors
// buildDeviceNeighborListCellList, keyed on the atom-kernel block size.
void NeighborList::
buildDeviceNeighborListAllPair (const MDSystem & sys)
{
  bool sharednbForceTable (true);
  size_t buildDeviceNeighborList_AllPair_sbuffSize =
      sizeof(IndexType) * hroundUp4(myBlockDim.x) +
      sizeof(CoordType) * hroundUp4(myBlockDim.x) +
      sizeof(TypeType)  * hroundUp4(myBlockDim.x) +
      sizeof(IndexType) * hroundUp4(nbForceTableLength);
  // Drop the shared force table when it would exceed the shared-memory limit.
  if (buildDeviceNeighborList_AllPair_sbuffSize >=
      SystemSharedBuffSize - GlobalFunctionParamSizeLimit){
    sharednbForceTable = false;
    buildDeviceNeighborList_AllPair_sbuffSize =
        sizeof(IndexType) * hroundUp4(myBlockDim.x) +
        sizeof(CoordType) * hroundUp4(myBlockDim.x) +
        sizeof(TypeType)  * hroundUp4(myBlockDim.x);
  }
  // FIX: hipify mangled the dynamic shared-memory argument into
  // "( buildDeviceNeighborList_AllPair)_sbuffSize", which does not compile;
  // pass the size variable computed above instead.
  hipLaunchKernelGGL(buildDeviceNeighborList_AllPair,
                     dim3(atomGridDim), dim3(myBlockDim),
                     buildDeviceNeighborList_AllPair_sbuffSize, 0,
                     sys.ddata.numAtom,
                     sys.ddata.coord,
                     sys.ddata.type,
                     sys.ddata.rcut,
                     sys.box,
                     dnlist,
                     nbForceTable,
                     NatomType,
                     sharednbForceTable,
                     err.ptr_de);
  err.check("NeighborList::build, build neighbor list all pair");
  checkCUDAError ("NeighborList::build, build neighbor list all pair");
}
// Upload the non-bonded interaction (force-index) table to the device.
// Throws MDExcptUnbuiltNonBondedInteraction when the host-side table has
// not been built yet. The device buffer is freed by clearNonBondedForce().
void NeighborList::
initNonBondedInteraction (const SystemNonBondedInteraction & sysNbInter)
{
if (! sysNbInter.beBuilt()) {
throw MDExcptUnbuiltNonBondedInteraction ("NeighborList");
}
NatomType = sysNbInter.numberOfAtomTypes();
nbForceTableLength = sysNbInter.interactionTableSize();
// Allocate and fill the device copy of the host interaction table.
hipMalloc ((void**)&nbForceTable,
nbForceTableLength * sizeof(IndexType));
hipMemcpy (nbForceTable,
sysNbInter.interactionTable(),
nbForceTableLength * sizeof(IndexType),
hipMemcpyHostToDevice);
checkCUDAError ("AtomNBForceTable::deviceInitTable");
mallocedNonBondedForceTable = true;
}
// Allocate the device neighbor list plus the reshuffle backup buffers.
// Per-atom capacity is estimated from the particle density and the volume
// of the rlist sphere, scaled by DeviceNeighborListExpansion, with a floor
// of 30 entries. Assumes dnlist.stride was already set (done in reinit).
void NeighborList::
mallocDeviceNeighborList (const MDSystem & sys,
const ScalorType & DeviceNeighborListExpansion)
{
ScalorType density = sys.ddata.numAtom / (sys.box.size.x * sys.box.size.y * sys.box.size.z);
ScalorType expectedNumberInList
= 4./3. * M_PI * myrlist * myrlist * myrlist * density;
dnlist.listLength = IndexType(expectedNumberInList * DeviceNeighborListExpansion);
if (dnlist.listLength < 30){
dnlist.listLength = 30;
}
printf ("#@ length of the neighbor list is %d\n", dnlist.listLength);
// Main list: stride x listLength entries, plus a per-atom neighbor count.
hipMalloc ((void**)&(dnlist.data), sizeof(IndexType) * dnlist.stride * dnlist.listLength);
hipMalloc ((void**)&(dnlist.Nneighbor), sizeof(IndexType) * sys.ddata.numAtom);
hipMalloc ((void**)&(dnlist.forceIndex), sizeof(IndexType) * dnlist.stride * dnlist.listLength);
// reshuffle backup things
hipMalloc ((void**)&(bkdnlistData), sizeof(IndexType) * dnlist.stride * dnlist.listLength);
hipMalloc ((void**)&(bkdnlistNneighbor), sizeof(IndexType) * sys.ddata.numAtom);
hipMalloc ((void**)&(bkdnlistForceIndex), sizeof(IndexType) * dnlist.stride * dnlist.listLength);
checkCUDAError ("NeighborList::mallocDeviceNeighborList");
mallocedDeviceNeighborList = true;
}
// (Re)initialize the neighbor list for a system: choose launch dimensions,
// record the list radius, allocate the device list and the force table, and
// (re)bind the coordinate/type textures. NTread is the threads-per-block
// used by atom-indexed kernels.
void NeighborList::
reinit (const SystemNonBondedInteraction & sysNbInter,
const MDSystem & sys,
const ScalorType & rlist,
const ScalorType & rlistExten,
const IndexType & NTread,
const ScalorType & DeviceNeighborListExpansion)
{
myBlockDim.y = 1;
myBlockDim.z = 1;
myBlockDim.x = NTread;
// Ceil-divide atoms over blocks.
IndexType nob;
if (sys.ddata.numAtom % myBlockDim.x == 0){
nob = sys.ddata.numAtom / myBlockDim.x;
} else {
nob = sys.ddata.numAtom / myBlockDim.x + 1;
}
atomGridDim = toGridDim (nob);
myrlist = rlist;
dnlist.rlist = myrlist;
dnlist.rlistExten = rlistExten;
// One column per atom; must be set before mallocDeviceNeighborList().
dnlist.stride = sys.ddata.numAtom;
sumNeighbor.reinit (sys.ddata.numAtom, NThreadForSum);
hipMalloc ((void**) &sumNeighbor_dresult, sizeof(IndexType));
checkCUDAError ("NeighborList::reinit, sumNeighbor");
// init neighbor list
clearDeviceNeighborList ();
mallocDeviceNeighborList (sys, DeviceNeighborListExpansion);
clearNonBondedForce ();
initNonBondedInteraction (sysNbInter);
unbindGlobalTexture ();
bindGlobalTexture (sys);
//init shared memory size
}
void NeighborList::
rebuild (const MDSystem & sys,
	 const CellList & clist,
	 MDTimer * timer)
{
  // Rebuild the neighbor list, timing the work when a timer is supplied.
  // An empty cell list means the box cannot host cells, so fall back to the
  // all-pair construction.
  if (timer != NULL) timer->tic(mdTimeBuildNeighborList);
  if (clist.isempty()){
    buildDeviceNeighborListAllPair (sys);
  }
  else {
    buildDeviceNeighborListCellList (sys, clist);
  }
  if (timer != NULL) timer->toc(mdTimeBuildNeighborList);
}
// Permute the neighbor list according to indexTable (new atom ordering):
// first back up data/forceIndex/Nneighbor into the bk* buffers, then scatter
// them back under the new indices. Both kernels use 2 * blockDim.x
// IndexType entries of dynamic shared memory.
void NeighborList::
reshuffle (const IndexType * indexTable,
const IndexType & numAtom,
MDTimer *timer)
{
if (timer != NULL) timer->tic(mdTimeReshuffleSystem);
// Reshuffle_reshuffleDeviceCellList
// <<<cellGridDim, myBlockDim>>> (
// dclist.data, indexTable);
// hipMemcpy (bkbackupCoord, backupCoord,
// sizeof (CoordType) * numAtom,
// hipMemcpyDeviceToDevice);
// Reshuffle_reshuffleArray
// <<<atomGridDim, myBlockDim>>>
// (bkbackupCoord, numAtom, indexTable, backupCoord);
hipLaunchKernelGGL(( Reshuffle_backupDeviceNeighborList)
, dim3(atomGridDim), dim3(myBlockDim),
2 * myBlockDim.x * sizeof(IndexType), 0,
numAtom,
dnlist.data,
dnlist.forceIndex,
dnlist.stride,
dnlist.Nneighbor,
bkdnlistData,
bkdnlistForceIndex,
bkdnlistNneighbor);
checkCUDAError ("NeighborList::reshuffle backup");
hipLaunchKernelGGL(( Reshuffle_reshuffleDeviceNeighborList)
, dim3(atomGridDim), dim3(myBlockDim),
2 * myBlockDim.x * sizeof(IndexType), 0,
numAtom,
bkdnlistData,
bkdnlistForceIndex,
dnlist.stride,
bkdnlistNneighbor,
indexTable,
dnlist.data,
dnlist.forceIndex,
dnlist.Nneighbor);
checkCUDAError ("NeighborList::reshuffle reshuffle");
if (timer != NULL) timer->toc(mdTimeReshuffleSystem);
}
// Construct a neighbor list and fully initialize it via reinit().
// Ownership flags start false so reinit's clear* calls are safe no-ops.
NeighborList::
NeighborList (const SystemNonBondedInteraction & sysNbInter,
const MDSystem & sys,
const ScalorType & rlist,
const ScalorType & rlistExten,
const IndexType & NTread,
const ScalorType & DeviceNeighborListExpansion)
: mallocedDeviceNeighborList (false),
mallocedNonBondedForceTable (false),
initedGlobalTexture (false)
{
reinit (sysNbInter, sys, rlist, rlistExten, NTread, DeviceNeighborListExpansion);
}
////////////////////////////////////////////////////////////
// for the reason of using texture, we place this function here. it
// should be placed in NeighborList.cu
////////////////////////////////////////////////////////////
using namespace RectangularBoxGeometry;
// Wrap the (possibly out-of-range) cell indices (ix, iy, iz) back into the
// periodic cell grid and return the flattened 1D cell index.
// *shiftx/y/z receive the box displacement to add to coordinates read from
// the wrapped cell so they appear in the image the caller expects.
__device__ IndexType
shiftedD3toD1 (DeviceCellList clist,
RectangularBox box,
int ix,
int iy,
int iz,
ScalorType * shiftx ,
ScalorType * shifty,
ScalorType * shiftz)
{
int tmp;
// floorf(ix * NCelli.x) counts how many whole grids ix lies outside;
// subtracting that many grid widths wraps the index, and the same count
// converts to a box-length shift.
ix += (tmp = -int(floorf(ix * clist.NCelli.x))) * clist.NCell.x;
*shiftx = tmp * box.size.x;
iy += (tmp = -int(floorf(iy * clist.NCelli.y))) * clist.NCell.y;
*shifty = tmp * box.size.y;
iz += (tmp = -int(floorf(iz * clist.NCelli.z))) * clist.NCell.z;
*shiftz = tmp * box.size.z;
return D3toD1 (clist.NCell, ix, iy, iz);
}
// Kernel: build the neighbor list from the cell list (one block per cell,
// one thread per cell slot). For each neighboring cell, the block stages
// that cell's atom indices/coords/types in shared memory, then every thread
// tests its own atom against the staged atoms within rlist.
// Dynamic shared memory layout (sized by the host wrapper):
// [indices | coords | types | optional non-bonded force table].
__global__ void
buildDeviceNeighborList_DeviceCellList (const IndexType numAtom,
const CoordType * coord,
const TypeType * type,
const RectangularBox box,
const DeviceCellList clist,
DeviceNeighborList nlist,
const IndexType * nbForceTable,
const IndexType NatomType,
const bool sharednbForceTable,
mdError_t * ptr_de )
{
// RectangularBoxGeometry::normalizeSystem (box, &ddata);
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
IndexType bidx, bidy, bidz;
D1toD3 (clist.NCell, bid, bidx, bidy, bidz);
// set number of neighbor to 0
IndexType Nneighbor = 0;
// load index
IndexType ii = getDeviceCellListData (clist, bid, tid);
// load iith coordinate // use texturefetch instead
CoordType ref;
TypeType reftype;
if (ii != MaxIndexValue){
#ifdef COMPILE_NO_TEX
ref = coord[ii];
reftype = type[ii];
#else
ref = tex1Dfetch (global_texRef_neighbor_coord, ii);
reftype = tex1Dfetch(global_texRef_neighbor_type, ii);
#endif
}
ScalorType rlist = nlist.rlist;
// the target index and coordinates are shared
// Partition the dynamic shared buffer; roundUp4 keeps each region aligned.
extern __shared__ volatile char pub_sbuff[];
volatile IndexType * targetIndexes =
(volatile IndexType *) pub_sbuff;
CoordType * target =
(CoordType *) &targetIndexes[roundUp4(blockDim.x)];
volatile TypeType * targettype =
(volatile TypeType *) &target[roundUp4(blockDim.x)];
IndexType * nbForceTableBuff = NULL;
IndexType nbForceTableLength = AtomNBForceTable::dCalDataLength(NatomType);
if (sharednbForceTable){
nbForceTableBuff = (IndexType *) &targettype[roundUp4(blockDim.x)];
cpyGlobalDataToSharedBuff (nbForceTable, nbForceTableBuff, nbForceTableLength);
}
__syncthreads();
// __shared__ volatile IndexType targetIndexes [MaxThreadsPerBlock];
// __shared__ volatile CoordType target [MaxThreadsPerBlock];
// __shared__ volatile TypeType targettype [MaxThreadsPerBlock];
// __shared__ volatile IndexType nbForceTableBuff [MaxNBForceTableBuffSize];
// IndexType nbForceTableLength = AtomNBForceTable::dCalDataLength(NatomType);
// if (sharednbForceTable){
// cpyGlobalDataToSharedBuff (nbForceTable, nbForceTableBuff, nbForceTableLength);
// }
// __syncthreads();
// With a single cell along an axis, minimum-image must be applied directly.
bool oneCellX(false), oneCellY(false), oneCellZ(false);
if (clist.NCell.x == 1) oneCellX = true;
if (clist.NCell.y == 1) oneCellY = true;
if (clist.NCell.z == 1) oneCellZ = true;
ScalorType rlist2 = rlist * rlist;
// loop over 27 neighbor cells
for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){
// if (threadIdx.x == 0){
// printf ("%d %d\n", bid, clist.numNeighborCell[bid]);
// }
// Barrier before overwriting the staged cell from the previous iteration.
__syncthreads();
IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i);
CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i);
CoordType shift;
shift.x = shiftNoi.x * box.size.x;
shift.y = shiftNoi.y * box.size.y;
shift.z = shiftNoi.z * box.size.z;
targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid);
if (targetIndexes[tid] != MaxIndexValue){
target[tid] = tex1Dfetch(global_texRef_neighbor_coord, targetIndexes[tid]);
targettype[tid] = tex1Dfetch(global_texRef_neighbor_type, targetIndexes[tid]);
}
__syncthreads();
// find neighbor
if (ii != MaxIndexValue){
for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){
ScalorType diffx = target[jj].x - shift.x - ref.x;
ScalorType diffy = target[jj].y - shift.y - ref.y;
ScalorType diffz = target[jj].z - shift.z - ref.z;
if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx);
if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy);
if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz);
if ((diffx*diffx+diffy*diffy+diffz*diffz) < rlist2 &&
targetIndexes[jj] != ii){
IndexType fidx;
if (sharednbForceTable){
fidx = AtomNBForceTable::calForceIndex (
nbForceTableBuff, NatomType, reftype, targettype[jj]);
}
else {
fidx = AtomNBForceTable::calForceIndex (
nbForceTable, NatomType, reftype, targettype[jj]);
}
// if (fidx != mdForceNULL) {
IndexType listIdx = Nneighbor * nlist.stride + ii;
nlist.data[listIdx] = targetIndexes[jj];
nlist.forceIndex[listIdx] = fidx;
Nneighbor ++;
// }
}
}
}
}
if (ii != MaxIndexValue) {
// NOTE(review): this capacity check runs after the writes above, so an
// overflowing list has already written past nlist.data before the error
// is flagged -- confirm listLength always carries enough slack.
if (Nneighbor > nlist.listLength && ptr_de != NULL){
*ptr_de = mdErrorShortNeighborList;
return;
}
nlist.Nneighbor[ii] = Nneighbor;
// printf ("%d %d\n", ii, Nneighbor);
}
}
// Cell-list maintenance, step 1 (one block per cell, one thread per slot).
// Each thread recomputes the home cell of its atom. Atoms that stay are
// compacted in place; atoms that moved are appended to sendBuff/targetBuff
// for step 2 to scatter into their new cells. Atoms landing exactly on the
// upper box face are wrapped back and their periodic-image counters bumped.
// Dynamic shared memory: 2 * blockDim.x IndexType entries.
__global__ void
buildDeviceCellList_step1 (IndexType numAtom,
                           CoordType * coord,
                           IntScalorType * coordNoix,
                           IntScalorType * coordNoiy,
                           IntScalorType * coordNoiz,
                           RectangularBox box,
                           DeviceCellList clist,
                           IndexType * sendBuff,
                           IndexType * targetBuff,
                           mdError_t * ptr_de,
                           IndexType * erridx,
                           ScalorType * errsrc)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  // Shared layout: [current cell contents | target cell id per entry].
  extern __shared__ volatile IndexType sbuff[];
  volatile IndexType * originalData = (volatile IndexType *) sbuff;
  volatile IndexType * targetCellid = (volatile IndexType *) &originalData[blockDim.x];
  // copy data from cell list
  originalData[tid] = clist.data[bid*clist.stride + tid];
  IndexType originalNumber = clist.numbers[bid];
  // calculate the target cell
  if (originalData[tid] != MaxIndexValue){
    IndexType targetCelli, targetCellj, targetCellk;
    IndexType thisid = originalData[tid];
    // FIX: the COMPILE_NO_TEX branch previously assigned to `ref` without a
    // declaration (it was declared only in the #else branch), which does not
    // compile when COMPILE_NO_TEX is defined; declare it in both branches.
#ifdef COMPILE_NO_TEX
    CoordType ref (coord[thisid]);
#else
    CoordType ref (tex1Dfetch(global_texRef_neighbor_coord, thisid));
#endif
    targetCelli = IndexType(ref.x * box.sizei.x * ScalorType (clist.NCell.x));
    targetCellj = IndexType(ref.y * box.sizei.y * ScalorType (clist.NCell.y));
    targetCellk = IndexType(ref.z * box.sizei.z * ScalorType (clist.NCell.z));
    // An atom exactly on the upper face maps to index NCell; wrap it back
    // into the box and record the boundary crossing.
    if (targetCelli == clist.NCell.x){
      targetCelli -= clist.NCell.x;
      coord[thisid].x -= box.size.x;
      coordNoix[thisid] ++;
    }
    if (targetCellj == clist.NCell.y){
      targetCellj -= clist.NCell.y;
      coord[thisid].y -= box.size.y;
      coordNoiy[thisid] ++;
    }
    if (targetCellk == clist.NCell.z){
      targetCellk -= clist.NCell.z;
      coord[thisid].z -= box.size.z;
      coordNoiz[thisid] ++;
    }
    targetCellid[tid] = D3toD1 (clist.NCell, targetCelli, targetCellj, targetCellk);
    // Report an out-of-range cell index together with the offending
    // coordinate for diagnosis.
    if (ptr_de != NULL &&
        (targetCelli >= clist.NCell.x ||
         targetCellj >= clist.NCell.y ||
         targetCellk >= clist.NCell.z)){
      *ptr_de = mdErrorOverFlowCellIdx;
      if (targetCelli >= IndexType(clist.NCell.x)){
        *erridx = targetCelli;
        *errsrc = ref.x;
        // return;
      }
      if (targetCellj >= IndexType(clist.NCell.y)){
        *erridx = targetCellj;
        *errsrc = ref.y;
        // return;
      }
      if (targetCellk >= IndexType(clist.NCell.z)){
        *erridx = targetCellk;
        *errsrc = ref.z;
        // return;
      }
    }
  }
  else {
    targetCellid[tid] = MaxIndexValue;
  }
  // mark particles to be send (mark is the highest bit of IndexType)
  IndexType mark = MaxIndexValue - (MaxIndexValue >> 1);
  if (tid < originalNumber && targetCellid[tid] != bid){
    originalData[tid] += mark;
  }
  // head sort -- presumably partitions stayers to the front and returns the
  // number of marked (leaving) entries; see headSort for the contract.
  IndexType total1 = headSort (originalData, targetCellid);
  IndexType total0 = blockDim.x - total1;
  // unmark and copy to send buff
  if (tid < originalNumber && targetCellid[tid] != bid){
    sendBuff [bid*clist.stride + tid - total0] = originalData[tid] - mark;
    targetBuff[bid*clist.stride + tid - total0] = targetCellid[tid];
    originalData[tid] = MaxIndexValue;
  }
  __syncthreads();
  // modify cell list
  clist.data[bid*clist.stride + tid] = originalData[tid];
  if (tid == 0) clist.numbers[bid] = total0;
}
__global__ void
buildDeviceCellList_initBuff (IndexType * sendBuff,
			      IndexType * targetBuff)
{
  // One thread initializes one slot: the "empty" sentinel in sendBuff and
  // cell id 0 in targetBuff.
  const IndexType blockId = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType slot = blockId * blockDim.x + threadIdx.x;
  sendBuff[slot] = MaxIndexValue;
  targetBuff[slot] = 0;
}
__global__ void
buildDeviceCellList_clearBuff (IndexType * sendBuff)
{
  // Reset every slot of the send buffer to the "empty" sentinel.
  const IndexType blockId = blockIdx.x + gridDim.x * blockIdx.y;
  sendBuff[blockId * blockDim.x + threadIdx.x] = MaxIndexValue;
}
// Cell-list maintenance, step 2: scatter atoms that changed cells.
// Thread 0 of each block walks this cell's send buffer until the
// MaxIndexValue sentinel, appending each atom to its target cell with a
// slot reserved by atomicInc, and clears the buffer entry. Flags
// mdErrorShortCellList when a target cell would exceed blockDim.x entries.
__global__ void
buildDeviceCellList_step2 (RectangularBox box,
DeviceCellList clist,
IndexType * sendBuff,
IndexType * targetBuff,
IndexType bitDeepth,
mdError_t * ptr_de)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType tid = threadIdx.x;
IndexType thisid;
IndexType ii = 0;
IndexType buffPosi;
if (tid == 0){
while ((thisid = sendBuff[buffPosi = (bid*clist.stride + ii)]) != MaxIndexValue){
IndexType cellid = targetBuff[buffPosi];
IndexType tailIdx = atomicInc(&clist.numbers[cellid], blockDim.x);
if (tailIdx >= blockDim.x&& ptr_de != NULL) {
*ptr_de = mdErrorShortCellList;
return;
}
clist.data[cellid * clist.stride + tailIdx] = thisid;
sendBuff[buffPosi] = MaxIndexValue;
ii ++;
}
}
}
// Sum the per-atom neighbor counts on the device and return the total.
// NOTE(review): copies dnlist.stride entries, which reinit sets equal to
// the atom count -- confirm stride never diverges from numAtom.
int NeighborList::
calSumNeighbor ()
{
hipMemcpy (sumNeighbor.buff, dnlist.Nneighbor, dnlist.stride * sizeof(IndexType),
hipMemcpyDeviceToDevice);
sumNeighbor.sumBuff (sumNeighbor_dresult, 0);
hipMemcpy (&sumNeighbor_hresult, sumNeighbor_dresult, sizeof(IndexType),
hipMemcpyDeviceToHost);
checkCUDAError ("NeighborList::calSumNeighbor");
return sumNeighbor_hresult;
}
| f9573bf45702097568972ce8ddf7541831f98939.cu | /**
* @file NeighborList_interface.cu
* @author Han Wang <han_wang@math.pku.edu.cn>
* @date Thu Nov 19 12:53:42 2009
*
* @brief Implementation of neighbor list
*
*
*/
#define DEVICE_CODE
#include "NeighborList_interface.h"
#include "Auxiliary.h"
#include "NeighborList.h"
#include <stdio.h>
#include "NonBondedInteraction.h"
#include "Reshuffle_interface.h"
/**
* these are textures for a fast reference of particle position.
*
*/
texture<CoordType, 1, cudaReadModeElementType> global_texRef_neighbor_coord;
texture<TypeType, 1, cudaReadModeElementType> global_texRef_neighbor_type;
void NeighborList::
clearDeviceNeighborList()
{
if ( mallocedDeviceNeighborList ){
cudaFree (dnlist.data);
cudaFree (dnlist.Nneighbor);
cudaFree (dnlist.forceIndex);
mallocedDeviceNeighborList = false;
checkCUDAError ("NeighborList::clearDeviceNeighborList");
}
}
void NeighborList::
clearNonBondedForce ()
{
if (mallocedNonBondedForceTable == true){
cudaFree (nbForceTable);
mallocedNonBondedForceTable = false;
}
}
void NeighborList::
clear()
{
clearDeviceNeighborList();
clearNonBondedForce();
unbindGlobalTexture ();
}
void NeighborList::
unbindGlobalTexture ()
{
if ( initedGlobalTexture ){
cudaUnbindTexture(global_texRef_neighbor_coord);
cudaUnbindTexture(global_texRef_neighbor_type);
initedGlobalTexture = false;
checkCUDAError ("NeighborList::unbindGlobalTexture");
}
}
void NeighborList::
bindGlobalTexture (const MDSystem & sys)
{
size_t sizetype = sizeof(TypeType) *sys.ddata.numMem;
size_t sizecoord = sizeof(CoordType) *sys.ddata.numMem;
cudaBindTexture(0, global_texRef_neighbor_coord, sys.ddata.coord, sizecoord);
cudaBindTexture(0, global_texRef_neighbor_type, sys.ddata.type, sizetype);
checkCUDAError ("NeighborList::init texture");
initedGlobalTexture = true;
}
NeighborList::~NeighborList()
{
clear();
}
// Round x up to the next multiple of 4 (used to align shared-memory buffer
// sizes). Returns x unchanged when it is already a multiple of 4.
// FIX: the original condition `x & 3 == 0` parses as `x & (3 == 0)` because
// == binds tighter than & in C++; that is always 0 (false), so the function
// rounded aligned values up as well (e.g. 4 -> 8). Parenthesize the mask.
static IndexType hroundUp4 (IndexType x)
{
  if ((x & 3) == 0){
    return x;
  }
  else {
    return ((x >> 2) + 1) << 2;
  }
}
void NeighborList::
buildDeviceNeighborListCellList (const MDSystem & sys,
const CellList & clist)
{
dim3 cellBlockDim = clist.getCellBlockDim();
bool sharednbForceTable (true);
size_t buildDeviceNeighborList_DeviceCellList_sbuffSize =
sizeof(IndexType) * hroundUp4(cellBlockDim.x) +
sizeof(CoordType) * hroundUp4(cellBlockDim.x) +
sizeof(TypeType) * hroundUp4(cellBlockDim.x) +
sizeof(IndexType) * hroundUp4(nbForceTableLength);
if (buildDeviceNeighborList_DeviceCellList_sbuffSize >=
SystemSharedBuffSize - GlobalFunctionParamSizeLimit){
sharednbForceTable = false;
buildDeviceNeighborList_DeviceCellList_sbuffSize =
sizeof(IndexType) * hroundUp4(cellBlockDim.x) +
sizeof(CoordType) * hroundUp4(cellBlockDim.x) +
sizeof(TypeType) * hroundUp4(cellBlockDim.x);
}
buildDeviceNeighborList_DeviceCellList
<<<clist.getCellGrimDim(), cellBlockDim,
buildDeviceNeighborList_DeviceCellList_sbuffSize>>> (
// <<<cellGridDim, myBlockDim>>> (
sys.ddata.numAtom,
sys.ddata.coord,
sys.ddata.type,
sys.box,
clist.dclist,
dnlist,
nbForceTable,
NatomType,
sharednbForceTable,
err.ptr_de);
err.check("NeighborList::buildDeviceNeighborListCellList");
checkCUDAError ("NeighborList::buildDeviceNeighborListCellList");
}
void NeighborList::
buildDeviceNeighborListAllPair (const MDSystem & sys)
{
bool sharednbForceTable (true);
size_t buildDeviceNeighborList_AllPair_sbuffSize =
sizeof(IndexType) * hroundUp4(myBlockDim.x) +
sizeof(CoordType) * hroundUp4(myBlockDim.x) +
sizeof(TypeType) * hroundUp4(myBlockDim.x) +
sizeof(IndexType) * hroundUp4(nbForceTableLength);
if (buildDeviceNeighborList_AllPair_sbuffSize >=
SystemSharedBuffSize - GlobalFunctionParamSizeLimit){
sharednbForceTable = false;
buildDeviceNeighborList_AllPair_sbuffSize =
sizeof(IndexType) * hroundUp4(myBlockDim.x) +
sizeof(CoordType) * hroundUp4(myBlockDim.x) +
sizeof(TypeType) * hroundUp4(myBlockDim.x);
}
buildDeviceNeighborList_AllPair
<<<atomGridDim, myBlockDim,
buildDeviceNeighborList_AllPair_sbuffSize>>>(
sys.ddata.numAtom,
sys.ddata.coord,
sys.ddata.type,
sys.ddata.rcut,
sys.box,
dnlist,
nbForceTable,
NatomType,
sharednbForceTable,
err.ptr_de);
err.check("NeighborList::build, build neighbor list all pair");
checkCUDAError ("NeighborList::build, build neighbor list all pair");
}
void NeighborList::
initNonBondedInteraction (const SystemNonBondedInteraction & sysNbInter)
{
if (! sysNbInter.beBuilt()) {
throw MDExcptUnbuiltNonBondedInteraction ("NeighborList");
}
NatomType = sysNbInter.numberOfAtomTypes();
nbForceTableLength = sysNbInter.interactionTableSize();
cudaMalloc ((void**)&nbForceTable,
nbForceTableLength * sizeof(IndexType));
cudaMemcpy (nbForceTable,
sysNbInter.interactionTable(),
nbForceTableLength * sizeof(IndexType),
cudaMemcpyHostToDevice);
checkCUDAError ("AtomNBForceTable::deviceInitTable");
mallocedNonBondedForceTable = true;
}
void NeighborList::
mallocDeviceNeighborList (const MDSystem & sys,
const ScalorType & DeviceNeighborListExpansion)
{
ScalorType density = sys.ddata.numAtom / (sys.box.size.x * sys.box.size.y * sys.box.size.z);
ScalorType expectedNumberInList
= 4./3. * M_PI * myrlist * myrlist * myrlist * density;
dnlist.listLength = IndexType(expectedNumberInList * DeviceNeighborListExpansion);
if (dnlist.listLength < 30){
dnlist.listLength = 30;
}
printf ("#@ length of the neighbor list is %d\n", dnlist.listLength);
cudaMalloc ((void**)&(dnlist.data), sizeof(IndexType) * dnlist.stride * dnlist.listLength);
cudaMalloc ((void**)&(dnlist.Nneighbor), sizeof(IndexType) * sys.ddata.numAtom);
cudaMalloc ((void**)&(dnlist.forceIndex), sizeof(IndexType) * dnlist.stride * dnlist.listLength);
// reshuffle backup things
cudaMalloc ((void**)&(bkdnlistData), sizeof(IndexType) * dnlist.stride * dnlist.listLength);
cudaMalloc ((void**)&(bkdnlistNneighbor), sizeof(IndexType) * sys.ddata.numAtom);
cudaMalloc ((void**)&(bkdnlistForceIndex), sizeof(IndexType) * dnlist.stride * dnlist.listLength);
checkCUDAError ("NeighborList::mallocDeviceNeighborList");
mallocedDeviceNeighborList = true;
}
void NeighborList::
reinit (const SystemNonBondedInteraction & sysNbInter,
const MDSystem & sys,
const ScalorType & rlist,
const ScalorType & rlistExten,
const IndexType & NTread,
const ScalorType & DeviceNeighborListExpansion)
{
myBlockDim.y = 1;
myBlockDim.z = 1;
myBlockDim.x = NTread;
IndexType nob;
if (sys.ddata.numAtom % myBlockDim.x == 0){
nob = sys.ddata.numAtom / myBlockDim.x;
} else {
nob = sys.ddata.numAtom / myBlockDim.x + 1;
}
atomGridDim = toGridDim (nob);
myrlist = rlist;
dnlist.rlist = myrlist;
dnlist.rlistExten = rlistExten;
dnlist.stride = sys.ddata.numAtom;
sumNeighbor.reinit (sys.ddata.numAtom, NThreadForSum);
cudaMalloc ((void**) &sumNeighbor_dresult, sizeof(IndexType));
checkCUDAError ("NeighborList::reinit, sumNeighbor");
// init neighbor list
clearDeviceNeighborList ();
mallocDeviceNeighborList (sys, DeviceNeighborListExpansion);
clearNonBondedForce ();
initNonBondedInteraction (sysNbInter);
unbindGlobalTexture ();
bindGlobalTexture (sys);
//init shared memory size
}
void NeighborList::
rebuild (const MDSystem & sys,
const CellList & clist,
MDTimer * timer)
{
if (clist.isempty()){
if (timer != NULL) timer->tic(mdTimeBuildNeighborList);
// printf ("rlist is %f\n", dnlist.rlist);
buildDeviceNeighborListAllPair (sys);
if (timer != NULL) timer->toc(mdTimeBuildNeighborList);
}
else {
if (timer != NULL) timer->tic(mdTimeBuildNeighborList);
buildDeviceNeighborListCellList (sys, clist);
if (timer != NULL) timer->toc(mdTimeBuildNeighborList);
}
}
void NeighborList::
reshuffle (const IndexType * indexTable,
const IndexType & numAtom,
MDTimer *timer)
{
if (timer != NULL) timer->tic(mdTimeReshuffleSystem);
// Reshuffle_reshuffleDeviceCellList
// <<<cellGridDim, myBlockDim>>> (
// dclist.data, indexTable);
// cudaMemcpy (bkbackupCoord, backupCoord,
// sizeof (CoordType) * numAtom,
// cudaMemcpyDeviceToDevice);
// Reshuffle_reshuffleArray
// <<<atomGridDim, myBlockDim>>>
// (bkbackupCoord, numAtom, indexTable, backupCoord);
Reshuffle_backupDeviceNeighborList
<<<atomGridDim, myBlockDim,
2 * myBlockDim.x * sizeof(IndexType)>>> (
numAtom,
dnlist.data,
dnlist.forceIndex,
dnlist.stride,
dnlist.Nneighbor,
bkdnlistData,
bkdnlistForceIndex,
bkdnlistNneighbor);
checkCUDAError ("NeighborList::reshuffle backup");
Reshuffle_reshuffleDeviceNeighborList
<<<atomGridDim, myBlockDim,
2 * myBlockDim.x * sizeof(IndexType)>>> (
numAtom,
bkdnlistData,
bkdnlistForceIndex,
dnlist.stride,
bkdnlistNneighbor,
indexTable,
dnlist.data,
dnlist.forceIndex,
dnlist.Nneighbor);
checkCUDAError ("NeighborList::reshuffle reshuffle");
if (timer != NULL) timer->toc(mdTimeReshuffleSystem);
}
NeighborList::
NeighborList (const SystemNonBondedInteraction & sysNbInter,
const MDSystem & sys,
const ScalorType & rlist,
const ScalorType & rlistExten,
const IndexType & NTread,
const ScalorType & DeviceNeighborListExpansion)
: mallocedDeviceNeighborList (false),
mallocedNonBondedForceTable (false),
initedGlobalTexture (false)
{
reinit (sysNbInter, sys, rlist, rlistExten, NTread, DeviceNeighborListExpansion);
}
////////////////////////////////////////////////////////////
// for the reason of using texture, we place this function here. it
// should be placed in NeighborList.cu
////////////////////////////////////////////////////////////
using namespace RectangularBoxGeometry;
// Wrap a possibly out-of-range 3-D cell index (ix, iy, iz) back into the
// periodic cell grid and report, per axis, the coordinate shift (in box
// lengths) that the wrap corresponds to.  Returns the flattened 1-D index
// of the wrapped cell.  NCelli is presumably the reciprocal cell count per
// axis -- confirm against DeviceCellList's definition.
__device__ IndexType
shiftedD3toD1 (DeviceCellList clist,
	       RectangularBox box,
	       int ix,
	       int iy,
	       int iz,
	       ScalorType * shiftx ,
	       ScalorType * shifty,
	       ScalorType * shiftz)
{
  // Whole grid periods to add on each axis so the index lands in [0, NCell).
  const int nx = -int(floorf(ix * clist.NCelli.x));
  const int ny = -int(floorf(iy * clist.NCelli.y));
  const int nz = -int(floorf(iz * clist.NCelli.z));
  ix += nx * clist.NCell.x;
  iy += ny * clist.NCell.y;
  iz += nz * clist.NCell.z;
  // Matching periodic image shift expressed in box coordinates.
  *shiftx = nx * box.size.x;
  *shifty = ny * box.size.y;
  *shiftz = nz * box.size.z;
  return D3toD1 (clist.NCell, ix, iy, iz);
}
// Build the Verlet neighbor list from an existing device cell list.
// Launch layout: one block per cell (bid = linearized cell index), one
// thread per cell slot (tid).  Each thread owns at most one atom (ii) and
// scans every atom of every neighboring cell, appending pairs within
// nlist.rlist to its column of the strided neighbor list.
// Dynamic shared memory stages the candidate cell's indices, coordinates,
// types, and (optionally) the non-bonded force table.
__global__ void
buildDeviceNeighborList_DeviceCellList (const IndexType numAtom,
					const CoordType * coord,
					const TypeType * type,
					const RectangularBox box,
					const DeviceCellList clist,
					DeviceNeighborList nlist,
					const IndexType * nbForceTable,
					const IndexType NatomType,
					const bool sharednbForceTable,
					mdError_t * ptr_de )
{
  // RectangularBoxGeometry::normalizeSystem (box, &ddata);
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType bidx, bidy, bidz;
  D1toD3 (clist.NCell, bid, bidx, bidy, bidz);
  // set number of neighbor to 0
  IndexType Nneighbor = 0;
  // load index of the atom (if any) owned by this thread's cell slot
  IndexType ii = getDeviceCellListData (clist, bid, tid);
  // load iith coordinate // use texturefetch instead
  CoordType ref;
  TypeType reftype;
  if (ii != MaxIndexValue){
#ifdef COMPILE_NO_TEX
    ref = coord[ii];
    reftype = type[ii];
#else
    ref = tex1Dfetch (global_texRef_neighbor_coord, ii);
    reftype = tex1Dfetch(global_texRef_neighbor_type, ii);
#endif
  }
  ScalorType rlist = nlist.rlist;
  // the target index and coordinates are shared; layout (in order):
  // indices | coords | types | [force table] -- each padded via roundUp4.
  extern __shared__ volatile char pub_sbuff[];
  volatile IndexType * targetIndexes =
      (volatile IndexType *) pub_sbuff;
  CoordType * target =
      (CoordType *) &targetIndexes[roundUp4(blockDim.x)];
  volatile TypeType * targettype =
      (volatile TypeType *) &target[roundUp4(blockDim.x)];
  IndexType * nbForceTableBuff = NULL;
  IndexType nbForceTableLength = AtomNBForceTable::dCalDataLength(NatomType);
  if (sharednbForceTable){
    nbForceTableBuff = (IndexType *) &targettype[roundUp4(blockDim.x)];
    cpyGlobalDataToSharedBuff (nbForceTable, nbForceTableBuff, nbForceTableLength);
  }
  __syncthreads();
  // __shared__ volatile  IndexType  targetIndexes [MaxThreadsPerBlock];
  // __shared__ volatile  CoordType target [MaxThreadsPerBlock];
  // __shared__ volatile  TypeType targettype [MaxThreadsPerBlock];
  // __shared__ volatile  IndexType  nbForceTableBuff [MaxNBForceTableBuffSize];
  // IndexType nbForceTableLength = AtomNBForceTable::dCalDataLength(NatomType);
  // if (sharednbForceTable){
  //   cpyGlobalDataToSharedBuff (nbForceTable, nbForceTableBuff, nbForceTableLength);
  // }
  // __syncthreads();
  // Axes spanned by a single cell need an explicit minimum-image fix below.
  bool oneCellX(false), oneCellY(false), oneCellZ(false);
  if (clist.NCell.x == 1) oneCellX = true;
  if (clist.NCell.y == 1) oneCellY = true;
  if (clist.NCell.z == 1) oneCellZ = true;
  ScalorType rlist2 = rlist * rlist;
  // loop over 27 neighbor cells
  for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){
    // if (threadIdx.x == 0){
    //   printf ("%d  %d\n", bid, clist.numNeighborCell[bid]);
    // }
    __syncthreads();
    IndexType targetCellIdx = getNeighborCellIndex    (clist, bid, i);
    CoordNoiType shiftNoi   = getNeighborCellShiftNoi (clist, bid, i);
    CoordType shift;
    shift.x = shiftNoi.x * box.size.x;
    shift.y = shiftNoi.y * box.size.y;
    shift.z = shiftNoi.z * box.size.z;
    // Stage the candidate cell cooperatively into shared memory.
    targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid);
    if (targetIndexes[tid] != MaxIndexValue){
      target[tid] = tex1Dfetch(global_texRef_neighbor_coord, targetIndexes[tid]);
      targettype[tid] = tex1Dfetch(global_texRef_neighbor_type, targetIndexes[tid]);
    }
    __syncthreads();
    // find neighbor: compare my atom against every staged candidate
    if (ii != MaxIndexValue){
      for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){
	ScalorType diffx = target[jj].x - shift.x - ref.x;
	ScalorType diffy = target[jj].y - shift.y - ref.y;
	ScalorType diffz = target[jj].z - shift.z - ref.z;
	if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx);
	if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy);
	if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz);
	if ((diffx*diffx+diffy*diffy+diffz*diffz) < rlist2 &&
	    targetIndexes[jj] != ii){
	  IndexType fidx;
	  if (sharednbForceTable){
	    fidx = AtomNBForceTable::calForceIndex (
		nbForceTableBuff, NatomType, reftype, targettype[jj]);
	  }
	  else {
	    fidx = AtomNBForceTable::calForceIndex (
		nbForceTable, NatomType, reftype, targettype[jj]);
	  }
	  // if (fidx != mdForceNULL) {
	  // NOTE(review): the write happens before the listLength overflow
	  // check at the end, so an overlong list writes past its column
	  // before the error is reported -- confirm listLength sizing.
	  IndexType listIdx = Nneighbor * nlist.stride + ii;
	  nlist.data[listIdx] = targetIndexes[jj];
	  nlist.forceIndex[listIdx] = fidx;
	  Nneighbor ++;
	  // }
	}
      }
    }
  }
  if (ii != MaxIndexValue) {
    if (Nneighbor > nlist.listLength && ptr_de != NULL){
      *ptr_de = mdErrorShortNeighborList;
      return;
    }
    nlist.Nneighbor[ii] = Nneighbor;
    // printf ("%d %d\n", ii, Nneighbor);
  }
}
// Cell-list rebuild, step 1.  One block per cell; each thread re-bins the
// atom in its cell slot.  Atoms whose new cell differs from the current one
// are compacted (headSort) into sendBuff/targetBuff for step 2; atoms that
// stay are kept in the (compacted) cell.  Atoms exactly on the upper box
// face are wrapped back and their periodic-image counters (coordNoi*) are
// bumped.  Out-of-range bins raise mdErrorOverFlowCellIdx and record the
// offending index/coordinate in *erridx / *errsrc.
// Dynamic shared memory: 2 * blockDim.x IndexType words.
__global__ void
buildDeviceCellList_step1 (IndexType numAtom,
			   CoordType * coord,
			   IntScalorType * coordNoix,
			   IntScalorType * coordNoiy,
			   IntScalorType * coordNoiz,
			   RectangularBox box,
			   DeviceCellList clist,
			   IndexType * sendBuff,
			   IndexType * targetBuff,
			   mdError_t * ptr_de,
			   IndexType * erridx,
			   ScalorType * errsrc)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  extern __shared__ volatile IndexType sbuff[];
  volatile IndexType * originalData = (volatile IndexType *) sbuff;
  volatile IndexType * targetCellid = (volatile IndexType *) &originalData[blockDim.x];
  // __shared__ volatile IndexType originalData[MaxThreadsPerBlock];
  // __shared__ volatile IndexType targetCellid[MaxThreadsPerBlock];
  // copy data from cell list
  originalData[tid] = clist.data[bid*clist.stride + tid];
  IndexType originalNumber = clist.numbers[bid];
  // calculate the target cell
  if (originalData[tid] != MaxIndexValue){
    IndexType targetCelli, targetCellj, targetCellk;
    IndexType thisid = originalData[tid];
    // FIX: `ref` must be declared in the COMPILE_NO_TEX branch as well; the
    // original only declared it in the texture branch, which broke the
    // no-texture build with an undeclared identifier.
#ifdef COMPILE_NO_TEX
    CoordType ref (coord[thisid]);
#else
    CoordType ref (tex1Dfetch(global_texRef_neighbor_coord, thisid));
#endif
    targetCelli = IndexType(ref.x * box.sizei.x * ScalorType (clist.NCell.x));
    targetCellj = IndexType(ref.y * box.sizei.y * ScalorType (clist.NCell.y));
    targetCellk = IndexType(ref.z * box.sizei.z * ScalorType (clist.NCell.z));
    // Atoms sitting exactly on the upper face bin one past the last cell:
    // wrap them to cell 0's side and record the crossing.
    if (targetCelli == clist.NCell.x){
      targetCelli -= clist.NCell.x;
      coord[thisid].x -= box.size.x;
      coordNoix[thisid] ++;
    }
    if (targetCellj == clist.NCell.y){
      targetCellj -= clist.NCell.y;
      coord[thisid].y -= box.size.y;
      coordNoiy[thisid] ++;
    }
    if (targetCellk == clist.NCell.z){
      targetCellk -= clist.NCell.z;
      coord[thisid].z -= box.size.z;
      coordNoiz[thisid] ++;
    }
    targetCellid[tid] = D3toD1 (clist.NCell, targetCelli, targetCellj, targetCellk);
    // Diagnostics for coordinates that still bin outside the grid.
    if (ptr_de != NULL &&
	(targetCelli >= clist.NCell.x ||
	 targetCellj >= clist.NCell.y ||
	 targetCellk >= clist.NCell.z)){
      *ptr_de = mdErrorOverFlowCellIdx;
      if (targetCelli >= IndexType(clist.NCell.x)){
	*erridx = targetCelli;
	*errsrc = ref.x;
	// return;
      }
      if (targetCellj >= IndexType(clist.NCell.y)){
	*erridx = targetCellj;
	*errsrc = ref.y;
	// return;
      }
      if (targetCellk >= IndexType(clist.NCell.z)){
	*erridx = targetCellk;
	*errsrc = ref.z;
	// return;
      }
    }
  }
  else {
    targetCellid[tid] = MaxIndexValue;
  }
  // mark particles to be send: set the top bit so headSort can separate
  // leavers from stayers while keeping indices recoverable.
  IndexType mark = MaxIndexValue - (MaxIndexValue >> 1);
  if (tid < originalNumber && targetCellid[tid] != bid){
    originalData[tid] += mark;
  }
  // head sort: stayers compacted to the front, leavers to the back.
  IndexType total1 = headSort (originalData, targetCellid);
  IndexType total0 = blockDim.x - total1;
  // unmark and copy to send buff
  if (tid < originalNumber && targetCellid[tid] != bid){
    sendBuff [bid*clist.stride + tid - total0] = originalData[tid] - mark;
    targetBuff[bid*clist.stride + tid - total0] = targetCellid[tid];
    originalData[tid] = MaxIndexValue;
  }
  __syncthreads();
  // modify cell list: write back the compacted stayers and the new count.
  clist.data[bid*clist.stride + tid] = originalData[tid];
  if (tid == 0) clist.numbers[bid] = total0;
}
// Initialize the step-1/step-2 exchange buffers: every sendBuff slot is set
// to the MaxIndexValue sentinel ("empty") and every targetBuff slot to 0.
// Launch with one thread per buffer slot in a (possibly 2-D) 1-D-indexed grid.
__global__ void
buildDeviceCellList_initBuff (IndexType * sendBuff,
			      IndexType * targetBuff)
{
  const IndexType blockId = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType slot = threadIdx.x + blockId * blockDim.x;
  sendBuff[slot] = MaxIndexValue;
  targetBuff[slot] = 0;
}
// Reset the send buffer to the MaxIndexValue sentinel after a rebuild pass.
// One thread per buffer slot.
__global__ void
buildDeviceCellList_clearBuff (IndexType * sendBuff)
{
  const IndexType blockId = blockIdx.x + gridDim.x * blockIdx.y;
  const IndexType slot = threadIdx.x + blockId * blockDim.x;
  sendBuff[slot] = MaxIndexValue;
}
// Cell-list rebuild, step 2.  One block per cell, but only thread 0 works:
// it drains this cell's slice of sendBuff (terminated by MaxIndexValue),
// appending each atom to its destination cell.  atomicInc guards against
// concurrent appends from other blocks; a tail index past blockDim.x means
// the destination cell is full and mdErrorShortCellList is raised.
__global__ void
buildDeviceCellList_step2 (RectangularBox box,
			   DeviceCellList clist,
			   IndexType * sendBuff,
			   IndexType * targetBuff,
			   IndexType bitDeepth,
			   mdError_t * ptr_de)
{
  IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
  IndexType tid = threadIdx.x;
  IndexType thisid;
  IndexType ii = 0;
  IndexType buffPosi;  // assigned inside the loop condition before any use
  if (tid == 0){
    while ((thisid = sendBuff[buffPosi = (bid*clist.stride + ii)]) != MaxIndexValue){
      IndexType cellid = targetBuff[buffPosi];
      // Reserve a slot at the tail of the destination cell.
      IndexType tailIdx = atomicInc(&clist.numbers[cellid], blockDim.x);
      if (tailIdx >= blockDim.x&& ptr_de != NULL) {
	*ptr_de = mdErrorShortCellList;
	return;
      }
      clist.data[cellid * clist.stride + tailIdx] = thisid;
      sendBuff[buffPosi] = MaxIndexValue;  // consume the slot
      ii ++;
    }
  }
}
// Sum dnlist.Nneighbor over all atom slots on the device and return the
// total on the host.  Works on a device-side copy so the reduction does not
// clobber the live counters; the final single-value copy back is blocking.
int NeighborList::
calSumNeighbor ()
{
  cudaMemcpy (sumNeighbor.buff, dnlist.Nneighbor, dnlist.stride * sizeof(IndexType),
	      cudaMemcpyDeviceToDevice);
  sumNeighbor.sumBuff (sumNeighbor_dresult, 0);
  cudaMemcpy (&sumNeighbor_hresult, sumNeighbor_dresult, sizeof(IndexType),
	      cudaMemcpyDeviceToHost);
  checkCUDAError ("NeighborList::calSumNeighbor");
  return sumNeighbor_hresult;
}
|
05d19b4bf24ea978a47a5c94b4dd5f926624fc65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Element-wise device copy: dest[i] = src[i] for every i in [0, sz).
// Launch with at least sz total threads in a 1-D grid; the guard clause
// handles the partial block at the grid tail.
__global__ void copy_kernel(size_t sz, float_t* src, float_t* dest)
{
    const size_t gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= sz) return;
    dest[gid] = src[gid];
}
} | 05d19b4bf24ea978a47a5c94b4dd5f926624fc65.cu | #include "includes.h"
// Element-wise device copy: dest[index] = src[index] for index in [0, sz).
// Launch with >= sz total threads in a 1-D grid; the bounds check guards
// the partial block at the grid tail.
__global__ void copy_kernel(size_t sz, float_t* src, float_t* dest)
{
size_t index = blockDim.x * blockIdx.x + threadIdx.x;
if(index < sz)
{
dest[index]=src[index];
}
}
} |
060a6ca2a6313cb86a54dfe9ac8cfbc1312b06ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <sys/time.h>
#include <cstdio>
#include "jacobi.h"
#include "error_checks.h"
// Change this to 0 if CPU reference result is not needed
#define COMPUTE_CPU_REFERENCE 1
#define MAX_ITERATIONS 3000
// CPU kernel
// One Jacobi relaxation sweep on the CPU: for every interior point of the
// N x N grid, phi = average of the four phiPrev neighbors minus h2*source.
// Boundary rows/columns are left untouched (fixed boundary condition).
void sweepCPU(double* phi, const double *phiPrev, const double *source,
              double h2, int N)
{
    for (int row = 1; row < N - 1; row++) {
        for (int col = 1; col < N - 1; col++) {
            const int idx = col + row * N;
            const double west  = phiPrev[idx - 1];
            const double east  = phiPrev[idx + 1];
            const double south = phiPrev[idx - N];
            const double north = phiPrev[idx + N];
            phi[idx] = 0.25 * (west + east + south + north - h2 * source[idx]);
        }
    }
}
// GPU kernel
// GPU Jacobi sweep: one thread per grid point of the N x N domain, launched
// on a 2-D grid of 2-D blocks.  Only interior points (0 < i,j < N-1) are
// updated; the guard also rejects the out-of-domain threads of partial
// blocks at the grid edges.
__global__
void sweepGPU(double *phi, const double *phiPrev, const double *source,
              double h2, int N)
{
    // #error Add here the GPU version of the update routine (see sweepCPU above)
    int i, j;
    int index, i1, i2, i3, i4;
    i = blockIdx.x * blockDim.x + threadIdx.x;
    j = blockIdx.y * blockDim.y + threadIdx.y;
    index = i + j * N;
    if (i > 0 && j > 0 && i < N-1 && j < N-1){ // be careful!
        // Five-point stencil: left, right, below, above.
        i1 = (i-1) + j * N;
        i2 = (i+1) + j * N;
        i3 = i + (j-1) * N;
        i4 = i + (j+1) * N;
        phi[index] = 0.25 * (phiPrev[i1] + phiPrev[i2] +
                             phiPrev[i3] + phiPrev[i4] -
                             h2 * source[index]);
    }
}
// Mean absolute element-wise difference between two N x N arrays,
// i.e. sum(|a[i] - b[i]|) / (N*N).
double compareArrays(const double *a, const double *b, int N)
{
    const int total = N * N;
    double accum = 0.0;
    for (int k = 0; k < total; k++)
        accum += fabs(a[k] - b[k]);
    return accum / total;
}
// Relative L2 norm of the Jacobi update: ||phi - phiPrev||_2 / ||phi||_2,
// used as the convergence criterion.  Fix: the original divided by ||phi||
// unconditionally, producing NaN/inf for an all-zero field; a zero field is
// now reported as zero difference (trivially converged).
double diffCPU(const double *phi, const double *phiPrev, int N)
{
    double sum = 0.0;
    double diffsum = 0.0;
    for (int i = 0; i < N*N; i++) {
        const double d = phi[i] - phiPrev[i];
        diffsum += d * d;
        sum += phi[i] * phi[i];
    }
    // Guard the degenerate all-zero field (division by zero -> NaN).
    if (sum == 0.0)
        return 0.0;
    return sqrt(diffsum/sum);
}
// Driver: solve the 2-D Poisson problem with Jacobi iteration on a 512x512
// grid, once on the CPU (reference) and once on the GPU, timing both and
// reporting the mean difference between the two solutions.
int main()
{
    timeval t1, t2; // Structs for timing
    const int N = 512;
    double h = 1.0 / (N - 1);
    int iterations;
    const double tolerance = 5e-4; // Stopping condition
    int i, j, index;
    const int blocksize = 16;
    double *phi = new double[N*N];
    double *phiPrev = new double[N*N];
    double *source = new double[N*N];
    double *phi_cuda = new double[N*N];
    double *phi_d, *phiPrev_d, *source_d;
    // Size of the arrays in bytes
    const int size = N*N*sizeof(double);
    double diff;
    // Source initialization: a positive and a negative circular charge
    // (a dipole) placed at x = +/-0.25 on the centered [-0.5, 0.5] domain.
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            double x, y;
            x = (i - N / 2) * h;
            y = (j - N / 2) * h;
            index = j + i * N;
            if (((x - 0.25) * (x - 0.25) + y * y) < 0.1 * 0.1)
                source[index] = 1e10*h*h;
            else if (((x + 0.25) * (x + 0.25) + y * y) < 0.1 * 0.1)
                source[index] = -1e10*h*h;
            else
                source[index] = 0.0;
        }
    }
    CUDA_CHECK( hipMalloc( (void**)&source_d, size) );
    CUDA_CHECK( hipMemcpy(source_d, source, size, hipMemcpyHostToDevice) );
    // Reset values to zero (initial guess for both solvers)
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            index = j + i * N;
            phi[index] = 0.0;
            phiPrev[index] = 0.0;
        }
    }
    CUDA_CHECK( hipMalloc( (void**)&phi_d, size) );
    CUDA_CHECK( hipMalloc( (void**)&phiPrev_d, size) );
    CUDA_CHECK( hipMemcpy(phi_d, phi, size, hipMemcpyHostToDevice) );
    CUDA_CHECK( hipMemcpy(phiPrev_d, phiPrev, size, hipMemcpyHostToDevice) );
    // CPU version: ping-pong between phi and phiPrev, checking convergence
    // every 100 iterations.
    if(COMPUTE_CPU_REFERENCE) {
        gettimeofday(&t1, NULL);
        // Do sweeps untill difference is under the tolerance
        diff = tolerance * 2;
        iterations = 0;
        while (diff > tolerance && iterations < MAX_ITERATIONS) {
            sweepCPU(phiPrev, phi, source, h * h, N);
            sweepCPU(phi, phiPrev, source, h * h, N);
            iterations += 2;
            if (iterations % 100 == 0) {
                diff = diffCPU(phi, phiPrev, N);
                printf("%d %g\n", iterations, diff);
            }
        }
        gettimeofday(&t2, NULL);
        printf("CPU Jacobi: %g seconds, %d iterations\n",
               t2.tv_sec - t1.tv_sec +
               (t2.tv_usec - t1.tv_usec) / 1.0e6, iterations);
    }
    // GPU version: 16x16 thread blocks tiling the N x N grid (ceil-div).
    dim3 dimBlock(blocksize, blocksize);
    dim3 dimGrid((N + blocksize - 1) / blocksize, (N + blocksize - 1) / blocksize);
    //do sweeps until diff under tolerance
    diff = tolerance * 2;
    iterations = 0;
    gettimeofday(&t1, NULL);
    while (diff > tolerance && iterations < MAX_ITERATIONS) {
        // See above how the CPU update kernel is called
        // and implement similar calling sequence for the GPU code
        //// Add routines here
        // #error Add GPU kernel calls here (see CPU version above)
        hipLaunchKernelGGL(( sweepGPU) , dim3(dimGrid), dim3(dimBlock) , 0, 0, phiPrev_d, phi_d, source_d, h * h, N);
        hipLaunchKernelGGL(( sweepGPU) , dim3(dimGrid), dim3(dimBlock) , 0, 0, phi_d, phiPrev_d, source_d, h * h, N);
        iterations += 2;
        if (iterations % 100 == 0) {
            // diffGPU is defined in the header file, it uses
            // Thrust library for reduction computation
            diff = diffGPU<double>(phiPrev_d, phi_d, N);
            CHECK_ERROR_MSG("Difference computation");
            printf("%d %g\n", iterations, diff);
        }
    }
    //// Add here the routine to copy back the results
    // #error Copy back the results
    CUDA_CHECK( hipMemcpy(phi_cuda, phi_d, size, hipMemcpyDeviceToHost) );
    gettimeofday(&t2, NULL);
    printf("GPU Jacobi: %g seconds, %d iterations\n",
           t2.tv_sec - t1.tv_sec +
           (t2.tv_usec - t1.tv_usec) / 1.0e6, iterations);
    //// Add here the clean up code for all allocated CUDA resources
    // #error Add here the clean up code
    CUDA_CHECK( hipFree(phi_d) );
    CUDA_CHECK( hipFree(phiPrev_d) );
    CUDA_CHECK( hipFree(source_d) );
    if (COMPUTE_CPU_REFERENCE) {
        printf("Average difference is %g\n", compareArrays(phi, phi_cuda, N));
    }
    delete[] phi;
    delete[] phi_cuda;
    delete[] phiPrev;
    delete[] source;
    return EXIT_SUCCESS;
}
#include <cstdio>
#include "jacobi.h"
#include "error_checks.h"
// Change this to 0 if CPU reference result is not needed
#define COMPUTE_CPU_REFERENCE 1
#define MAX_ITERATIONS 3000
// CPU kernel
// One Jacobi relaxation sweep on the CPU: every interior point of the
// N x N grid becomes the average of its four phiPrev neighbors minus
// h2*source.  Boundary rows/columns are left untouched.
void sweepCPU(double* phi, const double *phiPrev, const double *source,
              double h2, int N)
{
    int i, j;
    int index, i1, i2, i3, i4;
    for (j = 1; j < N-1; j++) {
        for (i = 1; i < N-1; i++) {
            // Five-point stencil: left, right, below, above.
            index = i + j*N;
            i1 = (i-1) + j * N;
            i2 = (i+1) + j * N;
            i3 = i + (j-1) * N;
            i4 = i + (j+1) * N;
            phi[index] = 0.25 * (phiPrev[i1] + phiPrev[i2] +
                                 phiPrev[i3] + phiPrev[i4] -
                                 h2 * source[index]);
        }
    }
}
// GPU kernel
// GPU Jacobi sweep: one thread per grid point, launched on a 2-D grid of
// 2-D blocks tiling the N x N domain.  Interior points only; the guard
// also rejects out-of-domain threads from partial edge blocks.
__global__
void sweepGPU(double *phi, const double *phiPrev, const double *source,
              double h2, int N)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col <= 0 || row <= 0 || col >= N - 1 || row >= N - 1)
        return;
    const int idx = col + row * N;
    // Five-point stencil around (col, row).
    phi[idx] = 0.25 * (phiPrev[idx - 1] + phiPrev[idx + 1] +
                       phiPrev[idx - N] + phiPrev[idx + N] -
                       h2 * source[idx]);
}
// Mean absolute element-wise difference between two N x N arrays:
// sum(|a[i] - b[i]|) / (N*N).  Used to compare CPU and GPU solutions.
double compareArrays(const double *a, const double *b, int N)
{
    double error = 0.0;
    int i;
    for (i = 0; i < N*N; i++) {
        error += fabs(a[i] - b[i]);
    }
    return error/(N*N);
}
// Relative L2 norm of the Jacobi update: ||phi - phiPrev||_2 / ||phi||_2,
// used as the convergence criterion.  Fix: the original divided by ||phi||
// unconditionally, producing NaN/inf for an all-zero field; a zero field is
// now reported as zero difference (trivially converged).
double diffCPU(const double *phi, const double *phiPrev, int N)
{
    double sum = 0.0;
    double diffsum = 0.0;
    for (int i = 0; i < N*N; i++) {
        const double d = phi[i] - phiPrev[i];
        diffsum += d * d;
        sum += phi[i] * phi[i];
    }
    // Guard the degenerate all-zero field (division by zero -> NaN).
    if (sum == 0.0)
        return 0.0;
    return sqrt(diffsum/sum);
}
// Driver: solve the 2-D Poisson problem with Jacobi iteration on a 512x512
// grid, once on the CPU (reference) and once on the GPU, timing both and
// reporting the mean difference between the two solutions.
int main()
{
    timeval t1, t2; // Structs for timing
    const int N = 512;
    double h = 1.0 / (N - 1);
    int iterations;
    const double tolerance = 5e-4; // Stopping condition
    int i, j, index;
    const int blocksize = 16;
    double *phi = new double[N*N];
    double *phiPrev = new double[N*N];
    double *source = new double[N*N];
    double *phi_cuda = new double[N*N];
    double *phi_d, *phiPrev_d, *source_d;
    // Size of the arrays in bytes
    const int size = N*N*sizeof(double);
    double diff;
    // Source initialization: a positive and a negative circular charge
    // (a dipole) placed at x = +/-0.25 on the centered [-0.5, 0.5] domain.
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            double x, y;
            x = (i - N / 2) * h;
            y = (j - N / 2) * h;
            index = j + i * N;
            if (((x - 0.25) * (x - 0.25) + y * y) < 0.1 * 0.1)
                source[index] = 1e10*h*h;
            else if (((x + 0.25) * (x + 0.25) + y * y) < 0.1 * 0.1)
                source[index] = -1e10*h*h;
            else
                source[index] = 0.0;
        }
    }
    CUDA_CHECK( cudaMalloc( (void**)&source_d, size) );
    CUDA_CHECK( cudaMemcpy(source_d, source, size, cudaMemcpyHostToDevice) );
    // Reset values to zero (initial guess for both solvers)
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            index = j + i * N;
            phi[index] = 0.0;
            phiPrev[index] = 0.0;
        }
    }
    CUDA_CHECK( cudaMalloc( (void**)&phi_d, size) );
    CUDA_CHECK( cudaMalloc( (void**)&phiPrev_d, size) );
    CUDA_CHECK( cudaMemcpy(phi_d, phi, size, cudaMemcpyHostToDevice) );
    CUDA_CHECK( cudaMemcpy(phiPrev_d, phiPrev, size, cudaMemcpyHostToDevice) );
    // CPU version: ping-pong between phi and phiPrev, checking convergence
    // every 100 iterations.
    if(COMPUTE_CPU_REFERENCE) {
        gettimeofday(&t1, NULL);
        // Do sweeps untill difference is under the tolerance
        diff = tolerance * 2;
        iterations = 0;
        while (diff > tolerance && iterations < MAX_ITERATIONS) {
            sweepCPU(phiPrev, phi, source, h * h, N);
            sweepCPU(phi, phiPrev, source, h * h, N);
            iterations += 2;
            if (iterations % 100 == 0) {
                diff = diffCPU(phi, phiPrev, N);
                printf("%d %g\n", iterations, diff);
            }
        }
        gettimeofday(&t2, NULL);
        printf("CPU Jacobi: %g seconds, %d iterations\n",
               t2.tv_sec - t1.tv_sec +
               (t2.tv_usec - t1.tv_usec) / 1.0e6, iterations);
    }
    // GPU version: 16x16 thread blocks tiling the N x N grid (ceil-div).
    dim3 dimBlock(blocksize, blocksize);
    dim3 dimGrid((N + blocksize - 1) / blocksize, (N + blocksize - 1) / blocksize);
    //do sweeps until diff under tolerance
    diff = tolerance * 2;
    iterations = 0;
    gettimeofday(&t1, NULL);
    while (diff > tolerance && iterations < MAX_ITERATIONS) {
        // See above how the CPU update kernel is called
        // and implement similar calling sequence for the GPU code
        //// Add routines here
        // #error Add GPU kernel calls here (see CPU version above)
        sweepGPU <<< dimGrid, dimBlock >>> (phiPrev_d, phi_d, source_d, h * h, N);
        sweepGPU <<< dimGrid, dimBlock >>> (phi_d, phiPrev_d, source_d, h * h, N);
        iterations += 2;
        if (iterations % 100 == 0) {
            // diffGPU is defined in the header file, it uses
            // Thrust library for reduction computation
            diff = diffGPU<double>(phiPrev_d, phi_d, N);
            CHECK_ERROR_MSG("Difference computation");
            printf("%d %g\n", iterations, diff);
        }
    }
    //// Add here the routine to copy back the results
    // #error Copy back the results
    CUDA_CHECK( cudaMemcpy(phi_cuda, phi_d, size, cudaMemcpyDeviceToHost) );
    gettimeofday(&t2, NULL);
    printf("GPU Jacobi: %g seconds, %d iterations\n",
           t2.tv_sec - t1.tv_sec +
           (t2.tv_usec - t1.tv_usec) / 1.0e6, iterations);
    //// Add here the clean up code for all allocated CUDA resources
    // #error Add here the clean up code
    CUDA_CHECK( cudaFree(phi_d) );
    CUDA_CHECK( cudaFree(phiPrev_d) );
    CUDA_CHECK( cudaFree(source_d) );
    if (COMPUTE_CPU_REFERENCE) {
        printf("Average difference is %g\n", compareArrays(phi, phi_cuda, N));
    }
    delete[] phi;
    delete[] phi_cuda;
    delete[] phiPrev;
    delete[] source;
    return EXIT_SUCCESS;
}
2f64df139d0b4cba2400a0ce0b64654094d588fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S2_20.h"
// Model entry point (signature supplied by the SET_ODE_INITIAL_CONDITIONS_GPU
// macro): allocate the pitched state matrix on the device -- NEQ rows (one
// per state variable) by num_volumes columns (one per cell) -- publish the
// row pitch to the device-side `pitch` symbol (declared elsewhere), and
// launch the initializer kernel.  Returns the row pitch in bytes.
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
    print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
    // execution configuration: ceil-div so partial blocks cover all volumes
    const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
    size_t size = num_volumes*sizeof(real);
    check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
    check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
    hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);
    check_cuda_error( hipPeekAtLastError() );
    hipDeviceSynchronize();
    return pitch_h;
}
// Model entry point (signature supplied by the SOLVE_MODEL_ODES_GPU macro):
// copy per-cell stimulus currents (and, for adaptive meshes, the list of
// cell indices to integrate) to the device, launch the ODE solver kernel
// for num_steps sub-steps of size dt, then free the temporaries.
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
    // execution configuration: ceil-div over the cells to solve
    const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
    size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
    size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
    real *stims_currents_device;
    check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
    check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
    //the array cells to solve is passed when we are using and adapative mesh
    uint32_t *cells_to_solve_device = NULL;
    if(cells_to_solve != NULL) {
        check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
        check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
    }
    hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
    check_cuda_error( hipPeekAtLastError() );
    // Release the per-call scratch buffers.
    check_cuda_error(hipFree(stims_currents_device));
    if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
// Write the model's initial state into the pitched state matrix: one thread
// per cell (column); row i of the matrix holds state variable i.  The values
// are precomputed steady-state conditions rather than the textbook initial
// conditions kept in the commented-out block.
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
    // Thread ID
    int threadID = blockDim.x * blockIdx.x + threadIdx.x;
    if(threadID < num_volumes) {
        /* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
        *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
        *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
        *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
        *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
        *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
        *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
        *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
        *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
        *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
        *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
        *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
        *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
        *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
        *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
        *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
        *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.5672984461743,0.00129033409755676,0.779685695864660,0.779520797116827,0.000174877807622397,0.485080153205855,0.00294050217137296,0.999998347773897,1.93380546283175e-08,1.89130845114506e-05,0.999767401669784,1.00695706000729,0.999992992750444,4.84083139845398e-05,0.352299041137461,10.2546628191673,139.474807488311};
        for (uint32_t i = 0; i < NEQ; i++)
            *((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
    }
}
// Solving the model for each cell in the tissue matrix ni x nj
// One thread per cell to integrate.  When cells_to_solve is non-NULL
// (adaptive mesh), it maps threads to state-matrix columns; otherwise
// the thread index is the column.  Each thread performs num_steps calls
// to RHS_gpu, which returns the ALREADY-ADVANCED state in rDY (not raw
// derivatives), and writes it back into the pitched state matrix.
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
                          uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
                          int num_steps)
{
    int threadID = blockDim.x * blockIdx.x + threadIdx.x;
    int sv_id;
    // Each thread solves one cell model
    if(threadID < num_cells_to_solve) {
        if(cells_to_solve)
            sv_id = cells_to_solve[threadID];
        else
            sv_id = threadID;
        real rDY[NEQ];
        for (int n = 0; n < num_steps; ++n) {
            RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
            // NOTE(review): this row-0 store is immediately overwritten by
            // the i == 0 iteration of the loop below (rDY[0] already holds
            // the Euler-updated voltage) -- looks redundant; confirm before
            // removing.
            *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
            for(int i = 0; i < NEQ; i++) {
                *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
            }
        }
    }
}
// Right-hand side of the ten Tusscher 2004 human ventricular model
// (epicardial variant) for one cell.  Reads the NEQ state variables of
// column `threadID_` from the pitched matrix `sv`, computes all membrane
// currents, and writes the state advanced by one step of size `dt` into
// rDY_: the voltage by forward Euler, the gates by exponential
// (Rush-Larsen-style) updates, and the concentrations by the analytic
// buffering solve below.  Note rDY_ holds NEW state values, not derivatives.
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
    // ---- State variables (row i of the pitched matrix) ----
    real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);   // membrane potential V (mV)
    real sm    = *((real*)((char*)sv + pitch * 1) + threadID_);   // INa activation gate m
    real sh    = *((real*)((char*)sv + pitch * 2) + threadID_);   // INa fast inactivation h
    real sj    = *((real*)((char*)sv + pitch * 3) + threadID_);   // INa slow inactivation j
    real sxr1  = *((real*)((char*)sv + pitch * 4) + threadID_);   // IKr activation xr1
    real sxr2  = *((real*)((char*)sv + pitch * 5) + threadID_);   // IKr inactivation xr2
    real sxs   = *((real*)((char*)sv + pitch * 6) + threadID_);   // IKs activation xs
    real ss    = *((real*)((char*)sv + pitch * 7) + threadID_);   // Ito inactivation s
    real sr    = *((real*)((char*)sv + pitch * 8) + threadID_);   // Ito activation r
    real sd    = *((real*)((char*)sv + pitch * 9) + threadID_);   // ICaL activation d
    real sf    = *((real*)((char*)sv + pitch * 10) + threadID_);  // ICaL inactivation f
    real sfca  = *((real*)((char*)sv + pitch * 11) + threadID_);  // ICaL Ca-dependent inactivation fCa
    real sg    = *((real*)((char*)sv + pitch * 12) + threadID_);  // SR release gate g
    real Cai   = *((real*)((char*)sv + pitch * 13) + threadID_);  // cytosolic [Ca2+]
    real CaSR  = *((real*)((char*)sv + pitch * 14) + threadID_);  // SR [Ca2+]
    real Nai   = *((real*)((char*)sv + pitch * 15) + threadID_);  // intracellular [Na+]
    real Ki    = *((real*)((char*)sv + pitch * 16) + threadID_);  // intracellular [K+]
    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;
    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;
    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;
    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;
    //Cellular capacitance
    real CAPACITANCE=0.185;
    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    /// real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    //real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    ///#ifdef EPI
    real Gto=0.294;
    ///#endif
    ///#ifdef ENDO
    /// real Gto=0.073;
    ///#endif
    ///#ifdef MCELL
    /// real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;
    // Setting Elnaz's parameters: fitted conductances/rates that override
    // the textbook defaults above (order documented by the assignments below).
    real parameters []={14.4038672892215,0.000367608254336895,0.000123070235263446,0.000417517754191864,0.237777077660337,0.131017927849494,0.169563048009895,4.83724460632959,0.0144876831473433,1.80972776373976,1095.54078625056,0.000417877235006765,0.432451046382988,0.0121272289288792,0.00236477866465391,6.60789521910949e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];
    // ---- Working variables: currents, fluxes, and gate targets ----
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;
    real dNai;
    real dKi;
    real dCai;
    real dCaSR;
    real A;
    //  real BufferFactorc;
    //  real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;
    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;
    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //  real BufcKbufc=Bufc*Kbufc;
    //  real Kbufcsquare=Kbufc*Kbufc;
    //  real Kbufc2=2*Kbufc;
    //  real BufsrKbufsr=Bufsr*Kbufsr;
    //  const real Kbufsrsquare=Kbufsr*Kbufsr;
    //  const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);
    real sItot;
    // ---- Reversal potentials and rectification factors ----
    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));
    // ---- Membrane currents ----
    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);
    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;
    // ---- Calcium dynamics: SR release/leak/uptake and buffering ----
    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    /// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // Analytic solve of the rapid-buffering equilibrium (quadratic in Ca).
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    // ---- Sodium and potassium bookkeeping (forward Euler) ----
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;
    // ---- Gate steady states and time constants ----
    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;
    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;
    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));
    // ---- Gate updates (exponential / Rush-Larsen-style integration) ----
    //Update gates
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g may only move toward their targets when depolarized
    // (> -37 mV they are frozen against increases).
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37)
        sg=gold;
    //update voltage (forward Euler on the total ionic current)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
| 2f64df139d0b4cba2400a0ce0b64654094d588fe.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S2_20.h"
// Host entry point (extern "C"; signature supplied by the
// SET_ODE_INITIAL_CONDITIONS_GPU macro). Allocates the pitched state matrix
// (*sv) with NEQ rows of num_volumes entries each, publishes the row pitch to
// the device-side `pitch` symbol, and launches the kernel that fills in the
// initial conditions for every volume. Returns the row pitch in bytes.
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration: one thread per volume, ceil-divided into blocks
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
// NOTE(review): the synchronize result is unchecked -- an asynchronous kernel
// fault would go unnoticed here; consider wrapping it in check_cuda_error.
cudaDeviceSynchronize();
return pitch_h;
}
// Host entry point (extern "C"; signature supplied by the SOLVE_MODEL_ODES_GPU
// macro). Uploads the per-cell stimulus currents (and, for adaptive meshes,
// the list of cell indices to solve), then launches solve_gpu to advance every
// requested cell by num_steps explicit steps of size dt.
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration: one thread per cell to solve
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
// NOTE(review): these device buffers are allocated and freed on every call;
// if this function is invoked once per time window, reusing persistent
// buffers would avoid repeated cudaMalloc/cudaFree overhead.
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
// the cells-to-solve index array is only provided when using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}
// One thread per volume: writes the 17 initial state values into column
// `threadID` of the pitched state matrix (row i = state variable i; the row
// stride in bytes is the device-side `pitch` symbol set by the host).
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions (replace the textbook initial
// conditions in the commented-out block above)
real sv_sst[]={-86.5672984461743,0.00129033409755676,0.779685695864660,0.779520797116827,0.000174877807622397,0.485080153205855,0.00294050217137296,0.999998347773897,1.93380546283175e-08,1.89130845114506e-05,0.999767401669784,1.00695706000729,0.999992992750444,4.84083139845398e-05,0.352299041137461,10.2546628191673,139.474807488311};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
// One thread per cell: advances that cell's 17-variable state by num_steps
// explicit steps of size dt. When cells_to_solve is non-NULL (adaptive mesh)
// it maps the thread index to a cell index; otherwise the thread index is the
// cell index directly.
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
// RHS_gpu writes the already-UPDATED state values (not derivatives)
// into rDY -- see RHS_gpu below.
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
// NOTE(review): this voltage write is immediately overwritten by the
// i == 0 iteration of the loop below ((char*)sv + pitch*0 aliases
// (char*)sv), and rDY[0] already holds the updated voltage, so the
// extra dt*rDY[0] term never survives -- confirm the intended scheme.
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
// Performs one explicit time step of the ten Tusscher 2004 (epicardial)
// ventricular cell model for the cell stored in column `threadID_` of the
// pitched state matrix `sv`. Despite the rDY_ name, the outputs are the
// UPDATED state values, not time derivatives: gate variables (indices 1..10)
// are advanced with Rush-Larsen exponential integration, the fCa and g gates
// (11, 12) use precomputed exponential factors plus a no-increase rule above
// -37 mV, and the concentrations/voltage use forward Euler with step `dt`.
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters: this fitted parameter set overrides the
// default conductances initialised above.
real parameters []={14.4038672892215,0.000367608254336895,0.000123070235263446,0.000417517754191864,0.237777077660337,0.131017927849494,0.169563048009895,4.83724460632959,0.0144876831473433,1.80972776373976,1095.54078625056,0.000417877235006765,0.432451046382988,0.0121272289288792,0.00236477866465391,6.60789521910949e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
// Exponential decay factors used for the fCa and g gate updates below.
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents: Nernst reversal potentials and rectification
//factors.
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations: SR release/leak/uptake (Irel, Ileak, SERCA) and
//buffered Ca handling solved with the quadratic buffering formulas below.
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// Cell-type-specific Ito gate kinetics, selected at compile time.
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates (Rush-Larsen: relax each gate exponentially towards its
//steady state with its voltage-dependent time constant)
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// fCa and g gates are only allowed to decrease while depolarised (> -37 mV).
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage (forward Euler) and write back the remaining states
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
3d6b4fe117a214e6143d229d168e15a2bb8fcfcb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
// 3x3 Sobel gradient-magnitude kernel for an 8-bit single-channel, row-major
// image (HIP port). One thread per pixel; neighbour coordinates are clamped
// at the image border (replicate padding).
__global__ void _sobel_process_kernel_(unsigned char* d_src, unsigned char* d_dst, int row, int col)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int src_center = idy * col + idx;
if(idy >= row || idx >= col)
return;
// Border-clamped neighbour coordinates.
int lidx = (idx <= 0)? 0 : idx - 1;
int ridx = (idx >= col - 1)? col - 1 : idx + 1;
int uidy = (idy <= 0)? 0 : idy - 1;
int didy = (idy >= row - 1)? row - 1 : idy + 1;
int src_left = idy * col + lidx;
int src_right = idy * col + ridx;
int src_up = uidy * col + idx;
int src_up_left = uidy * col + lidx;
int src_up_right = uidy * col + ridx;
int src_down = didy * col + idx;
int src_down_left = didy * col + lidx;
int src_down_right = didy * col + ridx;
float src_left_r = d_src[src_left];
float src_right_r = d_src[src_right];
float src_up_r = d_src[src_up];
float src_up_left_r = d_src[src_up_left];
float src_up_right_r = d_src[src_up_right];
float src_down_r = d_src[src_down];
float src_down_left_r = d_src[src_down_left];
float src_down_right_r = d_src[src_down_right];
// Horizontal (GX) and vertical (GY) Sobel mask responses.
float GX = 1 * src_up_right_r + 2 * src_right_r + 1 * src_down_right_r - 1 * src_up_left_r - 2 * src_left_r - 1 * src_down_left_r;
float GY = 1 * src_up_left_r + 2 * src_up_r + 1 * src_up_right_r - 1 * src_down_left_r - 2 * src_down_r - 1 * src_down_right_r;
float G = sqrt(pow(GX, 2) + pow(GY, 2));
// NOTE(review): G can exceed 255 (up to ~1443 for extreme edges); the cast
// below does not saturate -- confirm that wrap-around is acceptable here.
unsigned char gray = static_cast<unsigned char>(G);
d_dst[src_center] = gray;
}
// De-interleaves a packed 3-byte-per-pixel image into three planar channel
// buffers (HIP port). One thread per pixel, guarded on the flat pixel index,
// so the result is independent of the exact grid/block decomposition.
__global__ void _split_channel_kernel_(unsigned char* d_src, unsigned char* d_r, unsigned char* d_g, unsigned char* d_b, int row, int col)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int id = idy * col + idx;
if(id >= row * col)
return;
d_r[id] = d_src[id * 3 + 0];
d_g[id] = d_src[id * 3 + 1];
d_b[id] = d_src[id * 3 + 2];
}
// Host wrapper (HIP): uploads an 8-bit grayscale image, runs the Sobel kernel
// with 32x32 thread blocks, and copies the edge-magnitude image back to dst.
// NOTE(review): HIP API return codes are not checked anywhere in this file.
extern void _sobel_process_(unsigned char* src, unsigned char* dst, int row, int col)
{
unsigned char* d_src = nullptr;
unsigned char* d_dst = nullptr;
const size_t ARRAY_BYTES = row * col * sizeof(unsigned char);
hipMalloc((void**) &d_src, ARRAY_BYTES);
hipMalloc((void**) &d_dst, ARRAY_BYTES);
hipMemcpy(d_src, src, ARRAY_BYTES, hipMemcpyHostToDevice);
// Ceil-style grid: the kernel bounds-checks, so the +1 overshoot is safe.
dim3 threads(32, 32);
dim3 blocks(col / threads.x + 1, row / threads.y + 1);
hipLaunchKernelGGL(( _sobel_process_kernel_), dim3(blocks), dim3(threads), 0, 0, d_src, d_dst, row, col);
hipDeviceSynchronize();
hipMemcpy(dst, d_dst, ARRAY_BYTES, hipMemcpyDeviceToHost);
hipFree(d_src);
hipFree(d_dst);
}
// Host wrapper (HIP): uploads a packed RGB image and splits it into planar
// R/G/B buffers on the GPU.
extern void _split_channel_(unsigned char* src, unsigned char* r, unsigned char* g, unsigned char* b, int row, int col)
{
unsigned char* d_src = nullptr;
unsigned char* d_r = nullptr;
unsigned char* d_g = nullptr;
unsigned char* d_b = nullptr;
const size_t ARRAY_BYTES = row * col * sizeof(unsigned char);
hipMalloc((void**) &d_src, ARRAY_BYTES * 3);
hipMalloc((void**) &d_r, ARRAY_BYTES);
hipMalloc((void**) &d_g, ARRAY_BYTES);
hipMalloc((void**) &d_b, ARRAY_BYTES);
hipMemcpy(d_src, src, ARRAY_BYTES * 3, hipMemcpyHostToDevice);
// NOTE(review): dim3 threads(1, 1) launches one thread per block (~row*col
// blocks), wasting 31/32 of every warp. A 32x32 block would be safe since
// the kernel guards on the flat pixel index and derives all addressing from
// it -- confirm and align with _sobel_process_ above.
dim3 threads(1, 1);
dim3 blocks(col / threads.x + 1, row / threads.y + 1);
hipLaunchKernelGGL(( _split_channel_kernel_), dim3(blocks), dim3(threads), 0, 0, d_src, d_r, d_g, d_b, row, col);
hipDeviceSynchronize();
hipMemcpy(r, d_r, ARRAY_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(g, d_g, ARRAY_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(b, d_b, ARRAY_BYTES, hipMemcpyDeviceToHost);
hipFree(d_src);
hipFree(d_r);
hipFree(d_g);
hipFree(d_b);
} | 3d6b4fe117a214e6143d229d168e15a2bb8fcfcb.cu | #include <iostream>
// Sobel gradient-magnitude filter for an 8-bit single-channel, row-major
// image. Each thread produces one output pixel; out-of-range neighbour
// coordinates are clamped to the image border (replicate padding).
__global__ void _sobel_process_kernel_(unsigned char* d_src, unsigned char* d_dst, int row, int col)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int center = y * col + x;
    if (y >= row || x >= col)
        return;

    // Border-clamped neighbour coordinates (replicate the edge pixel).
    int xl = (x <= 0) ? 0 : x - 1;
    int xr = (x >= col - 1) ? col - 1 : x + 1;
    int yu = (y <= 0) ? 0 : y - 1;
    int yd = (y >= row - 1) ? row - 1 : y + 1;

    // Load the eight neighbours of the 3x3 window as floats.
    float tl = d_src[yu * col + xl];
    float tc = d_src[yu * col + x];
    float tr = d_src[yu * col + xr];
    float ml = d_src[y * col + xl];
    float mr = d_src[y * col + xr];
    float bl = d_src[yd * col + xl];
    float bc = d_src[yd * col + x];
    float br = d_src[yd * col + xr];

    // Horizontal and vertical Sobel mask responses.
    float GX = 1 * tr + 2 * mr + 1 * br - 1 * tl - 2 * ml - 1 * bl;
    float GY = 1 * tl + 2 * tc + 1 * tr - 1 * bl - 2 * bc - 1 * br;
    float G = sqrt(pow(GX, 2) + pow(GY, 2));

    d_dst[center] = static_cast<unsigned char>(G);
}
// De-interleaves a packed 3-byte-per-pixel (RGB) image into three planar
// channel buffers. One thread per pixel, guarded on the flat pixel index,
// so the result is independent of the grid/block decomposition.
__global__ void _split_channel_kernel_(unsigned char* d_src, unsigned char* d_r, unsigned char* d_g, unsigned char* d_b, int row, int col)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int pixel = y * col + x;
    if (pixel >= row * col)
        return;
    const unsigned char* interleaved = d_src + pixel * 3;
    d_r[pixel] = interleaved[0];
    d_g[pixel] = interleaved[1];
    d_b[pixel] = interleaved[2];
}
// Host wrapper: copies an 8-bit grayscale image to the GPU, runs the Sobel
// kernel with 32x32 thread blocks, and copies the edge-magnitude result back.
extern void _sobel_process_(unsigned char* src, unsigned char* dst, int row, int col)
{
    const size_t nbytes = row * col * sizeof(unsigned char);
    unsigned char* dev_in = nullptr;
    unsigned char* dev_out = nullptr;

    cudaMalloc((void**) &dev_in, nbytes);
    cudaMalloc((void**) &dev_out, nbytes);
    cudaMemcpy(dev_in, src, nbytes, cudaMemcpyHostToDevice);

    // Ceil-style grid: the kernel bounds-checks, so the +1 overshoot is safe.
    dim3 block(32, 32);
    dim3 grid(col / block.x + 1, row / block.y + 1);
    _sobel_process_kernel_<<<grid, block>>>(dev_in, dev_out, row, col);
    cudaDeviceSynchronize();

    cudaMemcpy(dst, dev_out, nbytes, cudaMemcpyDeviceToHost);
    cudaFree(dev_in);
    cudaFree(dev_out);
}
// Host wrapper: uploads a packed RGB image and splits it into three planar
// channel buffers on the GPU.
//
// Fix: the kernel was previously launched with dim3 threads(1, 1) -- a single
// thread per block (~row*col blocks), wasting 31/32 of every warp. A 32x32
// block is safe because _split_channel_kernel_ guards on the flat pixel index
// and derives all addressing from it, so the grid/block decomposition does
// not change the result; this also matches _sobel_process_ above.
extern void _split_channel_(unsigned char* src, unsigned char* r, unsigned char* g, unsigned char* b, int row, int col)
{
    unsigned char* d_src = nullptr;
    unsigned char* d_r = nullptr;
    unsigned char* d_g = nullptr;
    unsigned char* d_b = nullptr;
    const size_t ARRAY_BYTES = row * col * sizeof(unsigned char);
    cudaMalloc((void**) &d_src, ARRAY_BYTES * 3);
    cudaMalloc((void**) &d_r, ARRAY_BYTES);
    cudaMalloc((void**) &d_g, ARRAY_BYTES);
    cudaMalloc((void**) &d_b, ARRAY_BYTES);
    cudaMemcpy(d_src, src, ARRAY_BYTES * 3, cudaMemcpyHostToDevice);
    dim3 threads(32, 32);   // was (1, 1): one thread per block
    dim3 blocks(col / threads.x + 1, row / threads.y + 1);
    _split_channel_kernel_<<<blocks, threads>>>(d_src, d_r, d_g, d_b, row, col);
    cudaDeviceSynchronize();
    cudaMemcpy(r, d_r, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(g, d_g, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(b, d_b, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    cudaFree(d_src);
    cudaFree(d_r);
    cudaFree(d_g);
    cudaFree(d_b);
} |
384f562474aae621d9139a8157fd2e293f797330.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <float.h>
#include <math.h>
#include <time.h>
#include "../../constants.h"
#define N_RADIUS 4
#define N_THREADS_PLANE_DIM 8
#define N_THREADS_THIRD_DIM 4
// Radius-4 (8th-order) finite-difference wave update for the interior region
// [x3,x4) x [y3,y4) x [z3,z4): v <- 2*u - v + vp * lap(u), where lap(u) is a
// 25-point star stencil built entirely from fused-multiply-add intrinsics.
// One thread per grid point; the fastest-varying index (k / z) maps to
// threadIdx.x so consecutive lanes read consecutive u elements.
// hdx_2/hdy_2/hdz_2, phi and eta are unused here (interior has no damping)
// but kept so the signature matches target_pml_3d_kernel.
__global__ void target_inner_3d_kernel(
llint nx, llint ny, llint nz,
llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
llint lx, llint ly, llint lz,
float hdx_2, float hdy_2, float hdz_2,
float coef0,
float coefx_1, float coefx_2, float coefx_3, float coefx_4,
float coefy_1, float coefy_2, float coefy_3, float coefy_4,
float coefz_1, float coefz_2, float coefz_3, float coefz_4,
const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
const float *__restrict__ phi, const float *__restrict__ eta
) {
const llint k0 = z3 + blockIdx.x * blockDim.x;
const llint j0 = y3 + blockIdx.y * blockDim.y;
const llint i0 = x3 + blockIdx.z * blockDim.z;
const llint i = i0 + threadIdx.z;
const llint j = j0 + threadIdx.y;
const llint k = k0 + threadIdx.x;
// Bounds guard: the grid may overshoot the sub-domain extents.
if (i > x4-1 || j > y4-1 || k > z4-1) { return; }
// Laplacian: centre term plus +/-1..4 neighbours along each axis, chained
// through fmaf so every partial sum is a single rounded operation.
float lap = __fmaf_rn(coef0, u[IDX3_l(i,j,k)]
, __fmaf_rn(coefx_1, __fadd_rn(u[IDX3_l(i+1,j,k)],u[IDX3_l(i-1,j,k)])
, __fmaf_rn(coefy_1, __fadd_rn(u[IDX3_l(i,j+1,k)],u[IDX3_l(i,j-1,k)])
, __fmaf_rn(coefz_1, __fadd_rn(u[IDX3_l(i,j,k+1)],u[IDX3_l(i,j,k-1)])
, __fmaf_rn(coefx_2, __fadd_rn(u[IDX3_l(i+2,j,k)],u[IDX3_l(i-2,j,k)])
, __fmaf_rn(coefy_2, __fadd_rn(u[IDX3_l(i,j+2,k)],u[IDX3_l(i,j-2,k)])
, __fmaf_rn(coefz_2, __fadd_rn(u[IDX3_l(i,j,k+2)],u[IDX3_l(i,j,k-2)])
, __fmaf_rn(coefx_3, __fadd_rn(u[IDX3_l(i+3,j,k)],u[IDX3_l(i-3,j,k)])
, __fmaf_rn(coefy_3, __fadd_rn(u[IDX3_l(i,j+3,k)],u[IDX3_l(i,j-3,k)])
, __fmaf_rn(coefz_3, __fadd_rn(u[IDX3_l(i,j,k+3)],u[IDX3_l(i,j,k-3)])
, __fmaf_rn(coefx_4, __fadd_rn(u[IDX3_l(i+4,j,k)],u[IDX3_l(i-4,j,k)])
, __fmaf_rn(coefy_4, __fadd_rn(u[IDX3_l(i,j+4,k)],u[IDX3_l(i,j-4,k)])
, __fmul_rn(coefz_4, __fadd_rn(u[IDX3_l(i,j,k+4)],u[IDX3_l(i,j,k-4)])
)))))))))))));
// Leapfrog update: v_new = 2*u + vp*lap - v_old.
v[IDX3_l(i,j,k)] = __fmaf_rn(2.f, u[IDX3_l(i,j,k)],
__fmaf_rn(vp[IDX3(i,j,k)], lap, -v[IDX3_l(i,j,k)])
);
}
// Radius-4 finite-difference wave update for a PML (absorbing boundary)
// sub-region [x3,x4) x [y3,y4) x [z3,z4). Uses the same 25-point Laplacian
// as target_inner_3d_kernel, plus the damping profile `eta` and the
// auxiliary memory field `phi`, which it updates in place.
__global__ void target_pml_3d_kernel(
llint nx, llint ny, llint nz,
llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
llint lx, llint ly, llint lz,
float hdx_2, float hdy_2, float hdz_2,
float coef0,
float coefx_1, float coefx_2, float coefx_3, float coefx_4,
float coefy_1, float coefy_2, float coefy_3, float coefy_4,
float coefz_1, float coefz_2, float coefz_3, float coefz_4,
const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
float *__restrict__ phi, const float *__restrict__ eta
) {
const llint k0 = z3 + blockIdx.x * blockDim.x;
const llint j0 = y3 + blockIdx.y * blockDim.y;
const llint i0 = x3 + blockIdx.z * blockDim.z;
const llint i = i0 + threadIdx.z;
const llint j = j0 + threadIdx.y;
const llint k = k0 + threadIdx.x;
// Bounds guard: the grid may overshoot the sub-domain extents.
if (i > x4-1 || j > y4-1 || k > z4-1) { return; }
// 25-point star Laplacian, identical to the interior kernel.
float lap = __fmaf_rn(coef0, u[IDX3_l(i,j,k)]
, __fmaf_rn(coefx_1, __fadd_rn(u[IDX3_l(i+1,j,k)],u[IDX3_l(i-1,j,k)])
, __fmaf_rn(coefy_1, __fadd_rn(u[IDX3_l(i,j+1,k)],u[IDX3_l(i,j-1,k)])
, __fmaf_rn(coefz_1, __fadd_rn(u[IDX3_l(i,j,k+1)],u[IDX3_l(i,j,k-1)])
, __fmaf_rn(coefx_2, __fadd_rn(u[IDX3_l(i+2,j,k)],u[IDX3_l(i-2,j,k)])
, __fmaf_rn(coefy_2, __fadd_rn(u[IDX3_l(i,j+2,k)],u[IDX3_l(i,j-2,k)])
, __fmaf_rn(coefz_2, __fadd_rn(u[IDX3_l(i,j,k+2)],u[IDX3_l(i,j,k-2)])
, __fmaf_rn(coefx_3, __fadd_rn(u[IDX3_l(i+3,j,k)],u[IDX3_l(i-3,j,k)])
, __fmaf_rn(coefy_3, __fadd_rn(u[IDX3_l(i,j+3,k)],u[IDX3_l(i,j-3,k)])
, __fmaf_rn(coefz_3, __fadd_rn(u[IDX3_l(i,j,k+3)],u[IDX3_l(i,j,k-3)])
, __fmaf_rn(coefx_4, __fadd_rn(u[IDX3_l(i+4,j,k)],u[IDX3_l(i-4,j,k)])
, __fmaf_rn(coefy_4, __fadd_rn(u[IDX3_l(i,j+4,k)],u[IDX3_l(i,j-4,k)])
, __fmul_rn(coefz_4, __fadd_rn(u[IDX3_l(i,j,k+4)],u[IDX3_l(i,j,k-4)])
)))))))))))));
// Local damping coefficient for this grid point.
const float s_eta_c = eta[IDX3_eta1(i,j,k)];
// Damped leapfrog update:
// v = ((2 + 2*eta - eta^2)*u + vp*(lap + phi) - v) / (1 + 2*eta).
v[IDX3_l(i,j,k)] = __fdiv_rn(
__fmaf_rn(
__fmaf_rn(2.f, s_eta_c,
__fsub_rn(2.f,
__fmul_rn(s_eta_c, s_eta_c)
)
),
u[IDX3_l(i,j,k)],
__fmaf_rn(
vp[IDX3(i,j,k)],
__fadd_rn(lap, phi[IDX3(i,j,k)]),
-v[IDX3_l(i,j,k)]
)
),
__fmaf_rn(2.f, s_eta_c, 1.f)
);
// Auxiliary field update: subtract the dot product of the centred
// differences of eta and u (scaled by the half-grid factors hd*_2),
// then relax by 1/(1 + eta).
phi[IDX3(i,j,k)] = __fdiv_rn(
__fsub_rn(
phi[IDX3(i,j,k)],
__fmaf_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i+1,j,k)], eta[IDX3_eta1(i-1,j,k)]),
__fsub_rn(u[IDX3_l(i+1,j,k)], u[IDX3_l(i-1,j,k)])
), hdx_2,
__fmaf_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i,j+1,k)], eta[IDX3_eta1(i,j-1,k)]),
__fsub_rn(u[IDX3_l(i,j+1,k)], u[IDX3_l(i,j-1,k)])
), hdy_2,
__fmul_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i,j,k+1)], eta[IDX3_eta1(i,j,k-1)]),
__fsub_rn(u[IDX3_l(i,j,k+1)], u[IDX3_l(i,j,k-1)])
),
hdz_2)
))
)
,
__fadd_rn(1.f, s_eta_c)
);
}
// Injects the time-step source amplitude into a single flattened element of
// g_u. Launched with a 1x1 configuration (one thread) by the host driver.
__global__ void kernel_add_source_kernel(float *g_u, llint idx, float source) {
    g_u[idx] = g_u[idx] + source;
}
// Host driver (HIP) for the wave-propagation time loop. Uploads u/v/vp/phi/
// eta, then for each of `nsteps` steps launches seven kernels on seven
// streams so the six PML boundary slabs (front/back along z, top/bottom
// along y, left/right along x) run concurrently with the interior update;
// all streams are synchronised, the source term for the step is injected at
// (sx,sy,sz) on the default stream, and the u/v buffers are ping-pong
// swapped. The accumulated per-step wall time is returned via time_kernel
// and the final wavefield is copied back into `u`.
extern "C" void target(
uint nsteps, double *time_kernel,
llint nx, llint ny, llint nz,
llint x1, llint x2, llint x3, llint x4, llint x5, llint x6,
llint y1, llint y2, llint y3, llint y4, llint y5, llint y6,
llint z1, llint z2, llint z3, llint z4, llint z5, llint z6,
llint lx, llint ly, llint lz,
llint sx, llint sy, llint sz,
float hdx_2, float hdy_2, float hdz_2,
const float *__restrict__ coefx, const float *__restrict__ coefy, const float *__restrict__ coefz,
float *__restrict__ u, const float *__restrict__ v, const float *__restrict__ vp,
const float *__restrict__ phi, const float *__restrict__ eta, const float *__restrict__ source
) {
struct timespec start, end;
// u/v carry a halo of lx/ly/lz cells per side; eta has a one-cell halo.
const llint size_u = (nx + 2 * lx) * (ny + 2 * ly) * (nz + 2 * lz);
const llint size_v = size_u;
const llint size_phi = nx*ny*nz;
const llint size_vp = size_phi;
const llint size_eta = (nx+2)*(ny+2)*(nz+2);
float *d_u, *d_v, *d_vp, *d_phi, *d_eta;
hipMalloc(&d_u, sizeof(float) * size_u);
hipMalloc(&d_v, sizeof(float) * size_u);
hipMalloc(&d_vp, sizeof(float) * size_vp);
hipMalloc(&d_phi, sizeof(float) * size_phi);
hipMalloc(&d_eta, sizeof(float) * size_eta);
hipMemcpy(d_u, u, sizeof(float) * size_u, hipMemcpyHostToDevice);
hipMemcpy(d_v, v, sizeof(float) * size_v, hipMemcpyHostToDevice);
hipMemcpy(d_vp, vp, sizeof(float) * size_vp, hipMemcpyHostToDevice);
hipMemcpy(d_phi, phi, sizeof(float) * size_phi, hipMemcpyHostToDevice);
hipMemcpy(d_eta, eta, sizeof(float) * size_eta, hipMemcpyHostToDevice);
const llint xmin = 0; const llint xmax = nx;
const llint ymin = 0; const llint ymax = ny;
dim3 threadsPerBlock(N_THREADS_THIRD_DIM, N_THREADS_PLANE_DIM, N_THREADS_PLANE_DIM);
// One stream for the interior (index 0) plus six for the PML slabs.
int num_streams = 7;
hipStream_t streams[num_streams];
for (int i = 0; i < num_streams; i++) {
hipStreamCreate(&(streams[i]));
}
const uint npo = 100;
for (uint istep = 1; istep <= nsteps; ++istep) {
clock_gettime(CLOCK_REALTIME, &start);
// Front PML slab: full x/y extent, z in [z1,z2).
dim3 n_block_front(
(z2-z1+N_THREADS_THIRD_DIM-1) / N_THREADS_THIRD_DIM,
(ny+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM,
(nx+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_front), dim3(threadsPerBlock), 0, streams[1], nx,ny,nz,
xmin,xmax,ymin,ymax,z1,z2,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
// Top PML slab: y in [y1,y2), z restricted to the interior band [z3,z4).
dim3 n_block_top(
(z4-z3+N_THREADS_THIRD_DIM-1) / N_THREADS_THIRD_DIM,
(y2-y1+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM,
(nx+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_top), dim3(threadsPerBlock), 0, streams[2], nx,ny,nz,
xmin,xmax,y1,y2,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
// Left PML slab: x in [x1,x2), interior y/z bands.
dim3 n_block_left(
(z4-z3+N_THREADS_THIRD_DIM-1) / N_THREADS_THIRD_DIM,
(y4-y3+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM,
(x2-x1+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_left), dim3(threadsPerBlock), 0, streams[3], nx,ny,nz,
x1,x2,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
// Undamped interior region [x3,x4) x [y3,y4) x [z3,z4) on stream 0.
dim3 n_block_center(
(z4-z3+N_THREADS_THIRD_DIM-1) / N_THREADS_THIRD_DIM,
(y4-y3+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM,
(x4-x3+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM);
hipLaunchKernelGGL(( target_inner_3d_kernel), dim3(n_block_center), dim3(threadsPerBlock), 0, streams[0], nx,ny,nz,
x3,x4,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
// Right PML slab: x in [x5,x6).
dim3 n_block_right(
(z4-z3+N_THREADS_THIRD_DIM-1) / N_THREADS_THIRD_DIM,
(y4-y3+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM,
(x6-x5+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_right), dim3(threadsPerBlock), 0, streams[4], nx,ny,nz,
x5,x6,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
// Bottom PML slab: y in [y5,y6).
dim3 n_block_bottom(
(z4-z3+N_THREADS_THIRD_DIM-1) / N_THREADS_THIRD_DIM,
(y6-y5+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM,
(nx+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_bottom), dim3(threadsPerBlock), 0, streams[5], nx,ny,nz,
xmin,xmax,y5,y6,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
// Back PML slab: z in [z5,z6).
dim3 n_block_back(
(z6-z5+N_THREADS_THIRD_DIM-1) / N_THREADS_THIRD_DIM,
(ny+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM,
(nx+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_back), dim3(threadsPerBlock), 0, streams[6], nx,ny,nz,
xmin,xmax,ymin,ymax,z5,z6,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
// Wait for all seven region updates before injecting the source into v.
for (int i = 0; i < num_streams; i++) {
hipStreamSynchronize(streams[i]);
}
hipLaunchKernelGGL(( kernel_add_source_kernel), dim3(1), dim3(1), 0, 0, d_v, IDX3_l(sx,sy,sz), source[istep]);
// NOTE(review): the end timestamp is taken right after the (asynchronous)
// source-injection launch, so the measured step time presumably excludes
// that kernel's execution -- confirm this is intended.
clock_gettime(CLOCK_REALTIME, &end);
*time_kernel += (end.tv_sec - start.tv_sec) +
(double)(end.tv_nsec - start.tv_nsec) / 1.0e9;
// Ping-pong swap: the freshly computed v becomes u for the next step.
float *t = d_u;
d_u = d_v;
d_v = t;
// Print out
if (istep % npo == 0) {
printf("time step %u / %u\n", istep, nsteps);
}
}
for (int i = 0; i < num_streams; i++) {
hipStreamDestroy(streams[i]);
}
// After the final swap d_u holds the newest wavefield; copy it back.
hipMemcpy(u, d_u, sizeof(float) * size_u, hipMemcpyDeviceToHost);
hipFree(d_u);
hipFree(d_v);
hipFree(d_vp);
hipFree(d_phi);
hipFree(d_eta);
}
| 384f562474aae621d9139a8157fd2e293f797330.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <float.h>
#include <math.h>
#include <time.h>
#include "../../constants.h"
#define N_RADIUS 4
#define N_THREADS_PLANE_DIM 8
#define N_THREADS_THIRD_DIM 4
// Interior (non-PML) update of the 3-D wave field.
//
// Computes a radius-4 (8th-order) finite-difference Laplacian of u at
// (i, j, k) and advances v with the leapfrog scheme
//     v_new = 2*u - v_old + vp * lap.
// Thread mapping: threadIdx.x walks k (z, the fastest-varying storage axis),
// threadIdx.y walks j (y), threadIdx.z walks i (x); the grid origin is offset
// by (x3, y3, z3) and threads past (x4, y4, z4) exit immediately.
// hdx_2/hdy_2/hdz_2, phi and eta are unused here — the parameter list is kept
// identical to target_pml_3d_kernel so both kernels share one launch shape.
__global__ void target_inner_3d_kernel(
    llint nx, llint ny, llint nz,
    llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
    llint lx, llint ly, llint lz,
    float hdx_2, float hdy_2, float hdz_2,
    float coef0,
    float coefx_1, float coefx_2, float coefx_3, float coefx_4,
    float coefy_1, float coefy_2, float coefy_3, float coefy_4,
    float coefz_1, float coefz_2, float coefz_3, float coefz_4,
    const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
    const float *__restrict__ phi, const float *__restrict__ eta
) {
    // Block origin within the target box; thread offsets added per axis.
    const llint k0 = z3 + blockIdx.x * blockDim.x;
    const llint j0 = y3 + blockIdx.y * blockDim.y;
    const llint i0 = x3 + blockIdx.z * blockDim.z;
    const llint i = i0 + threadIdx.z;
    const llint j = j0 + threadIdx.y;
    const llint k = k0 + threadIdx.x;
    if (i > x4-1 || j > y4-1 || k > z4-1) { return; }
    // Radius-4 Laplacian accumulated as one nested chain of round-to-nearest
    // FMAs; the nesting fixes the floating-point evaluation order. Symmetric
    // neighbor pairs are added first (__fadd_rn) then scaled by the per-axis,
    // per-distance coefficients.
    float lap = __fmaf_rn(coef0, u[IDX3_l(i,j,k)]
    , __fmaf_rn(coefx_1, __fadd_rn(u[IDX3_l(i+1,j,k)],u[IDX3_l(i-1,j,k)])
    , __fmaf_rn(coefy_1, __fadd_rn(u[IDX3_l(i,j+1,k)],u[IDX3_l(i,j-1,k)])
    , __fmaf_rn(coefz_1, __fadd_rn(u[IDX3_l(i,j,k+1)],u[IDX3_l(i,j,k-1)])
    , __fmaf_rn(coefx_2, __fadd_rn(u[IDX3_l(i+2,j,k)],u[IDX3_l(i-2,j,k)])
    , __fmaf_rn(coefy_2, __fadd_rn(u[IDX3_l(i,j+2,k)],u[IDX3_l(i,j-2,k)])
    , __fmaf_rn(coefz_2, __fadd_rn(u[IDX3_l(i,j,k+2)],u[IDX3_l(i,j,k-2)])
    , __fmaf_rn(coefx_3, __fadd_rn(u[IDX3_l(i+3,j,k)],u[IDX3_l(i-3,j,k)])
    , __fmaf_rn(coefy_3, __fadd_rn(u[IDX3_l(i,j+3,k)],u[IDX3_l(i,j-3,k)])
    , __fmaf_rn(coefz_3, __fadd_rn(u[IDX3_l(i,j,k+3)],u[IDX3_l(i,j,k-3)])
    , __fmaf_rn(coefx_4, __fadd_rn(u[IDX3_l(i+4,j,k)],u[IDX3_l(i-4,j,k)])
    , __fmaf_rn(coefy_4, __fadd_rn(u[IDX3_l(i,j+4,k)],u[IDX3_l(i,j-4,k)])
    , __fmul_rn(coefz_4, __fadd_rn(u[IDX3_l(i,j,k+4)],u[IDX3_l(i,j,k-4)])
    )))))))))))));
    // Leapfrog time step: v_new = 2*u + vp*lap - v_old.
    v[IDX3_l(i,j,k)] = __fmaf_rn(2.f, u[IDX3_l(i,j,k)],
        __fmaf_rn(vp[IDX3(i,j,k)], lap, -v[IDX3_l(i,j,k)])
    );
}
// PML (absorbing boundary) update of the 3-D wave field.
//
// Same radius-4 Laplacian and thread mapping as target_inner_3d_kernel, but
// the time step is damped by the profile eta and driven by the auxiliary
// memory variable phi:
//   v_new   = ((2 + 2*eta - eta^2)*u + vp*(lap + phi) - v_old) / (1 + 2*eta)
//   phi_new = (phi - (Deta_x*Du_x*hdx_2 + Deta_y*Du_y*hdy_2
//                     + Deta_z*Du_z*hdz_2)) / (1 + eta)
// where Deta_*/Du_* are central differences of eta and u along each axis.
// eta is sampled through IDX3_eta1 (its array carries a 1-cell halo); phi and
// vp are unpadded nx*ny*nz arrays indexed with IDX3.
__global__ void target_pml_3d_kernel(
    llint nx, llint ny, llint nz,
    llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
    llint lx, llint ly, llint lz,
    float hdx_2, float hdy_2, float hdz_2,
    float coef0,
    float coefx_1, float coefx_2, float coefx_3, float coefx_4,
    float coefy_1, float coefy_2, float coefy_3, float coefy_4,
    float coefz_1, float coefz_2, float coefz_3, float coefz_4,
    const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
    float *__restrict__ phi, const float *__restrict__ eta
) {
    // Block origin within the target box; thread offsets added per axis.
    const llint k0 = z3 + blockIdx.x * blockDim.x;
    const llint j0 = y3 + blockIdx.y * blockDim.y;
    const llint i0 = x3 + blockIdx.z * blockDim.z;
    const llint i = i0 + threadIdx.z;
    const llint j = j0 + threadIdx.y;
    const llint k = k0 + threadIdx.x;
    if (i > x4-1 || j > y4-1 || k > z4-1) { return; }
    // Radius-4 Laplacian, identical FMA chain (and FP ordering) to the inner
    // kernel.
    float lap = __fmaf_rn(coef0, u[IDX3_l(i,j,k)]
    , __fmaf_rn(coefx_1, __fadd_rn(u[IDX3_l(i+1,j,k)],u[IDX3_l(i-1,j,k)])
    , __fmaf_rn(coefy_1, __fadd_rn(u[IDX3_l(i,j+1,k)],u[IDX3_l(i,j-1,k)])
    , __fmaf_rn(coefz_1, __fadd_rn(u[IDX3_l(i,j,k+1)],u[IDX3_l(i,j,k-1)])
    , __fmaf_rn(coefx_2, __fadd_rn(u[IDX3_l(i+2,j,k)],u[IDX3_l(i-2,j,k)])
    , __fmaf_rn(coefy_2, __fadd_rn(u[IDX3_l(i,j+2,k)],u[IDX3_l(i,j-2,k)])
    , __fmaf_rn(coefz_2, __fadd_rn(u[IDX3_l(i,j,k+2)],u[IDX3_l(i,j,k-2)])
    , __fmaf_rn(coefx_3, __fadd_rn(u[IDX3_l(i+3,j,k)],u[IDX3_l(i-3,j,k)])
    , __fmaf_rn(coefy_3, __fadd_rn(u[IDX3_l(i,j+3,k)],u[IDX3_l(i,j-3,k)])
    , __fmaf_rn(coefz_3, __fadd_rn(u[IDX3_l(i,j,k+3)],u[IDX3_l(i,j,k-3)])
    , __fmaf_rn(coefx_4, __fadd_rn(u[IDX3_l(i+4,j,k)],u[IDX3_l(i-4,j,k)])
    , __fmaf_rn(coefy_4, __fadd_rn(u[IDX3_l(i,j+4,k)],u[IDX3_l(i,j-4,k)])
    , __fmul_rn(coefz_4, __fadd_rn(u[IDX3_l(i,j,k+4)],u[IDX3_l(i,j,k-4)])
    )))))))))))));
    const float s_eta_c = eta[IDX3_eta1(i,j,k)];
    // Damped leapfrog update (see formula in the header comment).
    v[IDX3_l(i,j,k)] = __fdiv_rn(
        __fmaf_rn(
            __fmaf_rn(2.f, s_eta_c,
                __fsub_rn(2.f,
                    __fmul_rn(s_eta_c, s_eta_c)
                )
            ),
            u[IDX3_l(i,j,k)],
            __fmaf_rn(
                vp[IDX3(i,j,k)],
                __fadd_rn(lap, phi[IDX3(i,j,k)]),
                -v[IDX3_l(i,j,k)]
            )
        ),
        __fmaf_rn(2.f, s_eta_c, 1.f)
    );
    // Auxiliary memory variable: phi is relaxed by grad(eta)·grad(u),
    // with each axis term scaled by the corresponding half-inverse-step
    // squared factor (hdx_2/hdy_2/hdz_2).
    phi[IDX3(i,j,k)] = __fdiv_rn(
        __fsub_rn(
            phi[IDX3(i,j,k)],
            __fmaf_rn(
                __fmul_rn(
                    __fsub_rn(eta[IDX3_eta1(i+1,j,k)], eta[IDX3_eta1(i-1,j,k)]),
                    __fsub_rn(u[IDX3_l(i+1,j,k)], u[IDX3_l(i-1,j,k)])
                ), hdx_2,
                __fmaf_rn(
                    __fmul_rn(
                        __fsub_rn(eta[IDX3_eta1(i,j+1,k)], eta[IDX3_eta1(i,j-1,k)]),
                        __fsub_rn(u[IDX3_l(i,j+1,k)], u[IDX3_l(i,j-1,k)])
                    ), hdy_2,
                    __fmul_rn(
                        __fmul_rn(
                            __fsub_rn(eta[IDX3_eta1(i,j,k+1)], eta[IDX3_eta1(i,j,k-1)]),
                            __fsub_rn(u[IDX3_l(i,j,k+1)], u[IDX3_l(i,j,k-1)])
                        ),
                        hdz_2)
                ))
        )
        ,
        __fadd_rn(1.f, s_eta_c)
    );
}
// Injects one source-term sample into the field in place.
// Launched with a single thread (<<<1, 1>>>); idx is the flattened,
// halo-padded offset of the injection point.
__global__ void kernel_add_source_kernel(float *g_u, llint idx, float source) {
    const float updated = g_u[idx] + source;
    g_u[idx] = updated;
}
// Host driver: runs `nsteps` leapfrog time steps of the 3-D wave propagation.
//
// Each step partitions the nx*ny*nz domain into 7 disjoint boxes launched on
// 7 streams:
//   streams[0]      interior box [x3,x4)x[y3,y4)x[z3,z4) -> plain stencil
//   streams[1..6]   the six surrounding PML shells
//                   (front/back along z, top/bottom along y,
//                    left/right along x).
// After all streams synchronize, the step's source sample is injected at
// (sx, sy, sz) on the default stream and the u/v device buffers are swapped.
//
// u is in/out: on return it holds the final field. *time_kernel accumulates
// measured wall-clock time of the per-step compute section.
// NOTE(review): no CUDA API call or launch is error-checked here; a failed
// allocation would silently propagate. `source` is read at indices
// 1..nsteps — presumably sized at least nsteps+1; confirm with the caller.
extern "C" void target(
    uint nsteps, double *time_kernel,
    llint nx, llint ny, llint nz,
    llint x1, llint x2, llint x3, llint x4, llint x5, llint x6,
    llint y1, llint y2, llint y3, llint y4, llint y5, llint y6,
    llint z1, llint z2, llint z3, llint z4, llint z5, llint z6,
    llint lx, llint ly, llint lz,
    llint sx, llint sy, llint sz,
    float hdx_2, float hdy_2, float hdz_2,
    const float *__restrict__ coefx, const float *__restrict__ coefy, const float *__restrict__ coefz,
    float *__restrict__ u, const float *__restrict__ v, const float *__restrict__ vp,
    const float *__restrict__ phi, const float *__restrict__ eta, const float *__restrict__ source
) {
    struct timespec start, end;
    // u/v carry an lx/ly/lz halo per side; eta carries a 1-cell halo;
    // vp and phi are unpadded.
    const llint size_u = (nx + 2 * lx) * (ny + 2 * ly) * (nz + 2 * lz);
    const llint size_v = size_u;
    const llint size_phi = nx*ny*nz;
    const llint size_vp = size_phi;
    const llint size_eta = (nx+2)*(ny+2)*(nz+2);
    // Device mirrors of all field arrays.
    float *d_u, *d_v, *d_vp, *d_phi, *d_eta;
    cudaMalloc(&d_u, sizeof(float) * size_u);
    cudaMalloc(&d_v, sizeof(float) * size_u);
    cudaMalloc(&d_vp, sizeof(float) * size_vp);
    cudaMalloc(&d_phi, sizeof(float) * size_phi);
    cudaMalloc(&d_eta, sizeof(float) * size_eta);
    cudaMemcpy(d_u, u, sizeof(float) * size_u, cudaMemcpyHostToDevice);
    cudaMemcpy(d_v, v, sizeof(float) * size_v, cudaMemcpyHostToDevice);
    cudaMemcpy(d_vp, vp, sizeof(float) * size_vp, cudaMemcpyHostToDevice);
    cudaMemcpy(d_phi, phi, sizeof(float) * size_phi, cudaMemcpyHostToDevice);
    cudaMemcpy(d_eta, eta, sizeof(float) * size_eta, cudaMemcpyHostToDevice);
    // Full extents used by the shells that span the whole x/y range.
    const llint xmin = 0; const llint xmax = nx;
    const llint ymin = 0; const llint ymax = ny;
    dim3 threadsPerBlock(N_THREADS_THIRD_DIM, N_THREADS_PLANE_DIM, N_THREADS_PLANE_DIM);
    // One stream per region.
    // NOTE(review): runtime-length array relies on a compiler extension.
    int num_streams = 7;
    cudaStream_t streams[num_streams];
    for (int i = 0; i < num_streams; i++) {
        cudaStreamCreate(&(streams[i]));
    }
    const uint npo = 100; // progress print interval (steps)
    for (uint istep = 1; istep <= nsteps; ++istep) {
        clock_gettime(CLOCK_REALTIME, &start);
        // Front PML shell: full x/y range, z in [z1, z2).
        dim3 n_block_front(
            (z2-z1+N_THREADS_THIRD_DIM-1) / N_THREADS_THIRD_DIM,
            (ny+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM,
            (nx+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM);
        target_pml_3d_kernel<<<n_block_front, threadsPerBlock, 0, streams[1]>>>(nx,ny,nz,
            xmin,xmax,ymin,ymax,z1,z2,
            lx,ly,lz,
            hdx_2, hdy_2, hdz_2,
            coefx[0]+coefy[0]+coefz[0],
            coefx[1], coefx[2], coefx[3], coefx[4],
            coefy[1], coefy[2], coefy[3], coefy[4],
            coefz[1], coefz[2], coefz[3], coefz[4],
            d_u, d_v, d_vp,
            d_phi, d_eta);
        // Top PML shell: full x range, y in [y1, y2), z in [z3, z4).
        dim3 n_block_top(
            (z4-z3+N_THREADS_THIRD_DIM-1) / N_THREADS_THIRD_DIM,
            (y2-y1+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM,
            (nx+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM);
        target_pml_3d_kernel<<<n_block_top, threadsPerBlock, 0, streams[2]>>>(nx,ny,nz,
            xmin,xmax,y1,y2,z3,z4,
            lx,ly,lz,
            hdx_2, hdy_2, hdz_2,
            coefx[0]+coefy[0]+coefz[0],
            coefx[1], coefx[2], coefx[3], coefx[4],
            coefy[1], coefy[2], coefy[3], coefy[4],
            coefz[1], coefz[2], coefz[3], coefz[4],
            d_u, d_v, d_vp,
            d_phi, d_eta);
        // Left PML shell: x in [x1, x2), interior y/z.
        dim3 n_block_left(
            (z4-z3+N_THREADS_THIRD_DIM-1) / N_THREADS_THIRD_DIM,
            (y4-y3+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM,
            (x2-x1+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM);
        target_pml_3d_kernel<<<n_block_left, threadsPerBlock, 0, streams[3]>>>(nx,ny,nz,
            x1,x2,y3,y4,z3,z4,
            lx,ly,lz,
            hdx_2, hdy_2, hdz_2,
            coefx[0]+coefy[0]+coefz[0],
            coefx[1], coefx[2], coefx[3], coefx[4],
            coefy[1], coefy[2], coefy[3], coefy[4],
            coefz[1], coefz[2], coefz[3], coefz[4],
            d_u, d_v, d_vp,
            d_phi, d_eta);
        // Interior box: plain (non-PML) stencil kernel.
        dim3 n_block_center(
            (z4-z3+N_THREADS_THIRD_DIM-1) / N_THREADS_THIRD_DIM,
            (y4-y3+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM,
            (x4-x3+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM);
        target_inner_3d_kernel<<<n_block_center, threadsPerBlock, 0, streams[0]>>>(nx,ny,nz,
            x3,x4,y3,y4,z3,z4,
            lx,ly,lz,
            hdx_2, hdy_2, hdz_2,
            coefx[0]+coefy[0]+coefz[0],
            coefx[1], coefx[2], coefx[3], coefx[4],
            coefy[1], coefy[2], coefy[3], coefy[4],
            coefz[1], coefz[2], coefz[3], coefz[4],
            d_u, d_v, d_vp,
            d_phi, d_eta);
        // Right PML shell: x in [x5, x6), interior y/z.
        dim3 n_block_right(
            (z4-z3+N_THREADS_THIRD_DIM-1) / N_THREADS_THIRD_DIM,
            (y4-y3+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM,
            (x6-x5+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM);
        target_pml_3d_kernel<<<n_block_right, threadsPerBlock, 0, streams[4]>>>(nx,ny,nz,
            x5,x6,y3,y4,z3,z4,
            lx,ly,lz,
            hdx_2, hdy_2, hdz_2,
            coefx[0]+coefy[0]+coefz[0],
            coefx[1], coefx[2], coefx[3], coefx[4],
            coefy[1], coefy[2], coefy[3], coefy[4],
            coefz[1], coefz[2], coefz[3], coefz[4],
            d_u, d_v, d_vp,
            d_phi, d_eta);
        // Bottom PML shell: full x range, y in [y5, y6), z in [z3, z4).
        dim3 n_block_bottom(
            (z4-z3+N_THREADS_THIRD_DIM-1) / N_THREADS_THIRD_DIM,
            (y6-y5+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM,
            (nx+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM);
        target_pml_3d_kernel<<<n_block_bottom, threadsPerBlock, 0, streams[5]>>>(nx,ny,nz,
            xmin,xmax,y5,y6,z3,z4,
            lx,ly,lz,
            hdx_2, hdy_2, hdz_2,
            coefx[0]+coefy[0]+coefz[0],
            coefx[1], coefx[2], coefx[3], coefx[4],
            coefy[1], coefy[2], coefy[3], coefy[4],
            coefz[1], coefz[2], coefz[3], coefz[4],
            d_u, d_v, d_vp,
            d_phi, d_eta);
        // Back PML shell: full x/y range, z in [z5, z6).
        dim3 n_block_back(
            (z6-z5+N_THREADS_THIRD_DIM-1) / N_THREADS_THIRD_DIM,
            (ny+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM,
            (nx+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM);
        target_pml_3d_kernel<<<n_block_back, threadsPerBlock, 0, streams[6]>>>(nx,ny,nz,
            xmin,xmax,ymin,ymax,z5,z6,
            lx,ly,lz,
            hdx_2, hdy_2, hdz_2,
            coefx[0]+coefy[0]+coefz[0],
            coefx[1], coefx[2], coefx[3], coefx[4],
            coefy[1], coefy[2], coefy[3], coefy[4],
            coefz[1], coefz[2], coefz[3], coefz[4],
            d_u, d_v, d_vp,
            d_phi, d_eta);
        // Wait for all 7 regions before injecting the source for this step.
        for (int i = 0; i < num_streams; i++) {
            cudaStreamSynchronize(streams[i]);
        }
        // Source injection on the default stream (single thread).
        // NOTE(review): `end` is taken without syncing this tiny launch, so
        // the timing may slightly undercount it.
        kernel_add_source_kernel<<<1, 1>>>(d_v, IDX3_l(sx,sy,sz), source[istep]);
        clock_gettime(CLOCK_REALTIME, &end);
        *time_kernel += (end.tv_sec - start.tv_sec) +
            (double)(end.tv_nsec - start.tv_nsec) / 1.0e9;
        // Leapfrog buffer swap: the freshly written field becomes d_u.
        float *t = d_u;
        d_u = d_v;
        d_v = t;
        // Print out
        if (istep % npo == 0) {
            printf("time step %u / %u\n", istep, nsteps);
        }
    }
    for (int i = 0; i < num_streams; i++) {
        cudaStreamDestroy(streams[i]);
    }
    // After the final swap d_u points at the newest field; copy it back.
    cudaMemcpy(u, d_u, sizeof(float) * size_u, cudaMemcpyDeviceToHost);
    cudaFree(d_u);
    cudaFree(d_v);
    cudaFree(d_vp);
    cudaFree(d_phi);
    cudaFree(d_eta);
}
|
19ff23096ae9499191c5c46ed8687a18f005b284.hip | // !!! This is a file automatically generated by hipify!!!
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include "ppl/common/types.h"
#include "cudakernel/nn/conv/group_padding.h"
#include "cudakernel/common/divmod_fast.h"
#include "conv_common.h"
// Repacks a tensor whose channel row stores `num_grp` packed groups of
// `num_chl_per_grp` real channels (inside a `num_chl_pad`-wide row) into a
// per-group padded layout of `num_grp * num_chl_per_grp_pad` channels per
// position; padding channels are written as zero. One thread per OUTPUT
// element; threads at or past `out_size` do nothing.
template <typename T>
__global__ void group_padding(T *output, T *input, uint64_t out_size, const int num_grp, const int num_chl_per_grp, const int num_chl_pad, int num_chl_per_grp_pad)
{
    uint64_t out_off = blockIdx.x * blockDim.x + threadIdx.x;
    // in this case, num_chl_per_grp is naturally not aligned with padding size,
    // so we just use T to access memory.
    T value = 0;
    int chl_id_in_grp = out_off % (num_chl_per_grp_pad); // channel index within one padded group
    uint64_t nhw_id = out_off / (num_chl_per_grp_pad * num_grp); // flattened spatial (N*H*W) position
    int total_chl_id = out_off - nhw_id * num_chl_per_grp_pad * num_grp;
    int grp_id = total_chl_id / num_chl_per_grp_pad;
    // Source offset: groups sit back-to-back (stride num_chl_per_grp) inside
    // a num_chl_pad-wide channel row.
    uint64_t in_off = nhw_id * num_chl_pad + grp_id * num_chl_per_grp + chl_id_in_grp;
    if (out_off < out_size) {
        // Real channels are copied; the per-group padding tail stays zero.
        if (chl_id_in_grp < num_chl_per_grp)
            value = input[in_off];
        output[out_off] = value;
    }
}
// Scatters an input with `num_chl` packed channels per position into the
// group-padded layout (`num_grp * num_chl_per_grp_pad` channels per
// position), zero-filling the padding channels. One thread per output
// element; fast_div_channel is a precomputed div/mod by num_chl_per_grp_pad.
// NOTE(review): out_off is int32 while out_size is uint64 — outputs larger
// than 2^31-1 elements would overflow the index; confirm sizes stay below.
template <typename T>
__global__ void split_group(
    T *output,
    T *input,
    DivModFast fast_div_channel,
    uint64_t out_size,
    const int num_grp,
    const int num_chl_per_grp,
    const int num_chl,
    int num_chl_per_grp_pad)
{
    int32_t out_off = blockIdx.x * blockDim.x + threadIdx.x;
    if (out_off >= out_size)
        return;
    int32_t channel = fast_div_channel.mod(out_off); // channel inside the padded group
    bool in_range = channel < num_chl_per_grp;       // real channel vs. padding slot
    int32_t nhw_id = out_off / (num_chl_per_grp_pad * num_grp);
    int32_t grp_id = (fast_div_channel.div(out_off)) % num_grp;
    // Source: packed groups of num_chl_per_grp channels in a num_chl-wide row.
    int32_t in_off = nhw_id * num_chl + grp_id * num_chl_per_grp + channel;
    T value = in_range ? input[in_off] : T(0);
    output[out_off] = value;
}
// Inverse of split_group: gathers the group-padded layout
// (`num_grp * num_chl_per_grp_pad` channels per position) back into a row of
// `flt_align` channels per position, dropping per-group padding and
// zero-filling the final alignment tail (channels in [num_chl, flt_align)).
// fast_div_channel here divides by the UNPADDED group width num_chl_per_grp
// (see the host caller), matching this output layout.
template <typename T>
__global__ void merge_group(
    T *output,
    T *input,
    DivModFast fast_div_channel,
    uint64_t out_size,
    const int num_grp,
    const int num_chl_per_grp,
    const int num_chl,
    int num_chl_per_grp_pad,
    int flt_align)
{
    int32_t out_off = blockIdx.x * blockDim.x + threadIdx.x;
    if (out_off >= out_size)
        return;
    int32_t channel = fast_div_channel.mod(out_off); // channel inside its group
    int32_t nhw_id = out_off / (flt_align);          // flattened spatial position
    int chl_id = out_off % flt_align;                // channel within the aligned row
    int32_t grp_id = (fast_div_channel.div(out_off)) % num_grp;
    // Source: group-padded rows of num_grp * num_chl_per_grp_pad channels.
    int32_t in_off = nhw_id * num_grp * num_chl_per_grp_pad + grp_id * num_chl_per_grp_pad + channel;
    output[out_off] = chl_id < num_chl ? input[in_off] : T(0);
}
// Copies each group's filter block from its packed position (group stride
// in_size_per_grp) to its padded destination slot (group stride
// out_size_per_grp). The padded tail of each output group is left untouched —
// the host zero-fills the output beforehand. Grid: x covers one group's
// elements, y indexes the group.
template <typename T>
__global__ void flt_group_padding(T *output, T *input, unsigned int in_size_per_grp, const int num_grp, int num_chl_per_grp_pad, unsigned int out_size_per_grp)
{
    const unsigned int elem = blockIdx.x * blockDim.x + threadIdx.x;
    if (elem >= in_size_per_grp)
        return;
    const unsigned int grp = blockIdx.y;
    // elem already equals nhw * num_chl_per_grp_pad + chl, so the destination
    // is the same element position shifted by the padded group stride.
    const unsigned int chl = elem % num_chl_per_grp_pad;
    const unsigned int nhw = elem / num_chl_per_grp_pad;
    const unsigned int dst = nhw * num_chl_per_grp_pad + grp * out_size_per_grp + chl;
    output[dst] = input[elem + grp * in_size_per_grp];
}
// Converts a grouped convolution filter into the per-group padded layout the
// conv kernels expect: each group's [num_flt_per_grp, kh, kw, chl_pad] block
// is copied into a [num_flt_per_grp_pad, kh, kw, chl_pad] slot; the padding
// region is zeroed first. input/output are device pointers; all work is
// issued on `stream`.
void PPLCUDAConvolutionCvtFlt(
    hipStream_t &stream,
    void *output,
    const void *input,
    ppl::common::datatype_t type,
    conv_param_t &conv_param)
{
    const int flt_num = conv_param.num_flt;
    const int num_chl = conv_param.num_chl;
    const int flt_height = conv_param.flt_height;
    const int flt_width = conv_param.flt_width;
    const int num_grp = conv_param.num_grp;
    int align_size = GetPadSize(type);
    int num_chl_per_grp = num_chl / num_grp;
    int num_chl_per_grp_pad = Align(num_chl_per_grp, align_size);
    int num_flt_per_grp = flt_num / num_grp;
    int num_flt_per_grp_pad = Align(num_flt_per_grp, align_size);
    const int cta_size = 512;
    dim3 grid;
    int in_size_per_grp = flt_num / num_grp * flt_height * flt_width * num_chl_per_grp_pad;
    int out_size_per_grp = num_flt_per_grp_pad * flt_height * flt_width * num_chl_per_grp_pad;
    grid.x = DivUp(in_size_per_grp, cta_size);
    grid.y = num_grp; // one y-slice per group
    grid.z = 1;
    if (type == ppl::common::DATATYPE_FLOAT32) {
        // Zero the padded output on the SAME stream as the copy kernel: a
        // plain (null-stream) memset is not guaranteed to be ordered before a
        // kernel launched on a non-blocking stream.
        hipMemsetAsync(output, 0, sizeof(float) * num_grp * out_size_per_grp, stream);
        hipLaunchKernelGGL(( flt_group_padding<float>), dim3(grid), dim3(cta_size), 0, stream, (float *)output, (float *)input, in_size_per_grp, num_grp, num_chl_per_grp_pad, out_size_per_grp);
    } else if (type == ppl::common::DATATYPE_FLOAT16) {
        hipMemsetAsync(output, 0, sizeof(half) * num_grp * out_size_per_grp, stream);
        hipLaunchKernelGGL(( flt_group_padding<__half>), dim3(grid), dim3(cta_size), 0, stream, (__half *)output, (__half *)input, in_size_per_grp, num_grp, num_chl_per_grp_pad, out_size_per_grp);
    }
}
// Repacks an NHWC activation tensor [N, H, W, num_chl] into the group-padded
// layout [N, H, W, num_grp * chl_per_grp_pad] consumed by the grouped conv
// kernels; padding channels are written as zero. Work is issued on `stream`.
void PPLCUDAConvolutionCvtInput(
    hipStream_t &stream,
    void *output,
    const void *input,
    ppl::common::datatype_t type,
    conv_param_t &conv_param)
{
    const int groups          = conv_param.num_grp;
    const int channels        = conv_param.num_chl;
    const int chl_per_grp     = channels / groups;
    const int chl_per_grp_pad = Align(chl_per_grp, GetPadSize(type));
    // Total number of output elements (one thread each).
    uint64_t total = conv_param.in_num * conv_param.in_height *
                     conv_param.in_width * chl_per_grp_pad * groups;
    DivModFast chl_divmod(chl_per_grp_pad);
    const int block_size = 512;
    dim3 launch_grid(DivUp(total, block_size), 1, 1);
    if (type == ppl::common::DATATYPE_FLOAT32) {
        hipLaunchKernelGGL((split_group<float>), launch_grid, dim3(block_size), 0, stream,
            (float *)output, (float *)input, chl_divmod, total,
            groups, chl_per_grp, channels, chl_per_grp_pad);
    } else if (type == ppl::common::DATATYPE_FLOAT16) {
        hipLaunchKernelGGL((split_group<__half>), launch_grid, dim3(block_size), 0, stream,
            (__half *)output, (__half *)input, chl_divmod, total,
            groups, chl_per_grp, channels, chl_per_grp_pad);
    }
}
// Converts the conv result from the group-padded channel layout
// (num_grp * flt_per_grp_pad filters) back to a dense, alignment-padded row
// of Align(num_flt) channels per output position; trailing alignment
// channels are zeroed. Work is issued on `stream`.
void PPLCUDAConvolutionCvtOutput(
    hipStream_t &stream,
    void *output,
    const void *input,
    ppl::common::datatype_t type,
    conv_param_t &conv_param)
{
    const int groups          = conv_param.num_grp;
    const int filters         = conv_param.num_flt;
    const int pad             = GetPadSize(type);
    const int flt_per_grp     = filters / groups; // FIXME magic
    const int flt_per_grp_pad = Align(flt_per_grp, pad);
    const int flt_align       = Align(filters, pad);
    // Total number of output elements (one thread each).
    uint64_t total = conv_param.in_num * conv_param.out_height *
                     conv_param.out_width * flt_align;
    DivModFast grp_chl_divmod(flt_per_grp);
    const int block_size = 512;
    dim3 launch_grid(DivUp(total, block_size), 1, 1);
    if (type == ppl::common::DATATYPE_FLOAT32) {
        hipLaunchKernelGGL((merge_group<float>), launch_grid, dim3(block_size), 0, stream,
            (float *)output, (float *)input, grp_chl_divmod, total, groups,
            flt_per_grp, filters, flt_per_grp_pad, flt_align);
    } else if (type == ppl::common::DATATYPE_FLOAT16) {
        hipLaunchKernelGGL((merge_group<__half>), launch_grid, dim3(block_size), 0, stream,
            (__half *)output, (__half *)input, grp_chl_divmod, total, groups,
            flt_per_grp, filters, flt_per_grp_pad, flt_align);
    }
}
// Pads a grouped bias vector: num_grp packed groups of flt_per_grp entries
// (inside a num_flt_pad-wide row) become num_grp * Align(flt_per_grp)
// entries with zeroed per-group padding. Work is issued on `stream`.
void PPLCUDAConvolutionCvtBias(
    hipStream_t &stream,
    void *output,
    const void *input,
    ppl::common::datatype_t type,
    conv_param_t &conv_param)
{
    const int groups          = conv_param.num_grp;
    const int flt_per_grp     = conv_param.num_flt / groups;
    const int flt_per_grp_pad = Align(flt_per_grp, GetPadSize(type));
    const int total           = flt_per_grp_pad * groups;
    const int block_size = 256;
    dim3 launch_grid(DivUp(total, block_size), 1, 1);
    if (type == ppl::common::DATATYPE_FLOAT32) {
        hipLaunchKernelGGL((group_padding<float>), launch_grid, dim3(block_size), 0, stream,
            (float *)output, (float *)input, total, groups, flt_per_grp,
            conv_param.num_flt_pad, flt_per_grp_pad);
    } else if (type == ppl::common::DATATYPE_FLOAT16) {
        hipLaunchKernelGGL((group_padding<__half>), launch_grid, dim3(block_size), 0, stream,
            (__half *)output, (__half *)input, total, groups, flt_per_grp,
            conv_param.num_flt_pad, flt_per_grp_pad);
    }
}
| 19ff23096ae9499191c5c46ed8687a18f005b284.cu | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include <cuda.h>
#include <cuda_fp16.h>
#include "ppl/common/types.h"
#include "cudakernel/nn/conv/group_padding.h"
#include "cudakernel/common/divmod_fast.h"
#include "conv_common.h"
// Repacks a tensor whose channel row stores `num_grp` packed groups of
// `num_chl_per_grp` real channels (inside a `num_chl_pad`-wide row) into a
// per-group padded layout of `num_grp * num_chl_per_grp_pad` channels per
// position; padding channels are written as zero. One thread per OUTPUT
// element; threads at or past `out_size` do nothing.
template <typename T>
__global__ void group_padding(T *output, T *input, uint64_t out_size, const int num_grp, const int num_chl_per_grp, const int num_chl_pad, int num_chl_per_grp_pad)
{
    uint64_t out_off = blockIdx.x * blockDim.x + threadIdx.x;
    // in this case, num_chl_per_grp is naturally not aligned with padding size,
    // so we just use T to access memory.
    T value = 0;
    int chl_id_in_grp = out_off % (num_chl_per_grp_pad); // channel index within one padded group
    uint64_t nhw_id = out_off / (num_chl_per_grp_pad * num_grp); // flattened spatial (N*H*W) position
    int total_chl_id = out_off - nhw_id * num_chl_per_grp_pad * num_grp;
    int grp_id = total_chl_id / num_chl_per_grp_pad;
    // Source offset: groups sit back-to-back (stride num_chl_per_grp) inside
    // a num_chl_pad-wide channel row.
    uint64_t in_off = nhw_id * num_chl_pad + grp_id * num_chl_per_grp + chl_id_in_grp;
    if (out_off < out_size) {
        // Real channels are copied; the per-group padding tail stays zero.
        if (chl_id_in_grp < num_chl_per_grp)
            value = input[in_off];
        output[out_off] = value;
    }
}
// Scatters an input with `num_chl` packed channels per position into the
// group-padded layout (`num_grp * num_chl_per_grp_pad` channels per
// position), zero-filling the padding channels. One thread per output
// element; fast_div_channel is a precomputed div/mod by num_chl_per_grp_pad.
// NOTE(review): out_off is int32 while out_size is uint64 — outputs larger
// than 2^31-1 elements would overflow the index; confirm sizes stay below.
template <typename T>
__global__ void split_group(
    T *output,
    T *input,
    DivModFast fast_div_channel,
    uint64_t out_size,
    const int num_grp,
    const int num_chl_per_grp,
    const int num_chl,
    int num_chl_per_grp_pad)
{
    int32_t out_off = blockIdx.x * blockDim.x + threadIdx.x;
    if (out_off >= out_size)
        return;
    int32_t channel = fast_div_channel.mod(out_off); // channel inside the padded group
    bool in_range = channel < num_chl_per_grp;       // real channel vs. padding slot
    int32_t nhw_id = out_off / (num_chl_per_grp_pad * num_grp);
    int32_t grp_id = (fast_div_channel.div(out_off)) % num_grp;
    // Source: packed groups of num_chl_per_grp channels in a num_chl-wide row.
    int32_t in_off = nhw_id * num_chl + grp_id * num_chl_per_grp + channel;
    T value = in_range ? input[in_off] : T(0);
    output[out_off] = value;
}
// Inverse of split_group: gathers the group-padded layout
// (`num_grp * num_chl_per_grp_pad` channels per position) back into a row of
// `flt_align` channels per position, dropping per-group padding and
// zero-filling the final alignment tail (channels in [num_chl, flt_align)).
// fast_div_channel here divides by the UNPADDED group width num_chl_per_grp
// (see the host caller), matching this output layout.
template <typename T>
__global__ void merge_group(
    T *output,
    T *input,
    DivModFast fast_div_channel,
    uint64_t out_size,
    const int num_grp,
    const int num_chl_per_grp,
    const int num_chl,
    int num_chl_per_grp_pad,
    int flt_align)
{
    int32_t out_off = blockIdx.x * blockDim.x + threadIdx.x;
    if (out_off >= out_size)
        return;
    int32_t channel = fast_div_channel.mod(out_off); // channel inside its group
    int32_t nhw_id = out_off / (flt_align);          // flattened spatial position
    int chl_id = out_off % flt_align;                // channel within the aligned row
    int32_t grp_id = (fast_div_channel.div(out_off)) % num_grp;
    // Source: group-padded rows of num_grp * num_chl_per_grp_pad channels.
    int32_t in_off = nhw_id * num_grp * num_chl_per_grp_pad + grp_id * num_chl_per_grp_pad + channel;
    output[out_off] = chl_id < num_chl ? input[in_off] : T(0);
}
// Copies each group's filter block from its packed position (group stride
// in_size_per_grp) to its padded destination slot (group stride
// out_size_per_grp). The padded tail of each output group is left untouched —
// the host zero-fills the output beforehand. Grid: x covers one group's
// elements, y indexes the group.
template <typename T>
__global__ void flt_group_padding(T *output, T *input, unsigned int in_size_per_grp, const int num_grp, int num_chl_per_grp_pad, unsigned int out_size_per_grp)
{
    const unsigned int elem = blockIdx.x * blockDim.x + threadIdx.x;
    if (elem >= in_size_per_grp)
        return;
    const unsigned int grp = blockIdx.y;
    // elem already equals nhw * num_chl_per_grp_pad + chl, so the destination
    // is the same element position shifted by the padded group stride.
    const unsigned int chl = elem % num_chl_per_grp_pad;
    const unsigned int nhw = elem / num_chl_per_grp_pad;
    const unsigned int dst = nhw * num_chl_per_grp_pad + grp * out_size_per_grp + chl;
    output[dst] = input[elem + grp * in_size_per_grp];
}
// Converts a grouped convolution filter into the per-group padded layout the
// conv kernels expect: each group's [num_flt_per_grp, kh, kw, chl_pad] block
// is copied into a [num_flt_per_grp_pad, kh, kw, chl_pad] slot; the padding
// region is zeroed first. input/output are device pointers; all work is
// issued on `stream`.
void PPLCUDAConvolutionCvtFlt(
    cudaStream_t &stream,
    void *output,
    const void *input,
    ppl::common::datatype_t type,
    conv_param_t &conv_param)
{
    const int flt_num = conv_param.num_flt;
    const int num_chl = conv_param.num_chl;
    const int flt_height = conv_param.flt_height;
    const int flt_width = conv_param.flt_width;
    const int num_grp = conv_param.num_grp;
    int align_size = GetPadSize(type);
    int num_chl_per_grp = num_chl / num_grp;
    int num_chl_per_grp_pad = Align(num_chl_per_grp, align_size);
    int num_flt_per_grp = flt_num / num_grp;
    int num_flt_per_grp_pad = Align(num_flt_per_grp, align_size);
    const int cta_size = 512;
    dim3 grid;
    int in_size_per_grp = flt_num / num_grp * flt_height * flt_width * num_chl_per_grp_pad;
    int out_size_per_grp = num_flt_per_grp_pad * flt_height * flt_width * num_chl_per_grp_pad;
    grid.x = DivUp(in_size_per_grp, cta_size);
    grid.y = num_grp; // one y-slice per group
    grid.z = 1;
    if (type == ppl::common::DATATYPE_FLOAT32) {
        // Zero the padded output on the SAME stream as the copy kernel: a
        // plain (null-stream) cudaMemset is not guaranteed to be ordered
        // before a kernel launched on a non-blocking stream.
        cudaMemsetAsync(output, 0, sizeof(float) * num_grp * out_size_per_grp, stream);
        flt_group_padding<float><<<grid, cta_size, 0, stream>>>((float *)output, (float *)input, in_size_per_grp, num_grp, num_chl_per_grp_pad, out_size_per_grp);
    } else if (type == ppl::common::DATATYPE_FLOAT16) {
        cudaMemsetAsync(output, 0, sizeof(half) * num_grp * out_size_per_grp, stream);
        flt_group_padding<__half><<<grid, cta_size, 0, stream>>>((__half *)output, (__half *)input, in_size_per_grp, num_grp, num_chl_per_grp_pad, out_size_per_grp);
    }
}
// Repacks an NHWC activation tensor [N, H, W, num_chl] into the group-padded
// layout [N, H, W, num_grp * chl_per_grp_pad] consumed by the grouped conv
// kernels; padding channels are written as zero. Work is issued on `stream`.
void PPLCUDAConvolutionCvtInput(
    cudaStream_t &stream,
    void *output,
    const void *input,
    ppl::common::datatype_t type,
    conv_param_t &conv_param)
{
    const int groups          = conv_param.num_grp;
    const int channels        = conv_param.num_chl;
    const int chl_per_grp     = channels / groups;
    const int chl_per_grp_pad = Align(chl_per_grp, GetPadSize(type));
    // Total number of output elements (one thread each).
    uint64_t total = conv_param.in_num * conv_param.in_height *
                     conv_param.in_width * chl_per_grp_pad * groups;
    DivModFast chl_divmod(chl_per_grp_pad);
    const int block_size = 512;
    dim3 launch_grid(DivUp(total, block_size), 1, 1);
    if (type == ppl::common::DATATYPE_FLOAT32) {
        split_group<float><<<launch_grid, block_size, 0, stream>>>(
            (float *)output, (float *)input, chl_divmod, total,
            groups, chl_per_grp, channels, chl_per_grp_pad);
    } else if (type == ppl::common::DATATYPE_FLOAT16) {
        split_group<__half><<<launch_grid, block_size, 0, stream>>>(
            (__half *)output, (__half *)input, chl_divmod, total,
            groups, chl_per_grp, channels, chl_per_grp_pad);
    }
}
// Converts the conv result from the group-padded channel layout
// (num_grp * flt_per_grp_pad filters) back to a dense, alignment-padded row
// of Align(num_flt) channels per output position; trailing alignment
// channels are zeroed. Work is issued on `stream`.
void PPLCUDAConvolutionCvtOutput(
    cudaStream_t &stream,
    void *output,
    const void *input,
    ppl::common::datatype_t type,
    conv_param_t &conv_param)
{
    const int groups          = conv_param.num_grp;
    const int filters         = conv_param.num_flt;
    const int pad             = GetPadSize(type);
    const int flt_per_grp     = filters / groups; // FIXME magic
    const int flt_per_grp_pad = Align(flt_per_grp, pad);
    const int flt_align       = Align(filters, pad);
    // Total number of output elements (one thread each).
    uint64_t total = conv_param.in_num * conv_param.out_height *
                     conv_param.out_width * flt_align;
    DivModFast grp_chl_divmod(flt_per_grp);
    const int block_size = 512;
    dim3 launch_grid(DivUp(total, block_size), 1, 1);
    if (type == ppl::common::DATATYPE_FLOAT32) {
        merge_group<float><<<launch_grid, block_size, 0, stream>>>(
            (float *)output, (float *)input, grp_chl_divmod, total, groups,
            flt_per_grp, filters, flt_per_grp_pad, flt_align);
    } else if (type == ppl::common::DATATYPE_FLOAT16) {
        merge_group<__half><<<launch_grid, block_size, 0, stream>>>(
            (__half *)output, (__half *)input, grp_chl_divmod, total, groups,
            flt_per_grp, filters, flt_per_grp_pad, flt_align);
    }
}
// Pads a grouped bias vector: num_grp packed groups of flt_per_grp entries
// (inside a num_flt_pad-wide row) become num_grp * Align(flt_per_grp)
// entries with zeroed per-group padding. Work is issued on `stream`.
void PPLCUDAConvolutionCvtBias(
    cudaStream_t &stream,
    void *output,
    const void *input,
    ppl::common::datatype_t type,
    conv_param_t &conv_param)
{
    const int groups          = conv_param.num_grp;
    const int flt_per_grp     = conv_param.num_flt / groups;
    const int flt_per_grp_pad = Align(flt_per_grp, GetPadSize(type));
    const int total           = flt_per_grp_pad * groups;
    const int block_size = 256;
    dim3 launch_grid(DivUp(total, block_size), 1, 1);
    if (type == ppl::common::DATATYPE_FLOAT32) {
        group_padding<float><<<launch_grid, block_size, 0, stream>>>(
            (float *)output, (float *)input, total, groups, flt_per_grp,
            conv_param.num_flt_pad, flt_per_grp_pad);
    } else if (type == ppl::common::DATATYPE_FLOAT16) {
        group_padding<__half><<<launch_grid, block_size, 0, stream>>>(
            (__half *)output, (__half *)input, total, groups, flt_per_grp,
            conv_param.num_flt_pad, flt_per_grp_pad);
    }
}
|
8aaf08a2703054bf628b1b737efabf32711037aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/fast_divmod.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/interpolate_function.h"
#include "paddle/phi/kernels/interpolate_kernel.h"
namespace phi {
using paddle::platform::FastDivMod;
// Given a (possibly negative) source coordinate along one axis, derives the
// left input index, the step to its right neighbor (0 at the last element so
// the paired read stays in bounds), and the two linear-interpolation weights
// (*lambda1 toward the right neighbor, *lambda2 toward the left).
template <typename T>
__forceinline__ __device__ void PreCalculatorForLinearInterpInputIndex(
    int* in_img_idx,
    int* x_id,
    T* lambda1,
    T* lambda2,
    T src_x,
    const int in_img_x) {
  // Clamp below at zero before flooring.
  if (src_x <= 0) {
    src_x = 0.f;
  }
  const int left = static_cast<int>(src_x);
  *in_img_idx = left;
  *x_id = (left < in_img_x - 1) ? 1 : 0;
  *lambda1 = src_x - left;
  *lambda2 = 1.f - *lambda1;
}
// 1-D linear interpolation forward kernel (grid-stride over all outputs).
//
// The input is [N, C, in_img_w] (NCHW) or [N, in_img_w, C] (NHWC) flattened
// to [output_h, input_w]-style rows: out_id_h indexes the batch, out_id_w the
// position within one output row of `output_w` elements.
// align_flag selects the half-pixel coordinate mapping
// (ratio_w*(x+0.5)-0.5); otherwise the plain ratio_w*x mapping is used,
// with w1lambda/w2lambda the right/left interpolation weights.
template <typename T>
__global__ void KeLinearInterpFw(const T* in,
                                 const size_t in_img_w,
                                 const size_t input_w,
                                 T* out,
                                 const size_t out_img_w,
                                 const size_t output_h,
                                 const size_t output_w,
                                 const size_t num_channels,
                                 const float ratio_w,
                                 const bool align_corners,
                                 const int align_mode,
                                 const DataLayout data_layout) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  bool align_flag = (align_mode == 0 && !align_corners);
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;  // batch index
    int out_id_w = tid % output_w;  // offset inside this batch's output row
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    int channel_id, out_img_idx;
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }
    // Left input index (clamped at 0) and step to the right neighbor
    // (0 on the last element so the read stays in bounds).
    int in_img_idx = align_flag
                         ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * out_img_idx);
    in_img_idx = (in_img_idx > 0) ? in_img_idx : 0;  // w
    int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;  // w_id
    T src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
    src_w = (src_w > 0) ? src_w : 0;
    T w1lambda =
        align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx;
    T w2lambda = 1.f - w1lambda;
    if (data_layout == DataLayout::kNCHW) {
      // BUG FIX: the batch offset is out_id_h * input_w (one full input row
      // per batch element), not out_id_h * out_id_w as previously written —
      // the NHWC branch below already used the correct stride.
      const T* in_pos =
          &in[out_id_h * input_w + channel_id * in_img_size + in_img_idx];
      // linear interpolation
      out[out_id_h * output_w + out_id_w] =
          w2lambda * in_pos[0] + w1lambda * in_pos[w_id];
    } else {
      const T* in_pos =
          &in[out_id_h * input_w + in_img_idx * num_channels + channel_id];
      // linear interpolation
      out[out_id_h * output_w + out_id_w] =
          w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels];
    }
  }
}
// Nearest-neighbor interpolation forward, NCHW layout.
//
// Grid mapping: (x, y) index the output pixel, z strides over the fused N*C
// axis so one thread services several (n, c) slices of the same output
// pixel. align_corners selects round-to-nearest (+0.5 before truncation)
// vs. floor when mapping an output coordinate back to the input grid.
template <typename T>
__global__ void KeNearestNeighborInterpNCHWFw(const T* in,
                                              const size_t in_img_h,
                                              const size_t in_img_w,
                                              T* out,
                                              const size_t out_img_h,
                                              const size_t out_img_w,
                                              const size_t nc,
                                              const float ratio_h,
                                              const float ratio_w,
                                              const bool align_corners) {
  int out_img_idx = threadIdx.x + blockIdx.x * blockDim.x;
  int out_img_idy = threadIdx.y + blockIdx.y * blockDim.y;
  int nc_id = threadIdx.z + blockIdx.z * blockDim.z;
  int nc_stride = blockDim.z * gridDim.z;
  // nearest_sampling by multiple read in_addr and write to out_addr
  int in_img_idx = (align_corners)
                       ? static_cast<int>(ratio_w * out_img_idx + 0.5)
                       : static_cast<int>(ratio_w * out_img_idx);
  int in_img_idy = (align_corners)
                       ? static_cast<int>(ratio_h * out_img_idy + 0.5)
                       : static_cast<int>(ratio_h * out_img_idy);
  // Start offsets for the first (n, c) slice; later slices are reached by
  // adding the whole-image strides below.
  int in_index = (nc_id * in_img_h + in_img_idy) * in_img_w + in_img_idx;
  int in_index_stride = nc_stride * in_img_h * in_img_w;
  int out_index = (nc_id * out_img_h + out_img_idy) * out_img_w + out_img_idx;
  int out_index_stride = nc_stride * out_img_h * out_img_w;
  // prevent from multiple threads writing
  if (out_img_idx < out_img_w && out_img_idy < out_img_h) {
    // Walk the fused N*C axis with the z-dimension stride.
    while (nc_id < nc) {
      out[out_index] = in[in_index];
      in_index += in_index_stride;
      out_index += out_index_stride;
      nc_id += nc_stride;
    }
  }
}
// Nearest-neighbor 2-D forward kernel, channels-last (NHWC) indexing: the
// input offset below multiplies by num_channels. The NCHW path is served by
// KeNearestNeighborInterpNCHWFw instead.
// Launch: 1-D grid-stride loop over output_h * output_w output elements.
template <typename T>
__global__ void KeNearestNeighborInterpFw(
    const T* in,
    const size_t in_img_h,
    const size_t in_img_w,
    const size_t input_h,
    const size_t input_w,
    T* out,
    const size_t out_img_h,
    const size_t out_img_w,
    const size_t output_h,
    const size_t output_w,
    const size_t num_channels,
    const float ratio_h,
    const float ratio_w,
    const bool align_corners,
    funcs::FastDivModForInterpolate divmods) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  // NOTE(review): in_img_size / out_img_size are computed but never used in
  // this kernel.
  int in_img_size = in_img_h * in_img_w;
  int out_img_size = out_img_h * out_img_w;
  for (; tid < nthreads; tid += stride) {
    // Decompose tid into (batch, y, x, channel) using precomputed fast
    // div/mod to avoid hardware integer division per element.
    auto out_id_divmod = divmods.output_w_div.Divmod(tid);
    int out_id_h = out_id_divmod.val[0];  // batch index
    int out_id_w = out_id_divmod.val[1];  // flat offset inside one batch
    int channel_id = divmods.channels_div.Divmod(tid).val[1];
    auto outimg_id_divmod = divmods.output_wc_div.Divmod(out_id_w);
    int out_img_idy = outimg_id_divmod.val[0];
    int out_img_idx =
        divmods.channels_div.Divmod(outimg_id_divmod.val[1]).val[0];
    // align_corners rounds to nearest; otherwise truncates.
    int in_img_idy = (align_corners)
                         ? static_cast<int>(ratio_h * out_img_idy + 0.5)
                         : static_cast<int>(ratio_h * out_img_idy);
    int in_img_idx = (align_corners)
                         ? static_cast<int>(ratio_w * out_img_idx + 0.5)
                         : static_cast<int>(ratio_w * out_img_idx);
    out[tid] = in[out_id_h * input_w + in_img_idy * in_img_w * num_channels +
                  in_img_idx * num_channels + channel_id];
  }
}
// Bilinear 2-D forward kernel, channels-last (NHWC) indexing.
// align_type_value is 0.5 for the half-pixel source mapping
// (align_mode == 0 && !align_corners) and 0 otherwise; see the launch site
// in Interpolate2DCUDAFwd.
template <typename T>
__global__ void KeBilinearInterpFw(const T* in,
                                   const size_t in_img_h,
                                   const size_t in_img_w,
                                   const size_t input_h,
                                   const size_t input_w,
                                   T* out,
                                   const size_t out_img_h,
                                   const size_t out_img_w,
                                   const size_t output_h,
                                   const size_t output_w,
                                   const size_t num_channels,
                                   const float ratio_h,
                                   const float ratio_w,
                                   const T align_type_value,
                                   funcs::FastDivModForInterpolate divmods) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; tid < nthreads; tid += stride) {
    // Decompose tid into (batch, y, x, channel) with precomputed fast divmod.
    auto out_id_divmod = divmods.output_w_div.Divmod(tid);
    int out_id_h = out_id_divmod.val[0];
    int out_id_w = out_id_divmod.val[1];
    int channel_id = divmods.channels_div.Divmod(tid).val[1];
    auto outimg_id_divmod = divmods.output_wc_div.Divmod(out_id_w);
    int out_img_idy = outimg_id_divmod.val[0];
    int out_img_idx =
        divmods.channels_div.Divmod(outimg_id_divmod.val[1]).val[0];
    int in_img_idx, in_img_idy, h_id, w_id;
    T h1lambda, w1lambda, h2lambda, w2lambda;
    T src_w = ratio_w * (out_img_idx + align_type_value) - align_type_value;
    T src_h = ratio_h * (out_img_idy + align_type_value) - align_type_value;
    // Clamped integer source index, neighbor-offset flag (0 at the border)
    // and the two interpolation weights for each axis.
    PreCalculatorForLinearInterpInputIndex(
        &in_img_idx, &w_id, &w1lambda, &w2lambda, src_w, in_img_w);
    PreCalculatorForLinearInterpInputIndex(
        &in_img_idy, &h_id, &h1lambda, &h2lambda, src_h, in_img_h);
    // bilinear interpolation
    const T* in_pos =
        &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels +
            in_img_idx * num_channels + channel_id];
    out[tid] =
        h2lambda *
            (w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels]) +
        h1lambda *
            (w2lambda * in_pos[h_id * in_img_w * num_channels] +
             w1lambda *
                 in_pos[h_id * in_img_w * num_channels + w_id * num_channels]);
  }
}
// Bilinear 2-D forward kernel, NCHW layout.
// Launch: 3-D grid -- x/y map to output column/row, z strides over the fused
// N*C slices. The weight precomputation runs before the bounds guard; it is
// pure arithmetic, so out-of-range threads perform no memory access.
template <typename T>
__global__ void KeBilinearInterpNCHWFw(const T* in,
                                       const size_t in_img_h,
                                       const size_t in_img_w,
                                       T* out,
                                       const size_t out_img_h,
                                       const size_t out_img_w,
                                       const size_t nc,
                                       const float ratio_h,
                                       const float ratio_w,
                                       const T align_type_value) {
  int out_img_idx = threadIdx.x + blockIdx.x * blockDim.x;
  int out_img_idy = threadIdx.y + blockIdx.y * blockDim.y;
  int nc_id = threadIdx.z + blockIdx.z * blockDim.z;
  int nc_stride = blockDim.z * gridDim.z;
  int in_img_idx, in_img_idy, h_id, w_id;
  T h1lambda, w1lambda, h2lambda, w2lambda;
  // align_type_value: 0.5 for half-pixel mapping, 0 otherwise.
  T src_w = ratio_w * (out_img_idx + align_type_value) - align_type_value;
  T src_h = ratio_h * (out_img_idy + align_type_value) - align_type_value;
  PreCalculatorForLinearInterpInputIndex(
      &in_img_idx, &w_id, &w1lambda, &w2lambda, src_w, in_img_w);
  PreCalculatorForLinearInterpInputIndex(
      &in_img_idy, &h_id, &h1lambda, &h2lambda, src_h, in_img_h);
  int in_index = (nc_id * in_img_h + in_img_idy) * in_img_w + in_img_idx;
  int in_index_stride = nc_stride * in_img_h * in_img_w;
  int out_index = (nc_id * out_img_h + out_img_idy) * out_img_w + out_img_idx;
  int out_index_stride = nc_stride * out_img_h * out_img_w;
  // prevent from multiple threads writing
  if (out_img_idx < out_img_w && out_img_idy < out_img_h) {
    while (nc_id < nc) {
      const T* in_pos = &in[in_index];
      // Weighted 2x2 neighborhood; h_id/w_id are 0 at the last row/column so
      // the clamped neighbor folds back onto in_pos itself.
      out[out_index] =
          h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id]) +
          h1lambda * (w2lambda * in_pos[h_id * in_img_w] +
                      w1lambda * in_pos[h_id * in_img_w + w_id]);
      in_index += in_index_stride;
      out_index += out_index_stride;
      nc_id += nc_stride;
    }
  }
}
// Cubic convolution (a = -0.75) of four consecutive samples x0..x3 at
// fractional position t in [0, 1) between x1 and x2; weights come from
// funcs::CubicConvolution1 (|x| <= 1) and funcs::CubicConvolution2
// (1 < |x| < 2).
template <typename T>
__device__ __forceinline__ static T Kecubic_interp(
    const T x0, const T x1, const T x2, const T x3, T t) {
  // Keep all arithmetic in T: the previous bare double literals (1.0, -0.75)
  // promoted every operation to double when T == float. -0.75 and 1 are
  // exactly representable in float, so the results are unchanged.
  const T a = static_cast<T>(-0.75);
  const T one = static_cast<T>(1);
  T coeffs[4];
  coeffs[0] = funcs::CubicConvolution2<T>(t + one, a);
  coeffs[1] = funcs::CubicConvolution1<T>(t, a);
  coeffs[2] = funcs::CubicConvolution1<T>(one - t, a);
  coeffs[3] = funcs::CubicConvolution2<T>((one - t) + one, a);
  return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3];
}
// Bicubic 2-D forward kernel; supports NCHW and NHWC via data_layout.
// Each output pixel samples a border-clamped 4x4 input neighborhood and
// combines it with Kecubic_interp (a = -0.75), first along x then along y.
// Launch: 1-D grid-stride loop over output_h * output_w elements.
template <typename T>
__global__ void KeBicubicInterpFw(const T* in,
                                  const size_t in_img_h,
                                  const size_t in_img_w,
                                  const size_t input_h,
                                  const size_t input_w,
                                  T* out,
                                  const size_t out_img_h,
                                  const size_t out_img_w,
                                  const size_t output_h,
                                  const size_t output_w,
                                  const size_t num_channels,
                                  const float ratio_h,
                                  const float ratio_w,
                                  const bool align_corners,
                                  const DataLayout data_layout) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    int channel_id, out_img_idy, out_img_idx;
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idy = (out_id_w % out_img_size) / out_img_w;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idy = out_id_w / (out_img_w * num_channels);
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }
    // Fractional source coordinates: half-pixel mapping unless align_corners.
    T in_img_idy = align_corners
                       ? static_cast<T>(ratio_h * out_img_idy)
                       : static_cast<T>(ratio_h * (out_img_idy + 0.5) - 0.5);
    // NOTE(review): floorf is the float overload; for T == double this
    // narrows before truncation -- confirm intended.
    int input_y = floorf(in_img_idy);
    const T y_t = in_img_idy - input_y;  // fractional part along y
    T in_img_idx = align_corners
                       ? static_cast<T>(ratio_w * out_img_idx)
                       : static_cast<T>(ratio_w * (out_img_idx + 0.5) - 0.5);
    int input_x = floorf(in_img_idx);
    const T x_t = in_img_idx - input_x;  // fractional part along x
    T coefficients[4];
    const T* in_pos_0;
    const T* in_pos_1;
    const T* in_pos_2;
    const T* in_pos_3;
    int access_x_0;
    if (data_layout == DataLayout::kNCHW) {
      for (int k = 0; k < 4; k++) {
        // Row k of the 4x4 stencil; all accesses clamped to the image border.
        int access_y =
            max(min(input_y - 1 + k, static_cast<int>(in_img_h - 1)), 0);
        access_x_0 = max(min(input_x - 1, static_cast<int>(in_img_w - 1)), 0);
        int access_x_1 =
            max(min(input_x + 0, static_cast<int>(in_img_w - 1)), 0);
        int access_x_2 =
            max(min(input_x + 1, static_cast<int>(in_img_w - 1)), 0);
        int access_x_3 =
            max(min(input_x + 2, static_cast<int>(in_img_w - 1)), 0);
        in_pos_0 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_0];
        in_pos_1 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_1];
        in_pos_2 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_2];
        in_pos_3 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_3];
        // Horizontal pass for this stencil row.
        coefficients[k] = Kecubic_interp<T>(
            in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t);
      }
      // Vertical pass over the four row results.
      out[out_id_h * output_w + out_id_w] = Kecubic_interp<T>(coefficients[0],
                                                              coefficients[1],
                                                              coefficients[2],
                                                              coefficients[3],
                                                              y_t);
    } else {
      for (int k = 0; k < 4; k++) {
        int access_y =
            max(min(input_y - 1 + k, static_cast<int>((in_img_h - 1))), 0);
        int access_x_0 =
            max(min(input_x - 1, static_cast<int>((in_img_w - 1))), 0);
        int access_x_1 =
            max(min(input_x + 0, static_cast<int>((in_img_w - 1))), 0);
        int access_x_2 =
            max(min(input_x + 1, static_cast<int>((in_img_w - 1))), 0);
        int access_x_3 =
            max(min(input_x + 2, static_cast<int>((in_img_w - 1))), 0);
        const T* in_pos_0 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_0 * num_channels + channel_id];
        const T* in_pos_1 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_1 * num_channels + channel_id];
        const T* in_pos_2 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_2 * num_channels + channel_id];
        const T* in_pos_3 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_3 * num_channels + channel_id];
        coefficients[k] = Kecubic_interp(
            in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t);
      }
      out[out_id_h * output_w + out_id_w] =
          static_cast<T>(Kecubic_interp(coefficients[0],
                                        coefficients[1],
                                        coefficients[2],
                                        coefficients[3],
                                        y_t));
    }
  }
}
// Trilinear 3-D forward kernel; supports NCDHW and NDHWC via data_layout.
// align_flag (align_mode == 0 && !align_corners) selects the half-pixel
// source mapping; otherwise the legacy scale mapping is used.
// Launch: 1-D grid-stride loop over output_h * output_w elements, where
// output_h is the batch count.
template <typename T>
__global__ void KeTrilinearInterpFw(const T* in,
                                    const size_t in_img_d,
                                    const size_t in_img_h,
                                    const size_t in_img_w,
                                    const size_t input_h,
                                    const size_t input_w,
                                    T* out,
                                    const size_t out_img_d,
                                    const size_t out_img_h,
                                    const size_t out_img_w,
                                    const size_t output_h,
                                    const size_t output_w,
                                    const size_t num_channels,
                                    const float ratio_d,
                                    const float ratio_h,
                                    const float ratio_w,
                                    const bool align_corners,
                                    const int align_mode,
                                    const DataLayout data_layout) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  bool align_flag = (align_mode == 0 && !align_corners);
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    // Decompose the per-batch offset into (channel, depth, height, width).
    int channel_id, out_img_idt, out_img_idy, out_img_idx;
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w;
      out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels);
      out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) /
                    (out_img_w * num_channels);
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }
    // Depth axis: clamped source index, back-neighbor flag (0 at the last
    // slice) and the pair of interpolation weights.
    int in_img_idt = align_flag
                         ? static_cast<int>(ratio_d * (out_img_idt + 0.5) - 0.5)
                         : static_cast<int>(ratio_d * out_img_idt);
    in_img_idt = (in_img_idt > 0) ? in_img_idt : 0;
    int d_id = (in_img_idt < in_img_d - 1) ? 1 : 0;
    T src_d = ratio_d * (out_img_idt + 0.5) - 0.5;
    src_d = (src_d > 0) ? src_d : 0;
    T d1lambda =
        align_flag ? src_d - in_img_idt : ratio_d * out_img_idt - in_img_idt;
    T d2lambda = 1.f - d1lambda;
    // Height axis: same scheme.
    int in_img_idy = align_flag
                         ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * out_img_idy);
    in_img_idy = (in_img_idy > 0) ? in_img_idy : 0;
    int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0;
    T src_h = ratio_h * (out_img_idy + 0.5) - 0.5;
    src_h = (src_h > 0) ? src_h : 0;
    T h1lambda =
        align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy;
    T h2lambda = 1.f - h1lambda;
    // Width axis: same scheme.
    int in_img_idx = align_flag
                         ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * out_img_idx);
    in_img_idx = (in_img_idx > 0) ? in_img_idx : 0;
    int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;
    T src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
    src_w = (src_w > 0) ? src_w : 0;
    T w1lambda =
        align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx;
    T w2lambda = 1.f - w1lambda;
    if (data_layout == DataLayout::kNCHW) {
      // in_pos1: front slice of the 2x2x2 cell; in_pos2: back slice (same as
      // front when d_id == 0).
      int in_pos1_idx = out_id_h * input_w + channel_id * in_img_size +
                        (in_img_idt * in_img_h + in_img_idy) * in_img_w +
                        in_img_idx;
      const T* in_pos1 = &in[in_pos1_idx];
      int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w;
      const T* in_pos2 = &in[in_pos2_idx];
      // trilinear interpolation
      out[out_id_h * output_w + out_id_w] =
          d2lambda *
              (h2lambda * (w2lambda * in_pos1[0] + w1lambda * in_pos1[w_id]) +
               h1lambda * (w2lambda * in_pos1[h_id * in_img_w] +
                           w1lambda * in_pos1[h_id * in_img_w + w_id])) +
          d1lambda *
              (h2lambda * (w2lambda * in_pos2[0] + w1lambda * in_pos2[w_id]) +
               h1lambda * (w2lambda * in_pos2[h_id * in_img_w] +
                           w1lambda * in_pos2[h_id * in_img_w + w_id]));
    } else {
      int in_pos1_idx = out_id_h * input_w +
                        in_img_idt * in_img_h * in_img_w * num_channels +
                        in_img_idy * in_img_w * num_channels +
                        in_img_idx * num_channels + channel_id;
      const T* in_pos1 = &in[in_pos1_idx];
      int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w * num_channels;
      const T* in_pos2 = &in[in_pos2_idx];
      // trilinear interpolation
      out[out_id_h * output_w + out_id_w] =
          d2lambda *
              (h2lambda * (w2lambda * in_pos1[0] +
                           w1lambda * in_pos1[w_id * num_channels]) +
               h1lambda * (w2lambda * in_pos1[h_id * in_img_w * num_channels] +
                           w1lambda * in_pos1[h_id * in_img_w * num_channels +
                                              w_id * num_channels])) +
          d1lambda *
              (h2lambda * (w2lambda * in_pos2[0] +
                           w1lambda * in_pos2[w_id * num_channels]) +
               h1lambda * (w2lambda * in_pos2[h_id * in_img_w * num_channels] +
                           w1lambda * in_pos2[h_id * in_img_w * num_channels +
                                              w_id * num_channels]));
    }
  }
}
// Nearest-neighbor 3-D forward kernel; supports NCDHW and NDHWC via
// data_layout. Launch: 1-D grid-stride loop over output_h * output_w
// elements (output_h is the batch count).
template <typename T>
__global__ void KeNearestNeighbor3DInterpFw(const T* in,
                                            const size_t in_img_d,
                                            const size_t in_img_h,
                                            const size_t in_img_w,
                                            const size_t input_h,
                                            const size_t input_w,
                                            T* out,
                                            const size_t out_img_d,
                                            const size_t out_img_h,
                                            const size_t out_img_w,
                                            const size_t output_h,
                                            const size_t output_w,
                                            const size_t num_channels,
                                            const float ratio_d,
                                            const float ratio_h,
                                            const float ratio_w,
                                            const bool align_corners,
                                            const DataLayout data_layout) {
  int nthreads = output_h * output_w;  // ncdhw
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    // Decompose the per-batch offset into (channel, depth, height, width).
    int channel_id, out_img_idt, out_img_idy, out_img_idx;
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w;
      out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels);
      out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) /
                    (out_img_w * num_channels);
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }
    // Source voxel: round-to-nearest when align_corners, truncate otherwise.
    int in_img_idt = (align_corners)
                         ? static_cast<int>(ratio_d * out_img_idt + 0.5)
                         : static_cast<int>(ratio_d * out_img_idt);
    int in_img_idy = (align_corners)
                         ? static_cast<int>(ratio_h * out_img_idy + 0.5)
                         : static_cast<int>(ratio_h * out_img_idy);
    int in_img_idx = (align_corners)
                         ? static_cast<int>(ratio_w * out_img_idx + 0.5)
                         : static_cast<int>(ratio_w * out_img_idx);
    if (data_layout == DataLayout::kNCHW) {
      out[tid] = in[out_id_h * input_w + channel_id * in_img_size +
                    in_img_idt * in_img_h * in_img_w + in_img_idy * in_img_w +
                    in_img_idx];
    } else {
      out[tid] = in[out_id_h * input_w +
                    in_img_idt * in_img_h * in_img_w * num_channels +
                    in_img_idy * in_img_w * num_channels +
                    in_img_idx * num_channels + channel_id];
    }
  }
}
// Host-side launcher for 1-D ("linear") interpolation forward.
// Output width resolution priority:
//   1. SizeTensor (size_tensor), when present and non-empty;
//   2. otherwise a scale (Scale tensor, else the scale attribute) applied to
//      in_w, then overridden by the OutSize tensor (out_size) if present.
template <typename T, typename Context>
static void Interpolate1DCUDAFwd(
    const Context& dev_ctx,
    const DenseTensor& input,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout_str,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  auto* input_data = input.data<T>();
  const DataLayout data_layout =
      paddle::framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  float scale_w = -1;
  if (size_tensor && size_tensor->size() > 0) {
    // have size tensor
    auto new_size = funcs::get_new_shape(size_tensor.get());
    out_w = new_size[0];
  } else {
    if (scale_tensor) {
      auto scale_data =
          funcs::get_new_data_from_tensor<float>(scale_tensor.get_ptr());
      scale_w = scale_data[0];
      // NOTE(review): "%d" is used with a float argument here and below;
      // Paddle's error formatter may tolerate it, but "%f" looks intended --
      // confirm before changing the message text.
      PADDLE_ENFORCE_EQ(
          scale_w > 0,
          true,
          errors::InvalidArgument(
              "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %d.",
              scale_w));
    } else {
      if (scale.size() > 0) {
        scale_w = scale[0];
        PADDLE_ENFORCE_EQ(
            scale_w > 0,
            true,
            errors::InvalidArgument(
                "The scale_w in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %d.",
                scale_w));
      }
    }
    if (scale_w > 0.) {
      out_w = static_cast<int>(in_w * scale_w);
    }
    // The OutSize tensor, when given, has the final say over the
    // scale-derived width.
    if (out_size) {
      DenseTensor sizes;
      paddle::framework::TensorCopySync(
          *out_size, paddle::platform::CPUPlace(), &sizes);
      auto size_data = sizes.data<int>();
      out_w = size_data[0];
    }
  }
  PADDLE_ENFORCE_GT(
      out_w,
      0,
      errors::InvalidArgument("out_w in Attr(out_shape) of Op(interpolate) "
                              "should be greater than 0."));
  phi::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_w};
  } else {
    dim_out = {n, out_w, c};
  }
  output->Resize(dim_out);
  auto output_data = dev_ctx.template Alloc<T>(output);
  // Identity resize: plain device copy, no kernel launch needed.
  if (in_w == out_w) {
    paddle::framework::TensorCopy(input, dev_ctx.GetPlace(), output);
    return;
  }
  // ratio_w maps an output x to its (fractional) source x; with
  // align_corners the endpoints of input and output are pinned together.
  float ratio_w = 0.f;
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1.0) / (out_w - 1.0)
                              : static_cast<float>(new_scale_w);
  }
  int64_t in_cw = c * in_w;
  int64_t out_cw = c * out_w;
  auto pixelNum = n * out_cw;
  backends::gpu::GpuLaunchConfig config =
      backends::gpu::GetGpuLaunchConfig1D(dev_ctx, pixelNum);
  if ("linear" == interp_method) {
    hipLaunchKernelGGL(( KeLinearInterpFw<T>), dim3(config.block_per_grid),
                       dim3(config.thread_per_block),
                       0,
                       dev_ctx.stream(), input_data,
                       in_w,
                       in_cw,
                       output_data,
                       out_w,
                       n,
                       out_cw,
                       c,
                       ratio_w,
                       align_corners,
                       align_mode,
                       data_layout);
  }
}
template <typename T, typename Context>
static void Interpolate2DCUDAFwd(
const Context& dev_ctx,
const DenseTensor& input,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout_str,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
auto* input_data = input.data<T>();
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_layout_str);
int n, c, in_d, in_h, in_w;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
float scale_w = -1;
float scale_h = -1;
if (size_tensor && size_tensor->size() > 0) {
// have size tensor
auto new_size = funcs::get_new_shape(size_tensor.get());
out_h = new_size[0];
out_w = new_size[1];
} else {
if (scale_tensor) {
auto scale_data =
funcs::get_new_data_from_tensor<float>(scale_tensor.get_ptr());
if (scale_data.size() > 1) {
scale_h = scale_data[0];
scale_w = scale_data[1];
} else {
scale_h = scale_data[0];
scale_w = scale_data[0];
}
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
PADDLE_ENFORCE_EQ(
scale_h > 0,
true,
errors::InvalidArgument(
"The scale_h in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_h));
} else {
if (scale.size() > 1) {
scale_w = scale[1];
scale_h = scale[0];
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
PADDLE_ENFORCE_EQ(
scale_h > 0,
true,
errors::InvalidArgument(
"The scale_h in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_h));
}
}
if (scale_w > 0. && scale_h > 0.) {
out_h = static_cast<int>(in_h * scale_h);
out_w = static_cast<int>(in_w * scale_w);
}
if (out_size) {
DenseTensor sizes;
paddle::framework::TensorCopySync(
*out_size, paddle::platform::CPUPlace(), &sizes);
auto size_data = sizes.data<int>();
out_h = size_data[0];
out_w = size_data[1];
}
}
PADDLE_ENFORCE_GT(
out_h,
0,
errors::InvalidArgument("out_h in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
PADDLE_ENFORCE_GT(
out_w,
0,
errors::InvalidArgument("out_w in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
phi::DDim dim_out;
if (data_layout == DataLayout::kNCHW) {
dim_out = {n, c, out_h, out_w};
} else {
dim_out = {n, out_h, out_w, c};
}
output->Resize(dim_out);
auto output_data = dev_ctx.template Alloc<T>(output);
if (in_h == out_h && in_w == out_w) {
paddle::framework::TensorCopy(input, dev_ctx.GetPlace(), output);
return;
}
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_h > 1) {
float new_scale_h = 0.f;
new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
: static_cast<float>(in_h) / out_h;
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(new_scale_h);
}
if (out_w > 1) {
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
int64_t in_hw = in_h * in_w;
int64_t out_hw = out_h * out_w;
int64_t in_chw = c * in_hw;
int64_t out_chw = c * out_hw;
auto pixelNum = n * out_chw;
backends::gpu::GpuLaunchConfig config =
backends::gpu::GetGpuLaunchConfig1D(dev_ctx, pixelNum);
if ("nearest" == interp_method) {
if (data_layout == DataLayout::kNCHW) {
// get launch 3D config
int nc = n * c;
backends::gpu::GpuLaunchConfig config_3d =
backends::gpu::GetGpuLaunchConfig3D(dev_ctx, nc, out_h, out_w);
hipLaunchKernelGGL(( KeNearestNeighborInterpNCHWFw<T>), dim3(config_3d.block_per_grid),
dim3(config_3d.thread_per_block),
0,
dev_ctx.stream(), input_data,
in_h,
in_w,
output_data,
out_h,
out_w,
nc,
ratio_h,
ratio_w,
align_corners);
} else {
int64_t cw = c * out_w;
auto interp_divmods = funcs::FastDivModForInterpolate(c, out_chw, cw);
hipLaunchKernelGGL(( KeNearestNeighborInterpFw<T>), dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
dev_ctx.stream(), input_data,
in_h,
in_w,
n,
in_chw,
output_data,
out_h,
out_w,
n,
out_chw,
c,
ratio_h,
ratio_w,
align_corners,
interp_divmods);
}
} else if ("bilinear" == interp_method) {
dim3 thread_num = config.thread_per_block;
#ifdef WITH_NV_JETSON
if (config.compute_capability == 53 || config.compute_capability == 62) {
thread_num = 512;
}
#endif
const T align_type_value = (align_mode == 0 && !align_corners) ? 0.5f : 0;
if (data_layout == DataLayout::kNCHW) {
// get launch 3D config
int nc = n * c;
backends::gpu::GpuLaunchConfig config_3d =
backends::gpu::GetGpuLaunchConfig3D(dev_ctx, nc, out_h, out_w);
hipLaunchKernelGGL(( KeBilinearInterpNCHWFw<T>), dim3(config_3d.block_per_grid),
dim3(config_3d.thread_per_block),
0,
dev_ctx.stream(), input_data,
in_h,
in_w,
output_data,
out_h,
out_w,
nc,
ratio_h,
ratio_w,
align_type_value);
} else {
int64_t cw = c * out_w;
auto interp_divmods = funcs::FastDivModForInterpolate(c, out_chw, cw);
hipLaunchKernelGGL(( KeBilinearInterpFw<T>)
, dim3(config.block_per_grid), dim3(thread_num), 0, dev_ctx.stream(),
input_data,
in_h,
in_w,
n,
in_chw,
output_data,
out_h,
out_w,
n,
out_chw,
c,
ratio_h,
ratio_w,
align_type_value,
interp_divmods);
}
} else if ("bicubic" == interp_method) {
#ifdef __HIPCC__
constexpr int thread_per_block = 256;
#else
constexpr int thread_per_block = 512;
#endif
hipLaunchKernelGGL(( KeBicubicInterpFw<T>)
, dim3(config.block_per_grid), dim3(thread_per_block), 0, dev_ctx.stream(),
input_data,
in_h,
in_w,
n,
in_chw,
output_data,
out_h,
out_w,
n,
out_chw,
c,
ratio_h,
ratio_w,
align_corners,
data_layout);
}
}
template <typename T, typename Context>
static void Interpolate3DCUDAFwd(
const Context& dev_ctx,
const DenseTensor& input,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout_str,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
auto* input_data = input.data<T>();
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_layout_str);
int n, c, in_d, in_h, in_w;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
float scale_w = -1;
float scale_d = -1;
float scale_h = -1;
if (size_tensor && size_tensor->size() > 0) {
// have size tensor
auto new_size = funcs::get_new_shape(size_tensor.get());
out_d = new_size[0];
out_h = new_size[1];
out_w = new_size[2];
} else {
if (scale_tensor) {
auto scale_data =
funcs::get_new_data_from_tensor<float>(scale_tensor.get_ptr());
if (scale_data.size() > 1) {
scale_d = scale_data[0];
scale_h = scale_data[1];
scale_w = scale_data[2];
} else {
scale_d = scale_data[0];
scale_h = scale_data[0];
scale_w = scale_data[0];
}
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
PADDLE_ENFORCE_EQ(
scale_h > 0,
true,
errors::InvalidArgument(
"The scale_h in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_h));
PADDLE_ENFORCE_EQ(
scale_d > 0,
true,
errors::InvalidArgument(
"The scale_d in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_d));
} else {
if (scale.size() > 1) {
scale_d = scale[0];
scale_h = scale[1];
scale_w = scale[2];
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
PADDLE_ENFORCE_EQ(
scale_h > 0,
true,
errors::InvalidArgument(
"The scale_h in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_h));
PADDLE_ENFORCE_EQ(
scale_d > 0,
true,
errors::InvalidArgument(
"The scale_d in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_d));
}
}
if (scale_d > 0. && scale_h > 0. && scale_w > 0.) {
out_d = static_cast<int>(in_d * scale_d);
out_h = static_cast<int>(in_h * scale_h);
out_w = static_cast<int>(in_w * scale_w);
}
if (out_size) {
DenseTensor sizes;
paddle::framework::TensorCopySync(
*out_size, paddle::platform::CPUPlace(), &sizes);
auto size_data = sizes.data<int>();
out_d = size_data[0];
out_h = size_data[1];
out_w = size_data[2];
}
}
PADDLE_ENFORCE_GT(
out_d,
0,
errors::InvalidArgument("out_d in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
PADDLE_ENFORCE_GT(
out_h,
0,
errors::InvalidArgument("out_h in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
PADDLE_ENFORCE_GT(
out_w,
0,
errors::InvalidArgument("out_w in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
phi::DDim dim_out;
if (data_layout == DataLayout::kNCHW) {
dim_out = {n, c, out_d, out_h, out_w};
} else {
dim_out = {n, out_d, out_h, out_w, c};
}
output->Resize(dim_out);
auto output_data = dev_ctx.template Alloc<T>(output);
if (in_d == out_d && in_h == out_h && in_w == out_w) {
paddle::framework::TensorCopy(input, dev_ctx.GetPlace(), output);
return;
}
float ratio_d = 0.f;
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_d > 1) {
float new_scale_d = 0.f;
new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d)
: static_cast<float>(in_d) / out_d;
ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
: static_cast<float>(new_scale_d);
}
if (out_h > 1) {
float new_scale_h = 0.f;
new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
: static_cast<float>(in_h) / out_h;
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(new_scale_h);
}
if (out_w > 1) {
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
int64_t in_dhw = in_d * in_h * in_w;
int64_t out_dhw = out_d * out_h * out_w;
int64_t in_cdhw = c * in_dhw;
int64_t out_cdhw = c * out_dhw;
auto pixelNum = n * out_cdhw;
backends::gpu::GpuLaunchConfig config =
backends::gpu::GetGpuLaunchConfig1D(dev_ctx, pixelNum);
if ("trilinear" == interp_method) {
hipLaunchKernelGGL(( KeTrilinearInterpFw<T>), dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
dev_ctx.stream(), input_data,
in_d,
in_h,
in_w,
n,
in_cdhw,
output_data,
out_d,
out_h,
out_w,
n,
out_cdhw,
c,
ratio_d,
ratio_h,
ratio_w,
align_corners,
align_mode,
data_layout);
} else if ("nearest" == interp_method) {
hipLaunchKernelGGL(( KeNearestNeighbor3DInterpFw<T>), dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
dev_ctx.stream(), input_data,
in_d,
in_h,
in_w,
n,
in_cdhw,
output_data,
out_d,
out_h,
out_w,
n,
out_cdhw,
c,
ratio_d,
ratio_h,
ratio_w,
align_corners,
data_layout);
}
}
template <typename T, typename Context>
void InterpolateKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
auto input_dims = x.dims();
if (input_dims.size() == 3) { // 1D interpolation
Interpolate1DCUDAFwd<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
} else if (input_dims.size() == 4) { // 2D interpolation
Interpolate2DCUDAFwd<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
} else if (input_dims.size() == 5) { // 3D interpolation
Interpolate3DCUDAFwd<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
}
// bilinear_interp_v2 forward entry point: a thin wrapper that forwards every
// argument unchanged to the rank dispatcher InterpolateKernel (the actual
// interpolation mode is carried by interp_method).
template <typename T, typename Context>
void BilinearInterpKernel(
    const Context& dev_ctx,
    const DenseTensor& x,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout,
    int out_d,
    int out_h,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  InterpolateKernel<T, Context>(dev_ctx,
                                x,
                                out_size,
                                size_tensor,
                                scale_tensor,
                                data_layout,
                                out_d,
                                out_h,
                                out_w,
                                scale,
                                interp_method,
                                align_corners,
                                align_mode,
                                output);
}
// nearest_interp_v2 forward entry point: a thin wrapper that forwards every
// argument unchanged to the rank dispatcher InterpolateKernel.
template <typename T, typename Context>
void NearestInterpKernel(
    const Context& dev_ctx,
    const DenseTensor& x,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout,
    int out_d,
    int out_h,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  InterpolateKernel<T, Context>(dev_ctx,
                                x,
                                out_size,
                                size_tensor,
                                scale_tensor,
                                data_layout,
                                out_d,
                                out_h,
                                out_w,
                                scale,
                                interp_method,
                                align_corners,
                                align_mode,
                                output);
}
// trilinear_interp_v2 forward entry point: a thin wrapper that forwards every
// argument unchanged to the rank dispatcher InterpolateKernel.
template <typename T, typename Context>
void TrilinearInterpKernel(
    const Context& dev_ctx,
    const DenseTensor& x,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout,
    int out_d,
    int out_h,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  InterpolateKernel<T, Context>(dev_ctx,
                                x,
                                out_size,
                                size_tensor,
                                scale_tensor,
                                data_layout,
                                out_d,
                                out_h,
                                out_w,
                                scale,
                                interp_method,
                                align_corners,
                                align_mode,
                                output);
}
// linear_interp_v2 forward entry point: a thin wrapper that forwards every
// argument unchanged to the rank dispatcher InterpolateKernel.
template <typename T, typename Context>
void LinearInterpKernel(
    const Context& dev_ctx,
    const DenseTensor& x,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout,
    int out_d,
    int out_h,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  InterpolateKernel<T, Context>(dev_ctx,
                                x,
                                out_size,
                                size_tensor,
                                scale_tensor,
                                data_layout,
                                out_d,
                                out_h,
                                out_w,
                                scale,
                                interp_method,
                                align_corners,
                                align_mode,
                                output);
}
// bicubic_interp_v2 forward entry point: a thin wrapper that forwards every
// argument unchanged to the rank dispatcher InterpolateKernel.
template <typename T, typename Context>
void BicubicInterpKernel(
    const Context& dev_ctx,
    const DenseTensor& x,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout,
    int out_d,
    int out_h,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  InterpolateKernel<T, Context>(dev_ctx,
                                x,
                                out_size,
                                size_tensor,
                                scale_tensor,
                                data_layout,
                                out_d,
                                out_h,
                                out_w,
                                scale,
                                interp_method,
                                align_corners,
                                align_mode,
                                output);
}
} // namespace phi
// GPU kernel registrations for the interpolate op family. The registered
// dtype lists differ per op; nearest_interp_v2 additionally registers
// int64_t.
PD_REGISTER_KERNEL(bilinear_interp_v2,
                   GPU,
                   ALL_LAYOUT,
                   phi::BilinearInterpKernel,
                   float,
                   double,
                   int) {}
PD_REGISTER_KERNEL(nearest_interp_v2,
                   GPU,
                   ALL_LAYOUT,
                   phi::NearestInterpKernel,
                   float,
                   double,
                   int,
                   int64_t) {}
PD_REGISTER_KERNEL(trilinear_interp_v2,
                   GPU,
                   ALL_LAYOUT,
                   phi::TrilinearInterpKernel,
                   float,
                   double,
                   int) {}
PD_REGISTER_KERNEL(linear_interp_v2,
                   GPU,
                   ALL_LAYOUT,
                   phi::LinearInterpKernel,
                   float,
                   double,
                   int) {}
PD_REGISTER_KERNEL(bicubic_interp_v2,
                   GPU,
                   ALL_LAYOUT,
                   phi::BicubicInterpKernel,
                   float,
                   double,
                   int) {}
| 8aaf08a2703054bf628b1b737efabf32711037aa.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/fast_divmod.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/interpolate_function.h"
#include "paddle/phi/kernels/interpolate_kernel.h"
namespace phi {
using paddle::platform::FastDivMod;
// Splits a (possibly fractional) source coordinate src_x into:
//   *in_img_idx - the clamped integer base index,
//   *x_id       - 1 if a right/next neighbor exists, else 0 (border guard),
//   *lambda1    - fractional weight toward the next sample,
//   *lambda2    - complementary weight (1 - lambda1).
template <typename T>
__forceinline__ __device__ void PreCalculatorForLinearInterpInputIndex(
    int* in_img_idx,
    int* x_id,
    T* lambda1,
    T* lambda2,
    T src_x,
    const int in_img_x) {
  // Clamp negative coordinates to the first sample before splitting.
  const T clamped = (src_x > 0) ? src_x : static_cast<T>(0.f);
  const int base = static_cast<int>(clamped);
  *in_img_idx = base;
  *x_id = (base < in_img_x - 1) ? 1 : 0;
  *lambda1 = clamped - base;
  *lambda2 = static_cast<T>(1.f) - *lambda1;
}
// 1D linear interpolation forward. Threads cover the flattened output
// (output_h = batch N, output_w = C*out_w for NCW or out_w*C for NWC) via a
// grid-stride loop; input_w is the per-sample input stride (C*in_w).
template <typename T>
__global__ void KeLinearInterpFw(const T* in,
                                 const size_t in_img_w,
                                 const size_t input_w,
                                 T* out,
                                 const size_t out_img_w,
                                 const size_t output_h,
                                 const size_t output_w,
                                 const size_t num_channels,
                                 const float ratio_w,
                                 const bool align_corners,
                                 const int align_mode,
                                 const DataLayout data_layout) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  bool align_flag = (align_mode == 0 && !align_corners);
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;  // batch index
    int out_id_w = tid % output_w;  // flattened (channel, width) index
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    int channel_id, out_img_idx;
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }
    // Map the output column back to a source column (half-pixel mapping when
    // align_flag is set), clamped to the valid range.
    int in_img_idx = align_flag
                         ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * out_img_idx);
    in_img_idx = (in_img_idx > 0) ? in_img_idx : 0;  // w
    int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;  // w_id
    T src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
    src_w = (src_w > 0) ? src_w : 0;
    T w1lambda =
        align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx;
    T w2lambda = 1.f - w1lambda;
    if (data_layout == DataLayout::kNCHW) {
      // BUGFIX: the batch offset is out_id_h * input_w (one full input
      // sample); the previous out_id_h * out_id_w mixed a batch index with
      // an output-element index and mis-addressed every sample after the
      // first. The NHWC branch below already used out_id_h * input_w.
      const T* in_pos =
          &in[out_id_h * input_w + channel_id * in_img_size + in_img_idx];
      // linear interpolation
      out[out_id_h * output_w + out_id_w] =
          w2lambda * in_pos[0] + w1lambda * in_pos[w_id];
    } else {
      const T* in_pos =
          &in[out_id_h * input_w + in_img_idx * num_channels + channel_id];
      // linear interpolation
      out[out_id_h * output_w + out_id_w] =
          w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels];
    }
  }
}
// Nearest-neighbor 2D forward specialized for NCHW. Launched as a 3D grid:
// (x, y) cover the output image and z strides across the fused N*C planes,
// so the (x, y) -> source mapping is computed once and reused per plane.
template <typename T>
__global__ void KeNearestNeighborInterpNCHWFw(const T* in,
                                              const size_t in_img_h,
                                              const size_t in_img_w,
                                              T* out,
                                              const size_t out_img_h,
                                              const size_t out_img_w,
                                              const size_t nc,
                                              const float ratio_h,
                                              const float ratio_w,
                                              const bool align_corners) {
  const int out_x = threadIdx.x + blockIdx.x * blockDim.x;
  const int out_y = threadIdx.y + blockIdx.y * blockDim.y;
  // The launch grid may overshoot the image; excess threads write nothing.
  if (out_x >= out_img_w || out_y >= out_img_h) {
    return;
  }
  // Nearest source pixel: round when align_corners, floor otherwise.
  const int in_x = align_corners ? static_cast<int>(ratio_w * out_x + 0.5)
                                 : static_cast<int>(ratio_w * out_x);
  const int in_y = align_corners ? static_cast<int>(ratio_h * out_y + 0.5)
                                 : static_cast<int>(ratio_h * out_y);
  const int plane_stride = blockDim.z * gridDim.z;
  int plane = threadIdx.z + blockIdx.z * blockDim.z;
  int src_idx = (plane * in_img_h + in_y) * in_img_w + in_x;
  int dst_idx = (plane * out_img_h + out_y) * out_img_w + out_x;
  const int src_step = plane_stride * in_img_h * in_img_w;
  const int dst_step = plane_stride * out_img_h * out_img_w;
  for (; plane < nc; plane += plane_stride) {
    out[dst_idx] = in[src_idx];
    src_idx += src_step;
    dst_idx += dst_step;
  }
}
// Nearest-neighbor 2D forward for channel-last layouts: one thread per
// output element via a flat grid-stride loop. `divmods` carries precomputed
// fast dividers used to decompose the flat output index.
template <typename T>
__global__ void KeNearestNeighborInterpFw(
    const T* in,
    const size_t in_img_h,
    const size_t in_img_w,
    const size_t input_h,
    const size_t input_w,
    T* out,
    const size_t out_img_h,
    const size_t out_img_w,
    const size_t output_h,
    const size_t output_w,
    const size_t num_channels,
    const float ratio_h,
    const float ratio_w,
    const bool align_corners,
    funcs::FastDivModForInterpolate divmods) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  // Note: the previous in_img_size / out_img_size locals were never used and
  // have been removed.
  for (; tid < nthreads; tid += stride) {
    // Decompose tid -> (batch, spatial row, column, channel) via FastDivMod.
    auto out_id_divmod = divmods.output_w_div.Divmod(tid);
    int out_id_h = out_id_divmod.val[0];
    int out_id_w = out_id_divmod.val[1];
    int channel_id = divmods.channels_div.Divmod(tid).val[1];
    auto outimg_id_divmod = divmods.output_wc_div.Divmod(out_id_w);
    int out_img_idy = outimg_id_divmod.val[0];
    int out_img_idx =
        divmods.channels_div.Divmod(outimg_id_divmod.val[1]).val[0];
    // Nearest source pixel: round when align_corners, floor otherwise.
    int in_img_idy = (align_corners)
                         ? static_cast<int>(ratio_h * out_img_idy + 0.5)
                         : static_cast<int>(ratio_h * out_img_idy);
    int in_img_idx = (align_corners)
                         ? static_cast<int>(ratio_w * out_img_idx + 0.5)
                         : static_cast<int>(ratio_w * out_img_idx);
    out[tid] = in[out_id_h * input_w + in_img_idy * in_img_w * num_channels +
                  in_img_idx * num_channels + channel_id];
  }
}
// Bilinear 2D interpolation forward for channel-last layouts. One thread per
// output element via a flat grid-stride loop; `divmods` carries precomputed
// fast dividers for decomposing the flat index. align_type_value is 0.5 for
// the half-pixel source mapping (align_mode == 0 && !align_corners) and 0
// otherwise.
template <typename T>
__global__ void KeBilinearInterpFw(const T* in,
                                   const size_t in_img_h,
                                   const size_t in_img_w,
                                   const size_t input_h,
                                   const size_t input_w,
                                   T* out,
                                   const size_t out_img_h,
                                   const size_t out_img_w,
                                   const size_t output_h,
                                   const size_t output_w,
                                   const size_t num_channels,
                                   const float ratio_h,
                                   const float ratio_w,
                                   const T align_type_value,
                                   funcs::FastDivModForInterpolate divmods) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; tid < nthreads; tid += stride) {
    // Decompose tid -> (batch, spatial row, column, channel) via FastDivMod.
    auto out_id_divmod = divmods.output_w_div.Divmod(tid);
    int out_id_h = out_id_divmod.val[0];
    int out_id_w = out_id_divmod.val[1];
    int channel_id = divmods.channels_div.Divmod(tid).val[1];
    auto outimg_id_divmod = divmods.output_wc_div.Divmod(out_id_w);
    int out_img_idy = outimg_id_divmod.val[0];
    int out_img_idx =
        divmods.channels_div.Divmod(outimg_id_divmod.val[1]).val[0];
    int in_img_idx, in_img_idy, h_id, w_id;
    T h1lambda, w1lambda, h2lambda, w2lambda;
    T src_w = ratio_w * (out_img_idx + align_type_value) - align_type_value;
    T src_h = ratio_h * (out_img_idy + align_type_value) - align_type_value;
    // Split each fractional source coordinate into a base index, a neighbor
    // step flag (0 at the border) and the two interpolation weights.
    PreCalculatorForLinearInterpInputIndex(
        &in_img_idx, &w_id, &w1lambda, &w2lambda, src_w, in_img_w);
    PreCalculatorForLinearInterpInputIndex(
        &in_img_idy, &h_id, &h1lambda, &h2lambda, src_h, in_img_h);
    // bilinear interpolation
    const T* in_pos =
        &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels +
            in_img_idx * num_channels + channel_id];
    out[tid] =
        h2lambda *
            (w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels]) +
        h1lambda *
            (w2lambda * in_pos[h_id * in_img_w * num_channels] +
             w1lambda *
                 in_pos[h_id * in_img_w * num_channels + w_id * num_channels]);
  }
}
// Bilinear 2D interpolation forward specialized for NCHW. Launched as a 3D
// grid: (x, y) cover the output image, z strides across the fused N*C
// planes so the per-pixel weights are computed once and reused per plane.
// align_type_value is 0.5 for the half-pixel mapping and 0 otherwise.
template <typename T>
__global__ void KeBilinearInterpNCHWFw(const T* in,
                                       const size_t in_img_h,
                                       const size_t in_img_w,
                                       T* out,
                                       const size_t out_img_h,
                                       const size_t out_img_w,
                                       const size_t nc,
                                       const float ratio_h,
                                       const float ratio_w,
                                       const T align_type_value) {
  int out_img_idx = threadIdx.x + blockIdx.x * blockDim.x;
  int out_img_idy = threadIdx.y + blockIdx.y * blockDim.y;
  int nc_id = threadIdx.z + blockIdx.z * blockDim.z;
  int nc_stride = blockDim.z * gridDim.z;
  int in_img_idx, in_img_idy, h_id, w_id;
  T h1lambda, w1lambda, h2lambda, w2lambda;
  T src_w = ratio_w * (out_img_idx + align_type_value) - align_type_value;
  T src_h = ratio_h * (out_img_idy + align_type_value) - align_type_value;
  // Split each fractional source coordinate into a base index, a neighbor
  // step flag (0 at the border) and the two interpolation weights.
  PreCalculatorForLinearInterpInputIndex(
      &in_img_idx, &w_id, &w1lambda, &w2lambda, src_w, in_img_w);
  PreCalculatorForLinearInterpInputIndex(
      &in_img_idy, &h_id, &h1lambda, &h2lambda, src_h, in_img_h);
  int in_index = (nc_id * in_img_h + in_img_idy) * in_img_w + in_img_idx;
  int in_index_stride = nc_stride * in_img_h * in_img_w;
  int out_index = (nc_id * out_img_h + out_img_idy) * out_img_w + out_img_idx;
  int out_index_stride = nc_stride * out_img_h * out_img_w;
  // prevent from multiple threads writing
  if (out_img_idx < out_img_w && out_img_idy < out_img_h) {
    while (nc_id < nc) {
      const T* in_pos = &in[in_index];
      // Weighted sum of the 2x2 source neighborhood.
      out[out_index] =
          h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id]) +
          h1lambda * (w2lambda * in_pos[h_id * in_img_w] +
                      w1lambda * in_pos[h_id * in_img_w + w_id]);
      in_index += in_index_stride;
      out_index += out_index_stride;
      nc_id += nc_stride;
    }
  }
}
// Cubic-convolution interpolation of four consecutive samples x0..x3 at
// fractional position t (between x1 and x2), with kernel parameter
// a = -0.75. The outer taps use the |d| in (1, 2) kernel branch
// (CubicConvolution2), the inner taps the |d| in (0, 1) branch
// (CubicConvolution1).
template <typename T>
__device__ __forceinline__ static T Kecubic_interp(
    const T x0, const T x1, const T x2, const T x3, T t) {
  const T a = -0.75;
  const T d1 = t;        // distance from x1
  const T d2 = 1.0 - t;  // distance from x2
  const T w0 = funcs::CubicConvolution2<T>(d1 + 1.0, a);
  const T w1 = funcs::CubicConvolution1<T>(d1, a);
  const T w2 = funcs::CubicConvolution1<T>(d2, a);
  const T w3 = funcs::CubicConvolution2<T>(d2 + 1.0, a);
  return x0 * w0 + x1 * w1 + x2 * w2 + x3 * w3;
}
// Bicubic 2D interpolation forward, handling both NCHW and channel-last
// layouts via data_layout. Each thread produces one output element: it
// gathers a 4x4 source neighborhood (edge-clamped), interpolates each of
// the four rows with Kecubic_interp, then interpolates the row results
// vertically.
template <typename T>
__global__ void KeBicubicInterpFw(const T* in,
                                  const size_t in_img_h,
                                  const size_t in_img_w,
                                  const size_t input_h,
                                  const size_t input_w,
                                  T* out,
                                  const size_t out_img_h,
                                  const size_t out_img_w,
                                  const size_t output_h,
                                  const size_t output_w,
                                  const size_t num_channels,
                                  const float ratio_h,
                                  const float ratio_w,
                                  const bool align_corners,
                                  const DataLayout data_layout) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    int channel_id, out_img_idy, out_img_idx;
    // Recover (channel, y, x) from the flat index; the decomposition differs
    // per layout.
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idy = (out_id_w % out_img_size) / out_img_w;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idy = out_id_w / (out_img_w * num_channels);
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }
    // Fractional source coordinates (half-pixel mapping unless aligning
    // corners), split into integer base and fractional offset.
    T in_img_idy = align_corners
                       ? static_cast<T>(ratio_h * out_img_idy)
                       : static_cast<T>(ratio_h * (out_img_idy + 0.5) - 0.5);
    int input_y = floorf(in_img_idy);
    const T y_t = in_img_idy - input_y;
    T in_img_idx = align_corners
                       ? static_cast<T>(ratio_w * out_img_idx)
                       : static_cast<T>(ratio_w * (out_img_idx + 0.5) - 0.5);
    int input_x = floorf(in_img_idx);
    const T x_t = in_img_idx - input_x;
    T coefficients[4];
    const T* in_pos_0;
    const T* in_pos_1;
    const T* in_pos_2;
    const T* in_pos_3;
    int access_x_0;
    if (data_layout == DataLayout::kNCHW) {
      // One row of the 4x4 neighborhood per iteration; both axes are
      // clamped to the image border.
      for (int k = 0; k < 4; k++) {
        int access_y =
            max(min(input_y - 1 + k, static_cast<int>(in_img_h - 1)), 0);
        access_x_0 = max(min(input_x - 1, static_cast<int>(in_img_w - 1)), 0);
        int access_x_1 =
            max(min(input_x + 0, static_cast<int>(in_img_w - 1)), 0);
        int access_x_2 =
            max(min(input_x + 1, static_cast<int>(in_img_w - 1)), 0);
        int access_x_3 =
            max(min(input_x + 2, static_cast<int>(in_img_w - 1)), 0);
        in_pos_0 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_0];
        in_pos_1 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_1];
        in_pos_2 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_2];
        in_pos_3 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_3];
        coefficients[k] = Kecubic_interp<T>(
            in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t);
      }
      // Vertical pass over the four horizontally-interpolated rows.
      out[out_id_h * output_w + out_id_w] = Kecubic_interp<T>(coefficients[0],
                                                              coefficients[1],
                                                              coefficients[2],
                                                              coefficients[3],
                                                              y_t);
    } else {
      for (int k = 0; k < 4; k++) {
        int access_y =
            max(min(input_y - 1 + k, static_cast<int>((in_img_h - 1))), 0);
        int access_x_0 =
            max(min(input_x - 1, static_cast<int>((in_img_w - 1))), 0);
        int access_x_1 =
            max(min(input_x + 0, static_cast<int>((in_img_w - 1))), 0);
        int access_x_2 =
            max(min(input_x + 1, static_cast<int>((in_img_w - 1))), 0);
        int access_x_3 =
            max(min(input_x + 2, static_cast<int>((in_img_w - 1))), 0);
        const T* in_pos_0 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_0 * num_channels + channel_id];
        const T* in_pos_1 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_1 * num_channels + channel_id];
        const T* in_pos_2 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_2 * num_channels + channel_id];
        const T* in_pos_3 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_3 * num_channels + channel_id];
        coefficients[k] = Kecubic_interp(
            in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t);
      }
      // Vertical pass over the four horizontally-interpolated rows.
      out[out_id_h * output_w + out_id_w] =
          static_cast<T>(Kecubic_interp(coefficients[0],
                                        coefficients[1],
                                        coefficients[2],
                                        coefficients[3],
                                        y_t));
    }
  }
}
// Trilinear 3D interpolation forward, handling both NCDHW and channel-last
// layouts via data_layout. Each thread produces one output element from the
// 2x2x2 source neighborhood: for each of depth/height/width the source
// coordinate is split into a clamped base index, a neighbor step flag
// (d_id/h_id/w_id, 0 at the border) and a pair of weights (x1lambda,
// x2lambda).
template <typename T>
__global__ void KeTrilinearInterpFw(const T* in,
                                    const size_t in_img_d,
                                    const size_t in_img_h,
                                    const size_t in_img_w,
                                    const size_t input_h,
                                    const size_t input_w,
                                    T* out,
                                    const size_t out_img_d,
                                    const size_t out_img_h,
                                    const size_t out_img_w,
                                    const size_t output_h,
                                    const size_t output_w,
                                    const size_t num_channels,
                                    const float ratio_d,
                                    const float ratio_h,
                                    const float ratio_w,
                                    const bool align_corners,
                                    const int align_mode,
                                    const DataLayout data_layout) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  bool align_flag = (align_mode == 0 && !align_corners);
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    int channel_id, out_img_idt, out_img_idy, out_img_idx;
    // Recover (channel, t, y, x) from the flat index; the decomposition
    // differs per layout.
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w;
      out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels);
      out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) /
                    (out_img_w * num_channels);
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }
    // Depth axis: base index, step flag and weights.
    int in_img_idt = align_flag
                         ? static_cast<int>(ratio_d * (out_img_idt + 0.5) - 0.5)
                         : static_cast<int>(ratio_d * out_img_idt);
    in_img_idt = (in_img_idt > 0) ? in_img_idt : 0;
    int d_id = (in_img_idt < in_img_d - 1) ? 1 : 0;
    T src_d = ratio_d * (out_img_idt + 0.5) - 0.5;
    src_d = (src_d > 0) ? src_d : 0;
    T d1lambda =
        align_flag ? src_d - in_img_idt : ratio_d * out_img_idt - in_img_idt;
    T d2lambda = 1.f - d1lambda;
    // Height axis: base index, step flag and weights.
    int in_img_idy = align_flag
                         ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * out_img_idy);
    in_img_idy = (in_img_idy > 0) ? in_img_idy : 0;
    int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0;
    T src_h = ratio_h * (out_img_idy + 0.5) - 0.5;
    src_h = (src_h > 0) ? src_h : 0;
    T h1lambda =
        align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy;
    T h2lambda = 1.f - h1lambda;
    // Width axis: base index, step flag and weights.
    int in_img_idx = align_flag
                         ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * out_img_idx);
    in_img_idx = (in_img_idx > 0) ? in_img_idx : 0;
    int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;
    T src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
    src_w = (src_w > 0) ? src_w : 0;
    T w1lambda =
        align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx;
    T w2lambda = 1.f - w1lambda;
    if (data_layout == DataLayout::kNCHW) {
      // in_pos1 is the front depth slice of the 2x2x2 cube, in_pos2 the back
      // slice (same slice when d_id == 0 at the border).
      int in_pos1_idx = out_id_h * input_w + channel_id * in_img_size +
                        (in_img_idt * in_img_h + in_img_idy) * in_img_w +
                        in_img_idx;
      const T* in_pos1 = &in[in_pos1_idx];
      int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w;
      const T* in_pos2 = &in[in_pos2_idx];
      // trilinear interpolation
      out[out_id_h * output_w + out_id_w] =
          d2lambda *
              (h2lambda * (w2lambda * in_pos1[0] + w1lambda * in_pos1[w_id]) +
               h1lambda * (w2lambda * in_pos1[h_id * in_img_w] +
                           w1lambda * in_pos1[h_id * in_img_w + w_id])) +
          d1lambda *
              (h2lambda * (w2lambda * in_pos2[0] + w1lambda * in_pos2[w_id]) +
               h1lambda * (w2lambda * in_pos2[h_id * in_img_w] +
                           w1lambda * in_pos2[h_id * in_img_w + w_id]));
    } else {
      int in_pos1_idx = out_id_h * input_w +
                        in_img_idt * in_img_h * in_img_w * num_channels +
                        in_img_idy * in_img_w * num_channels +
                        in_img_idx * num_channels + channel_id;
      const T* in_pos1 = &in[in_pos1_idx];
      int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w * num_channels;
      const T* in_pos2 = &in[in_pos2_idx];
      // trilinear interpolation
      out[out_id_h * output_w + out_id_w] =
          d2lambda *
              (h2lambda * (w2lambda * in_pos1[0] +
                           w1lambda * in_pos1[w_id * num_channels]) +
               h1lambda * (w2lambda * in_pos1[h_id * in_img_w * num_channels] +
                           w1lambda * in_pos1[h_id * in_img_w * num_channels +
                                              w_id * num_channels])) +
          d1lambda *
              (h2lambda * (w2lambda * in_pos2[0] +
                           w1lambda * in_pos2[w_id * num_channels]) +
               h1lambda * (w2lambda * in_pos2[h_id * in_img_w * num_channels] +
                           w1lambda * in_pos2[h_id * in_img_w * num_channels +
                                              w_id * num_channels]));
    }
  }
}
// Nearest-neighbor 3D interpolation forward, handling both NCDHW and
// channel-last layouts via data_layout. Each thread copies the nearest
// source voxel for one output element (round when align_corners, floor
// otherwise).
template <typename T>
__global__ void KeNearestNeighbor3DInterpFw(const T* in,
                                            const size_t in_img_d,
                                            const size_t in_img_h,
                                            const size_t in_img_w,
                                            const size_t input_h,
                                            const size_t input_w,
                                            T* out,
                                            const size_t out_img_d,
                                            const size_t out_img_h,
                                            const size_t out_img_w,
                                            const size_t output_h,
                                            const size_t output_w,
                                            const size_t num_channels,
                                            const float ratio_d,
                                            const float ratio_h,
                                            const float ratio_w,
                                            const bool align_corners,
                                            const DataLayout data_layout) {
  int nthreads = output_h * output_w;  // ncdhw
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    int channel_id, out_img_idt, out_img_idy, out_img_idx;
    // Recover (channel, t, y, x) from the flat index; the decomposition
    // differs per layout.
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w;
      out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels);
      out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) /
                    (out_img_w * num_channels);
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }
    int in_img_idt = (align_corners)
                         ? static_cast<int>(ratio_d * out_img_idt + 0.5)
                         : static_cast<int>(ratio_d * out_img_idt);
    int in_img_idy = (align_corners)
                         ? static_cast<int>(ratio_h * out_img_idy + 0.5)
                         : static_cast<int>(ratio_h * out_img_idy);
    int in_img_idx = (align_corners)
                         ? static_cast<int>(ratio_w * out_img_idx + 0.5)
                         : static_cast<int>(ratio_w * out_img_idx);
    if (data_layout == DataLayout::kNCHW) {
      out[tid] = in[out_id_h * input_w + channel_id * in_img_size +
                    in_img_idt * in_img_h * in_img_w + in_img_idy * in_img_w +
                    in_img_idx];
    } else {
      out[tid] = in[out_id_h * input_w +
                    in_img_idt * in_img_h * in_img_w * num_channels +
                    in_img_idy * in_img_w * num_channels +
                    in_img_idx * num_channels + channel_id];
    }
  }
}
// 1D (linear) interpolation forward. Resolves the final output width from,
// in priority order: SizeTensor list > scale (tensor or attr) > OutSize
// tensor, validates it, then launches KeLinearInterpFw. Falls back to a
// plain copy when the width is unchanged.
template <typename T, typename Context>
static void Interpolate1DCUDAFwd(
    const Context& dev_ctx,
    const DenseTensor& input,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout_str,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  auto* input_data = input.data<T>();
  const DataLayout data_layout =
      paddle::framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  float scale_w = -1;
  if (size_tensor && size_tensor->size() > 0) {
    // have size tensor
    auto new_size = funcs::get_new_shape(size_tensor.get());
    out_w = new_size[0];
  } else {
    if (scale_tensor) {
      auto scale_data =
          funcs::get_new_data_from_tensor<float>(scale_tensor.get_ptr());
      scale_w = scale_data[0];
      // BUGFIX: scale_w is a float, so the message uses %f (was %d).
      PADDLE_ENFORCE_EQ(
          scale_w > 0,
          true,
          errors::InvalidArgument(
              "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_w));
    } else {
      if (scale.size() > 0) {
        scale_w = scale[0];
        PADDLE_ENFORCE_EQ(
            scale_w > 0,
            true,
            errors::InvalidArgument(
                "The scale_w in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_w));
      }
    }
    if (scale_w > 0.) {
      out_w = static_cast<int>(in_w * scale_w);
    }
    // The (device-side) OutSize tensor overrides the scale-derived width.
    if (out_size) {
      DenseTensor sizes;
      paddle::framework::TensorCopySync(
          *out_size, paddle::platform::CPUPlace(), &sizes);
      auto size_data = sizes.data<int>();
      out_w = size_data[0];
    }
  }
  PADDLE_ENFORCE_GT(
      out_w,
      0,
      errors::InvalidArgument("out_w in Attr(out_shape) of Op(interpolate) "
                              "should be greater than 0."));
  phi::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_w};
  } else {
    dim_out = {n, out_w, c};
  }
  output->Resize(dim_out);
  auto output_data = dev_ctx.template Alloc<T>(output);
  if (in_w == out_w) {
    // Identity resize: no interpolation needed.
    paddle::framework::TensorCopy(input, dev_ctx.GetPlace(), output);
    return;
  }
  // ratio_w maps an output column to a source column; align_corners uses the
  // endpoint-preserving (in - 1) / (out - 1) mapping.
  float ratio_w = 0.f;
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1.0) / (out_w - 1.0)
                              : static_cast<float>(new_scale_w);
  }
  int64_t in_cw = c * in_w;
  int64_t out_cw = c * out_w;
  auto pixelNum = n * out_cw;
  backends::gpu::GpuLaunchConfig config =
      backends::gpu::GetGpuLaunchConfig1D(dev_ctx, pixelNum);
  if ("linear" == interp_method) {
    KeLinearInterpFw<T><<<config.block_per_grid,
                          config.thread_per_block,
                          0,
                          dev_ctx.stream()>>>(input_data,
                                              in_w,
                                              in_cw,
                                              output_data,
                                              out_w,
                                              n,
                                              out_cw,
                                              c,
                                              ratio_w,
                                              align_corners,
                                              align_mode,
                                              data_layout);
  }
}
// 2D (nearest / bilinear / bicubic) interpolation forward. Resolves
// (out_h, out_w) from, in priority order: SizeTensor list > scale (tensor
// or attr) > OutSize tensor, validates them, then launches the kernel that
// matches interp_method (with a dedicated 3D-launch path for NCHW nearest
// and bilinear). Falls back to a plain copy when the spatial size is
// unchanged.
template <typename T, typename Context>
static void Interpolate2DCUDAFwd(
    const Context& dev_ctx,
    const DenseTensor& input,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout_str,
    int out_h,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  auto* input_data = input.data<T>();
  const DataLayout data_layout =
      paddle::framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  float scale_w = -1;
  float scale_h = -1;
  if (size_tensor && size_tensor->size() > 0) {
    // have size tensor
    auto new_size = funcs::get_new_shape(size_tensor.get());
    out_h = new_size[0];
    out_w = new_size[1];
  } else {
    if (scale_tensor) {
      auto scale_data =
          funcs::get_new_data_from_tensor<float>(scale_tensor.get_ptr());
      // A single-element Scale tensor applies to both dimensions.
      if (scale_data.size() > 1) {
        scale_h = scale_data[0];
        scale_w = scale_data[1];
      } else {
        scale_h = scale_data[0];
        scale_w = scale_data[0];
      }
      // BUGFIX: scale_w / scale_h are floats, so the messages use %f
      // (was %d).
      PADDLE_ENFORCE_EQ(
          scale_w > 0,
          true,
          errors::InvalidArgument(
              "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_w));
      PADDLE_ENFORCE_EQ(
          scale_h > 0,
          true,
          errors::InvalidArgument(
              "The scale_h in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_h));
    } else {
      if (scale.size() > 1) {
        scale_w = scale[1];
        scale_h = scale[0];
        PADDLE_ENFORCE_EQ(
            scale_w > 0,
            true,
            errors::InvalidArgument(
                "The scale_w in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_w));
        PADDLE_ENFORCE_EQ(
            scale_h > 0,
            true,
            errors::InvalidArgument(
                "The scale_h in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_h));
      }
    }
    if (scale_w > 0. && scale_h > 0.) {
      out_h = static_cast<int>(in_h * scale_h);
      out_w = static_cast<int>(in_w * scale_w);
    }
    // The (device-side) OutSize tensor overrides the scale-derived shape.
    if (out_size) {
      DenseTensor sizes;
      paddle::framework::TensorCopySync(
          *out_size, paddle::platform::CPUPlace(), &sizes);
      auto size_data = sizes.data<int>();
      out_h = size_data[0];
      out_w = size_data[1];
    }
  }
  PADDLE_ENFORCE_GT(
      out_h,
      0,
      errors::InvalidArgument("out_h in Attr(out_shape) of Op(interpolate) "
                              "should be greater than 0."));
  PADDLE_ENFORCE_GT(
      out_w,
      0,
      errors::InvalidArgument("out_w in Attr(out_shape) of Op(interpolate) "
                              "should be greater than 0."));
  phi::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_h, out_w};
  } else {
    dim_out = {n, out_h, out_w, c};
  }
  output->Resize(dim_out);
  auto output_data = dev_ctx.template Alloc<T>(output);
  if (in_h == out_h && in_w == out_w) {
    // Identity resize: no interpolation needed.
    paddle::framework::TensorCopy(input, dev_ctx.GetPlace(), output);
    return;
  }
  // Output-to-source coordinate ratios; align_corners uses the
  // endpoint-preserving (in - 1) / (out - 1) mapping.
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_h > 1) {
    float new_scale_h = 0.f;
    new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
                                : static_cast<float>(in_h) / out_h;
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(new_scale_h);
  }
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  int64_t in_hw = in_h * in_w;
  int64_t out_hw = out_h * out_w;
  int64_t in_chw = c * in_hw;
  int64_t out_chw = c * out_hw;
  auto pixelNum = n * out_chw;
  backends::gpu::GpuLaunchConfig config =
      backends::gpu::GetGpuLaunchConfig1D(dev_ctx, pixelNum);
  if ("nearest" == interp_method) {
    if (data_layout == DataLayout::kNCHW) {
      // get launch 3D config
      int nc = n * c;
      backends::gpu::GpuLaunchConfig config_3d =
          backends::gpu::GetGpuLaunchConfig3D(dev_ctx, nc, out_h, out_w);
      KeNearestNeighborInterpNCHWFw<T><<<config_3d.block_per_grid,
                                         config_3d.thread_per_block,
                                         0,
                                         dev_ctx.stream()>>>(input_data,
                                                             in_h,
                                                             in_w,
                                                             output_data,
                                                             out_h,
                                                             out_w,
                                                             nc,
                                                             ratio_h,
                                                             ratio_w,
                                                             align_corners);
    } else {
      int64_t cw = c * out_w;
      auto interp_divmods = funcs::FastDivModForInterpolate(c, out_chw, cw);
      KeNearestNeighborInterpFw<T><<<config.block_per_grid,
                                     config.thread_per_block,
                                     0,
                                     dev_ctx.stream()>>>(input_data,
                                                         in_h,
                                                         in_w,
                                                         n,
                                                         in_chw,
                                                         output_data,
                                                         out_h,
                                                         out_w,
                                                         n,
                                                         out_chw,
                                                         c,
                                                         ratio_h,
                                                         ratio_w,
                                                         align_corners,
                                                         interp_divmods);
    }
  } else if ("bilinear" == interp_method) {
    dim3 thread_num = config.thread_per_block;
#ifdef WITH_NV_JETSON
    if (config.compute_capability == 53 || config.compute_capability == 62) {
      thread_num = 512;
    }
#endif
    // 0.5 selects the half-pixel source mapping inside the kernels.
    const T align_type_value = (align_mode == 0 && !align_corners) ? 0.5f : 0;
    if (data_layout == DataLayout::kNCHW) {
      // get launch 3D config
      int nc = n * c;
      backends::gpu::GpuLaunchConfig config_3d =
          backends::gpu::GetGpuLaunchConfig3D(dev_ctx, nc, out_h, out_w);
      KeBilinearInterpNCHWFw<T><<<config_3d.block_per_grid,
                                  config_3d.thread_per_block,
                                  0,
                                  dev_ctx.stream()>>>(input_data,
                                                      in_h,
                                                      in_w,
                                                      output_data,
                                                      out_h,
                                                      out_w,
                                                      nc,
                                                      ratio_h,
                                                      ratio_w,
                                                      align_type_value);
    } else {
      int64_t cw = c * out_w;
      auto interp_divmods = funcs::FastDivModForInterpolate(c, out_chw, cw);
      KeBilinearInterpFw<T>
          <<<config.block_per_grid, thread_num, 0, dev_ctx.stream()>>>(
              input_data,
              in_h,
              in_w,
              n,
              in_chw,
              output_data,
              out_h,
              out_w,
              n,
              out_chw,
              c,
              ratio_h,
              ratio_w,
              align_type_value,
              interp_divmods);
    }
  } else if ("bicubic" == interp_method) {
#ifdef __HIPCC__
    constexpr int thread_per_block = 256;
#else
    constexpr int thread_per_block = 512;
#endif
    KeBicubicInterpFw<T>
        <<<config.block_per_grid, thread_per_block, 0, dev_ctx.stream()>>>(
            input_data,
            in_h,
            in_w,
            n,
            in_chw,
            output_data,
            out_h,
            out_w,
            n,
            out_chw,
            c,
            ratio_h,
            ratio_w,
            align_corners,
            data_layout);
  }
}
template <typename T, typename Context>
static void Interpolate3DCUDAFwd(
const Context& dev_ctx,
const DenseTensor& input,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout_str,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
auto* input_data = input.data<T>();
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_layout_str);
int n, c, in_d, in_h, in_w;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
float scale_w = -1;
float scale_d = -1;
float scale_h = -1;
if (size_tensor && size_tensor->size() > 0) {
// have size tensor
auto new_size = funcs::get_new_shape(size_tensor.get());
out_d = new_size[0];
out_h = new_size[1];
out_w = new_size[2];
} else {
if (scale_tensor) {
auto scale_data =
funcs::get_new_data_from_tensor<float>(scale_tensor.get_ptr());
if (scale_data.size() > 1) {
scale_d = scale_data[0];
scale_h = scale_data[1];
scale_w = scale_data[2];
} else {
scale_d = scale_data[0];
scale_h = scale_data[0];
scale_w = scale_data[0];
}
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
PADDLE_ENFORCE_EQ(
scale_h > 0,
true,
errors::InvalidArgument(
"The scale_h in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_h));
PADDLE_ENFORCE_EQ(
scale_d > 0,
true,
errors::InvalidArgument(
"The scale_d in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_d));
} else {
if (scale.size() > 1) {
scale_d = scale[0];
scale_h = scale[1];
scale_w = scale[2];
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
PADDLE_ENFORCE_EQ(
scale_h > 0,
true,
errors::InvalidArgument(
"The scale_h in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_h));
PADDLE_ENFORCE_EQ(
scale_d > 0,
true,
errors::InvalidArgument(
"The scale_d in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_d));
}
}
if (scale_d > 0. && scale_h > 0. && scale_w > 0.) {
out_d = static_cast<int>(in_d * scale_d);
out_h = static_cast<int>(in_h * scale_h);
out_w = static_cast<int>(in_w * scale_w);
}
if (out_size) {
DenseTensor sizes;
paddle::framework::TensorCopySync(
*out_size, paddle::platform::CPUPlace(), &sizes);
auto size_data = sizes.data<int>();
out_d = size_data[0];
out_h = size_data[1];
out_w = size_data[2];
}
}
PADDLE_ENFORCE_GT(
out_d,
0,
errors::InvalidArgument("out_d in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
PADDLE_ENFORCE_GT(
out_h,
0,
errors::InvalidArgument("out_h in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
PADDLE_ENFORCE_GT(
out_w,
0,
errors::InvalidArgument("out_w in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
phi::DDim dim_out;
if (data_layout == DataLayout::kNCHW) {
dim_out = {n, c, out_d, out_h, out_w};
} else {
dim_out = {n, out_d, out_h, out_w, c};
}
output->Resize(dim_out);
auto output_data = dev_ctx.template Alloc<T>(output);
if (in_d == out_d && in_h == out_h && in_w == out_w) {
paddle::framework::TensorCopy(input, dev_ctx.GetPlace(), output);
return;
}
float ratio_d = 0.f;
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_d > 1) {
float new_scale_d = 0.f;
new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d)
: static_cast<float>(in_d) / out_d;
ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
: static_cast<float>(new_scale_d);
}
if (out_h > 1) {
float new_scale_h = 0.f;
new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
: static_cast<float>(in_h) / out_h;
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(new_scale_h);
}
if (out_w > 1) {
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
int64_t in_dhw = in_d * in_h * in_w;
int64_t out_dhw = out_d * out_h * out_w;
int64_t in_cdhw = c * in_dhw;
int64_t out_cdhw = c * out_dhw;
auto pixelNum = n * out_cdhw;
backends::gpu::GpuLaunchConfig config =
backends::gpu::GetGpuLaunchConfig1D(dev_ctx, pixelNum);
if ("trilinear" == interp_method) {
KeTrilinearInterpFw<T><<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(input_data,
in_d,
in_h,
in_w,
n,
in_cdhw,
output_data,
out_d,
out_h,
out_w,
n,
out_cdhw,
c,
ratio_d,
ratio_h,
ratio_w,
align_corners,
align_mode,
data_layout);
} else if ("nearest" == interp_method) {
KeNearestNeighbor3DInterpFw<T><<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(input_data,
in_d,
in_h,
in_w,
n,
in_cdhw,
output_data,
out_d,
out_h,
out_w,
n,
out_cdhw,
c,
ratio_d,
ratio_h,
ratio_w,
align_corners,
data_layout);
}
}
template <typename T, typename Context>
void InterpolateKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
auto input_dims = x.dims();
if (input_dims.size() == 3) { // 1D interpolation
Interpolate1DCUDAFwd<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
} else if (input_dims.size() == 4) { // 2D interpolation
Interpolate2DCUDAFwd<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
} else if (input_dims.size() == 5) { // 3D interpolation
Interpolate3DCUDAFwd<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
}
template <typename T, typename Context>
void BilinearInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
template <typename T, typename Context>
void NearestInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
template <typename T, typename Context>
void TrilinearInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
template <typename T, typename Context>
void LinearInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
template <typename T, typename Context>
void BicubicInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
} // namespace phi
PD_REGISTER_KERNEL(bilinear_interp_v2,
GPU,
ALL_LAYOUT,
phi::BilinearInterpKernel,
float,
double,
int) {}
PD_REGISTER_KERNEL(nearest_interp_v2,
GPU,
ALL_LAYOUT,
phi::NearestInterpKernel,
float,
double,
int,
int64_t) {}
PD_REGISTER_KERNEL(trilinear_interp_v2,
GPU,
ALL_LAYOUT,
phi::TrilinearInterpKernel,
float,
double,
int) {}
PD_REGISTER_KERNEL(linear_interp_v2,
GPU,
ALL_LAYOUT,
phi::LinearInterpKernel,
float,
double,
int) {}
PD_REGISTER_KERNEL(bicubic_interp_v2,
GPU,
ALL_LAYOUT,
phi::BicubicInterpKernel,
float,
double,
int) {}
|
c9bacce69816f48b8c761b2b41f557881a3fbe31.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/inference/tensorrt/plugin/yolo_box_head_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
inline __device__ float SigmoidGPU(const float& x) {
return 1.0f / (1.0f + __expf(-x));
}
__global__ void YoloBoxHeadKernel(const float* input, float* output,
const int grid_size_x, const int grid_size_y,
const int class_num, const int anchors_num) {
int x_id = blockIdx.x * blockDim.x + threadIdx.x;
int y_id = blockIdx.y * blockDim.y + threadIdx.y;
int z_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((x_id >= grid_size_x) || (y_id >= grid_size_y) || (z_id >= anchors_num)) {
return;
}
const int grids_num = grid_size_x * grid_size_y;
const int bbindex = y_id * grid_size_x + x_id;
// objectness
output[bbindex + grids_num * (z_id * (5 + class_num) + 4)] =
SigmoidGPU(input[bbindex + grids_num * (z_id * (5 + class_num) + 4)]);
// x
output[bbindex + grids_num * (z_id * (5 + class_num) + 0)] =
SigmoidGPU(input[bbindex + grids_num * (z_id * (5 + class_num) + 0)]);
// y
output[bbindex + grids_num * (z_id * (5 + class_num) + 1)] =
SigmoidGPU(input[bbindex + grids_num * (z_id * (5 + class_num) + 1)]);
// w
output[bbindex + grids_num * (z_id * (5 + class_num) + 2)] =
__expf(input[bbindex + grids_num * (z_id * (5 + class_num) + 2)]);
// h
output[bbindex + grids_num * (z_id * (5 + class_num) + 3)] =
__expf(input[bbindex + grids_num * (z_id * (5 + class_num) + 3)]);
// Probabilities of classes
for (int i = 0; i < class_num; ++i) {
output[bbindex + grids_num * (z_id * (5 + class_num) + (5 + i))] =
SigmoidGPU(
input[bbindex + grids_num * (z_id * (5 + class_num) + (5 + i))]);
}
}
int YoloBoxHeadPlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
void** outputs,
#else
void* const* outputs,
#endif
void* workspace,
hipStream_t stream) TRT_NOEXCEPT {
const int h = input_dims_[0].d[1];
const int w = input_dims_[0].d[2];
const int grid_size_x = w;
const int grid_size_y = h;
const int anchors_num = anchors_.size() / 2;
const float* input_data = static_cast<const float*>(inputs[0]);
float* output_data = static_cast<float*>(outputs[0]);
const int volume = input_dims_[0].d[0] * h * w;
dim3 block(16, 16, 4);
dim3 grid((grid_size_x / block.x) + 1, (grid_size_y / block.y) + 1,
(anchors_num / block.z) + 1);
for (int n = 0; n < batch_size; n++) {
hipLaunchKernelGGL(( YoloBoxHeadKernel), dim3(grid), dim3(block), 0, stream,
input_data + n * volume, output_data + n * volume, grid_size_x,
grid_size_y, class_num_, anchors_num);
}
return 0;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| c9bacce69816f48b8c761b2b41f557881a3fbe31.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/inference/tensorrt/plugin/yolo_box_head_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
inline __device__ float SigmoidGPU(const float& x) {
return 1.0f / (1.0f + __expf(-x));
}
__global__ void YoloBoxHeadKernel(const float* input, float* output,
const int grid_size_x, const int grid_size_y,
const int class_num, const int anchors_num) {
int x_id = blockIdx.x * blockDim.x + threadIdx.x;
int y_id = blockIdx.y * blockDim.y + threadIdx.y;
int z_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((x_id >= grid_size_x) || (y_id >= grid_size_y) || (z_id >= anchors_num)) {
return;
}
const int grids_num = grid_size_x * grid_size_y;
const int bbindex = y_id * grid_size_x + x_id;
// objectness
output[bbindex + grids_num * (z_id * (5 + class_num) + 4)] =
SigmoidGPU(input[bbindex + grids_num * (z_id * (5 + class_num) + 4)]);
// x
output[bbindex + grids_num * (z_id * (5 + class_num) + 0)] =
SigmoidGPU(input[bbindex + grids_num * (z_id * (5 + class_num) + 0)]);
// y
output[bbindex + grids_num * (z_id * (5 + class_num) + 1)] =
SigmoidGPU(input[bbindex + grids_num * (z_id * (5 + class_num) + 1)]);
// w
output[bbindex + grids_num * (z_id * (5 + class_num) + 2)] =
__expf(input[bbindex + grids_num * (z_id * (5 + class_num) + 2)]);
// h
output[bbindex + grids_num * (z_id * (5 + class_num) + 3)] =
__expf(input[bbindex + grids_num * (z_id * (5 + class_num) + 3)]);
// Probabilities of classes
for (int i = 0; i < class_num; ++i) {
output[bbindex + grids_num * (z_id * (5 + class_num) + (5 + i))] =
SigmoidGPU(
input[bbindex + grids_num * (z_id * (5 + class_num) + (5 + i))]);
}
}
int YoloBoxHeadPlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
void** outputs,
#else
void* const* outputs,
#endif
void* workspace,
cudaStream_t stream) TRT_NOEXCEPT {
const int h = input_dims_[0].d[1];
const int w = input_dims_[0].d[2];
const int grid_size_x = w;
const int grid_size_y = h;
const int anchors_num = anchors_.size() / 2;
const float* input_data = static_cast<const float*>(inputs[0]);
float* output_data = static_cast<float*>(outputs[0]);
const int volume = input_dims_[0].d[0] * h * w;
dim3 block(16, 16, 4);
dim3 grid((grid_size_x / block.x) + 1, (grid_size_y / block.y) + 1,
(anchors_num / block.z) + 1);
for (int n = 0; n < batch_size; n++) {
YoloBoxHeadKernel<<<grid, block, 0, stream>>>(
input_data + n * volume, output_data + n * volume, grid_size_x,
grid_size_y, class_num_, anchors_num);
}
return 0;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
fc11a8f6af47e8284bd43324a078656d824b21f6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdint.h>
#include <hip/hip_runtime.h>
#define PI 3.14159265359
// #define HEIGHT 256
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
__global__ void mappingLogistic_kernel( const int nWidth, const int nHeight, cudaP xMin, cudaP xMax,
cudaP yMin, cudaP yMax,cudaP *startingPoints, cudaP *graphPoints ){
int tid = blockIdx.x + threadIdx.x*gridDim.x;
__shared__ unsigned int mappedPoints[ %(HEIGHT)s ];
mappedPoints[threadIdx.x] = 0;
__syncthreads();
cudaP val = startingPoints[ threadIdx.x + blockIdx.x*blockDim.x];
cudaP k = (xMax - xMin)/(nWidth-1)*blockIdx.x + xMin;
int nValues = 1500;
cudaP yFactor = cudaP(nHeight)/(yMax-yMin);
int yPix;
for (int i=0; i<100000; i++) val = k*val*(1-val); //Tranciente
for (int i=0; i<nValues; i++ ){
if ( val>=yMin and val <=yMax){
yPix = int((val-yMin)*yFactor);
if (yPix<nHeight and yPix>=0) mappedPoints[yPix] += 1;
}
val = k*val*(1-val);
}
cudaP value;
if (mappedPoints[threadIdx.x]>=1) value = log(cudaP(mappedPoints[threadIdx.x]));
else value = 0.0f;
graphPoints[tid] = value;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
__global__ void mask_kernel( int xMin, int xMax, int yMin, int yMax, int *maskPoints){
int t_x = blockIdx.x*blockDim.x + threadIdx.x;
int t_y = blockIdx.y*blockDim.y + threadIdx.y;
int tid = t_x + t_y*blockDim.x*gridDim.x;
int val;
if ( (t_x<xMax && t_x>xMin) && (t_y<yMax && t_y>yMin) ) val = 0;
else val = 1;
maskPoints[tid] = val;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
__global__ void plot_kernel( int jMin, int jMax, int iMin, int iMax, cudaP *graphPoints, int *maskPoints, cudaP *plotData){
int t_x = blockIdx.x*blockDim.x + threadIdx.x;
int t_y = blockIdx.y*blockDim.y + threadIdx.y;
int tid = t_x + t_y*blockDim.x*gridDim.x;
cudaP val=graphPoints[tid];
if ( (t_x>=jMin and t_x<jMax) and (t_y>=iMin and t_y<iMax) ) val = 1-val;
plotData[tid] = val;
} | fc11a8f6af47e8284bd43324a078656d824b21f6.cu | #include <stdint.h>
#include <cuda.h>
#define PI 3.14159265359
// #define HEIGHT 256
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
__global__ void mappingLogistic_kernel( const int nWidth, const int nHeight, cudaP xMin, cudaP xMax,
cudaP yMin, cudaP yMax,cudaP *startingPoints, cudaP *graphPoints ){
int tid = blockIdx.x + threadIdx.x*gridDim.x;
__shared__ unsigned int mappedPoints[ %(HEIGHT)s ];
mappedPoints[threadIdx.x] = 0;
__syncthreads();
cudaP val = startingPoints[ threadIdx.x + blockIdx.x*blockDim.x];
cudaP k = (xMax - xMin)/(nWidth-1)*blockIdx.x + xMin;
int nValues = 1500;
cudaP yFactor = cudaP(nHeight)/(yMax-yMin);
int yPix;
for (int i=0; i<100000; i++) val = k*val*(1-val); //Tranciente
for (int i=0; i<nValues; i++ ){
if ( val>=yMin and val <=yMax){
yPix = int((val-yMin)*yFactor);
if (yPix<nHeight and yPix>=0) mappedPoints[yPix] += 1;
}
val = k*val*(1-val);
}
cudaP value;
if (mappedPoints[threadIdx.x]>=1) value = log(cudaP(mappedPoints[threadIdx.x]));
else value = 0.0f;
graphPoints[tid] = value;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
__global__ void mask_kernel( int xMin, int xMax, int yMin, int yMax, int *maskPoints){
int t_x = blockIdx.x*blockDim.x + threadIdx.x;
int t_y = blockIdx.y*blockDim.y + threadIdx.y;
int tid = t_x + t_y*blockDim.x*gridDim.x;
int val;
if ( (t_x<xMax && t_x>xMin) && (t_y<yMax && t_y>yMin) ) val = 0;
else val = 1;
maskPoints[tid] = val;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
__global__ void plot_kernel( int jMin, int jMax, int iMin, int iMax, cudaP *graphPoints, int *maskPoints, cudaP *plotData){
int t_x = blockIdx.x*blockDim.x + threadIdx.x;
int t_y = blockIdx.y*blockDim.y + threadIdx.y;
int tid = t_x + t_y*blockDim.x*gridDim.x;
cudaP val=graphPoints[tid];
if ( (t_x>=jMin and t_x<jMax) and (t_y>=iMin and t_y<iMax) ) val = 1-val;
plotData[tid] = val;
} |
23559b85f383fb2eb13f17c01ba0e36c028e5753.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ctdetLayer.h>
__device__ float Logist(float data){ return 1./(1. + exp(-data)); }
__global__ void CTdetforward_kernel(const float *hm, const float *reg,const float *wh ,
float *output,const int w,const int h,const int classes,const int kernerl_size,const float visthresh ) {
int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (idx >= w*h) return;
int padding = kernerl_size/2;
int offset = - padding /2;
int stride = w*h;
int grid_x = idx % w ;
int grid_y = idx / w ;
int cls,l,m;
float c_x,c_y;
for (cls = 0; cls < classes; ++cls )
{
int objIndex = stride * cls + idx;
float objProb = hm[objIndex];
float max=-1;
int max_index =0;
for(l=0 ;l < kernerl_size ; ++l)
for(m=0 ; m < kernerl_size ; ++m){
int cur_x = offset + l + grid_x;
int cur_y = offset + m + grid_y;
int cur_index = cur_y * w + cur_x + stride*cls;
int valid = (cur_x>=0 && cur_x < w && cur_y >=0 && cur_y <h );
float val = (valid !=0 ) ? Logist(hm[cur_index]): -1;
max_index = (val > max) ? cur_index : max_index;
max = (val > max ) ? val: max ;
}
objProb = Logist(objProb);
if((max_index == objIndex) && (objProb > visthresh)){
int resCount = (int)atomicAdd(output,1);
//printf("%d",resCount);
char* data = (char * )output + sizeof(float) + resCount*sizeof(Detection);
Detection* det = (Detection*)(data);
c_x = grid_x + reg[idx] ; c_y = grid_y + reg[idx+stride];
det->bbox.x1 = (c_x - wh[idx]/2)*4;
det->bbox.y1 = (c_y - wh[idx+stride]/2)*4 ;
det->bbox.x2 = (c_x + wh[idx]/2)*4;
det->bbox.y2 = (c_y + wh[idx+stride]/2)*4;
det->classId = cls;
det->prob = objProb;
}
}
}
__global__ void CTfaceforward_kernel(const float *hm, const float *wh,const float *reg,const float* landmarks,
float *output,const int w,const int h,const int classes,const int kernerl_size,const float visthresh ) {
int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (idx >= w*h) return;
int padding = kernerl_size/2;
int offset = - padding /2;
int stride = w*h;
int grid_x = idx % w ;
int grid_y = idx / w ;
int cls,l,m,mark_id;
float c_x,c_y,scale_w,scale_h;
for (cls = 0; cls < classes; ++cls )
{
int objIndex = stride * cls + idx;
float objProb = hm[objIndex];
float max=-1;
int max_index =0;
for(l=0 ;l < kernerl_size ; ++l)
for(m=0 ; m < kernerl_size ; ++m){
int cur_x = offset + l + grid_x;
int cur_y = offset + m + grid_y;
int cur_index = cur_y * w + cur_x + stride*cls;
int valid = (cur_x>=0 && cur_x < w && cur_y >=0 && cur_y <h );
float val = (valid !=0 ) ? hm[cur_index]: -1;
max_index = (val > max) ? cur_index : max_index;
max = (val > max ) ? val: max ;
}
//printf("%f\n",objProb);
if((max_index == objIndex) && (objProb > visthresh)){
int resCount = (int)atomicAdd(output,1);
//printf("%d",resCount);
char* data = (char * )output + sizeof(float) + resCount*sizeof(Detection);
Detection* det = (Detection*)(data);
c_x = (grid_x + reg[idx+stride] + 0.5)*4 ; c_y = (grid_y + reg[idx] + 0.5) * 4;
scale_w = expf(wh[idx+stride]) * 4 ; scale_h = expf(wh[idx]) * 4;
det->bbox.x1 = c_x - scale_w/2;
det->bbox.y1 = c_y - scale_h/2 ;
det->bbox.x2 = c_x + scale_w/2;
det->bbox.y2 = c_y + scale_h/2;
det->prob = objProb;
det->classId = cls;
for(mark_id=0 ; mark_id < 5 ; ++mark_id ){
det->marks[mark_id].x = det->bbox.x1 + landmarks[idx + (2*mark_id+1)*stride]*scale_w;
det->marks[mark_id].y = det->bbox.y1 + landmarks[idx + (2*mark_id)*stride]*scale_h;
}
}
}
}
void CTdetforward_gpu(const float *hm, const float *reg,const float *wh ,float *output,
const int w,const int h,const int classes,const int kernerl_size, const float visthresh ){
uint num = w * h;
hipLaunchKernelGGL(( CTdetforward_kernel), dim3(cudaGridSize(num)),dim3(BLOCK), 0, 0, hm,reg,wh,output,w,h,classes,kernerl_size,visthresh);
}
void CTfaceforward_gpu(const float *hm, const float *wh,const float *reg,const float* landmarks,float *output,
const int w,const int h,const int classes,const int kernerl_size, const float visthresh ){
uint num = w * h;
hipLaunchKernelGGL(( CTfaceforward_kernel), dim3(cudaGridSize(num)),dim3(BLOCK), 0, 0, hm,wh,reg,landmarks,output,w,h,classes,kernerl_size,visthresh);
}
| 23559b85f383fb2eb13f17c01ba0e36c028e5753.cu | #include <ctdetLayer.h>
__device__ float Logist(float data){ return 1./(1. + exp(-data)); }
__global__ void CTdetforward_kernel(const float *hm, const float *reg,const float *wh ,
float *output,const int w,const int h,const int classes,const int kernerl_size,const float visthresh ) {
int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (idx >= w*h) return;
int padding = kernerl_size/2;
int offset = - padding /2;
int stride = w*h;
int grid_x = idx % w ;
int grid_y = idx / w ;
int cls,l,m;
float c_x,c_y;
for (cls = 0; cls < classes; ++cls )
{
int objIndex = stride * cls + idx;
float objProb = hm[objIndex];
float max=-1;
int max_index =0;
for(l=0 ;l < kernerl_size ; ++l)
for(m=0 ; m < kernerl_size ; ++m){
int cur_x = offset + l + grid_x;
int cur_y = offset + m + grid_y;
int cur_index = cur_y * w + cur_x + stride*cls;
int valid = (cur_x>=0 && cur_x < w && cur_y >=0 && cur_y <h );
float val = (valid !=0 ) ? Logist(hm[cur_index]): -1;
max_index = (val > max) ? cur_index : max_index;
max = (val > max ) ? val: max ;
}
objProb = Logist(objProb);
if((max_index == objIndex) && (objProb > visthresh)){
int resCount = (int)atomicAdd(output,1);
//printf("%d",resCount);
char* data = (char * )output + sizeof(float) + resCount*sizeof(Detection);
Detection* det = (Detection*)(data);
c_x = grid_x + reg[idx] ; c_y = grid_y + reg[idx+stride];
det->bbox.x1 = (c_x - wh[idx]/2)*4;
det->bbox.y1 = (c_y - wh[idx+stride]/2)*4 ;
det->bbox.x2 = (c_x + wh[idx]/2)*4;
det->bbox.y2 = (c_y + wh[idx+stride]/2)*4;
det->classId = cls;
det->prob = objProb;
}
}
}
__global__ void CTfaceforward_kernel(const float *hm, const float *wh,const float *reg,const float* landmarks,
float *output,const int w,const int h,const int classes,const int kernerl_size,const float visthresh ) {
int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (idx >= w*h) return;
int padding = kernerl_size/2;
int offset = - padding /2;
int stride = w*h;
int grid_x = idx % w ;
int grid_y = idx / w ;
int cls,l,m,mark_id;
float c_x,c_y,scale_w,scale_h;
for (cls = 0; cls < classes; ++cls )
{
int objIndex = stride * cls + idx;
float objProb = hm[objIndex];
float max=-1;
int max_index =0;
for(l=0 ;l < kernerl_size ; ++l)
for(m=0 ; m < kernerl_size ; ++m){
int cur_x = offset + l + grid_x;
int cur_y = offset + m + grid_y;
int cur_index = cur_y * w + cur_x + stride*cls;
int valid = (cur_x>=0 && cur_x < w && cur_y >=0 && cur_y <h );
float val = (valid !=0 ) ? hm[cur_index]: -1;
max_index = (val > max) ? cur_index : max_index;
max = (val > max ) ? val: max ;
}
//printf("%f\n",objProb);
if((max_index == objIndex) && (objProb > visthresh)){
int resCount = (int)atomicAdd(output,1);
//printf("%d",resCount);
char* data = (char * )output + sizeof(float) + resCount*sizeof(Detection);
Detection* det = (Detection*)(data);
c_x = (grid_x + reg[idx+stride] + 0.5)*4 ; c_y = (grid_y + reg[idx] + 0.5) * 4;
scale_w = expf(wh[idx+stride]) * 4 ; scale_h = expf(wh[idx]) * 4;
det->bbox.x1 = c_x - scale_w/2;
det->bbox.y1 = c_y - scale_h/2 ;
det->bbox.x2 = c_x + scale_w/2;
det->bbox.y2 = c_y + scale_h/2;
det->prob = objProb;
det->classId = cls;
for(mark_id=0 ; mark_id < 5 ; ++mark_id ){
det->marks[mark_id].x = det->bbox.x1 + landmarks[idx + (2*mark_id+1)*stride]*scale_w;
det->marks[mark_id].y = det->bbox.y1 + landmarks[idx + (2*mark_id)*stride]*scale_h;
}
}
}
}
void CTdetforward_gpu(const float *hm, const float *reg,const float *wh ,float *output,
const int w,const int h,const int classes,const int kernerl_size, const float visthresh ){
uint num = w * h;
CTdetforward_kernel<<<cudaGridSize(num),BLOCK>>>(hm,reg,wh,output,w,h,classes,kernerl_size,visthresh);
}
void CTfaceforward_gpu(const float *hm, const float *wh,const float *reg,const float* landmarks,float *output,
const int w,const int h,const int classes,const int kernerl_size, const float visthresh ){
uint num = w * h;
CTfaceforward_kernel<<<cudaGridSize(num),BLOCK>>>(hm,wh,reg,landmarks,output,w,h,classes,kernerl_size,visthresh);
}
|
bd56211630863e514da7c0328f9797ccb651734d.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
#ifndef OPENCV_TINY_GPU_MODULE
namespace filter
{
template void linearRow<short4, float4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif
#endif /* CUDA_DISABLER */
| bd56211630863e514da7c0328f9797ccb651734d.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
#ifndef OPENCV_TINY_GPU_MODULE
namespace filter
{
template void linearRow<short4, float4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif
#endif /* CUDA_DISABLER */
|
12db5ffe9cbf45d62af6edc8ad48afa5af610635.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "shrink.hpp"
#include <hip/hip_complex.h>
#include "utility.hpp"
#include "helper_math.h"
namespace csmri
{
////////////////////////////////////////////////////////////////////////////////////////////////////
#ifndef blockDimx
#define blockDimx 16
#endif
#ifndef blockDimy
#define blockDimy 16
#endif
#ifndef blockDimz
#define blockDimz 1
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void __shrink1(
float2* src,
float2* dst,
int dimx,
int dimy,
int dimz,
float Lambda)
{
//3D global index
int3 idx = make_int3(
blockIdx.x*blockDim.x+threadIdx.x,
blockIdx.y*blockDim.y+threadIdx.y,
blockIdx.z*blockDim.z+threadIdx.z);
//1D global index
int index = idx.z*dimy*dimx
+ idx.y*dimx
+ idx.x;
//Check valid indices
if (idx.x >= dimx || idx.y >= dimy || idx.z >= dimz)
return;
//Do computing
float2 t = src[index];
float s = sqrtf(t.x*t.x + t.y*t.y);
float ss = s - Lambda;
ss = ss*(ss>0.0f);
dst[index] = (s==0.0f)?t:t*ss/s;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
void shrink1(
float2* src,
float2* dst,
int dimx,
int dimy,
int dimz,
float Lambda)
{
dim3 numBlocks(
(dimx/blockDimx + ((dimx%blockDimx)?1:0)),
(dimy/blockDimy + ((dimy%blockDimy)?1:0)),
(dimz/blockDimz + ((dimz%blockDimz)?1:0)) );
dim3 numThreads(blockDimx, blockDimy, blockDimz);
hipLaunchKernelGGL(( __shrink1), dim3(numBlocks), dim3(numThreads), 0, 0, src, dst, dimx, dimy, dimz, Lambda);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void __shrink2(
float2* srcA,
float2* srcB,
float2* dstA,
float2* dstB,
int dimx,
int dimy,
int dimz,
float Lambda)
{
//3D global index
int3 idx = make_int3(
blockIdx.x*blockDim.x+threadIdx.x,
blockIdx.y*blockDim.y+threadIdx.y,
blockIdx.z*blockDim.z+threadIdx.z);
//1D global index
int index = idx.z*dimy*dimx
+ idx.y*dimx
+ idx.x;
//Check valid indices
if (idx.x >= dimx || idx.y >= dimy || idx.z >= dimz)
return;
//Do computing
float2 a = srcA[index];
float2 b = srcB[index];
float s = sqrtf(a.x*a.x + a.y*a.y + b.x*b.x +b.y*b.y);
float ss = s - Lambda;
ss = ss*(ss>0.0f);
dstA[index] = (s==0.0f)?a:a*ss/s;
dstB[index] = (s==0.0f)?b:b*ss/s;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
void shrink2(
float2* srcA, float2* srcB,
float2* dstA, float2* dstB,
int dimx,
int dimy,
int dimz,
float Lambda)
{
dim3 numBlocks(
(dimx/blockDimx + ((dimx%blockDimx)?1:0)),
(dimy/blockDimy + ((dimy%blockDimy)?1:0)),
(dimz/blockDimz + ((dimz%blockDimz)?1:0)) );
dim3 numThreads(blockDimx, blockDimy, blockDimz);
hipLaunchKernelGGL(( __shrink2), dim3(numBlocks), dim3(numThreads), 0, 0, srcA, srcB, dstA, dstB, dimx, dimy, dimz, Lambda);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} | 12db5ffe9cbf45d62af6edc8ad48afa5af610635.cu | #include "shrink.hpp"
#include <cuComplex.h>
#include "utility.hpp"
#include "helper_math.h"
namespace csmri
{
////////////////////////////////////////////////////////////////////////////////////////////////////
#ifndef blockDimx
#define blockDimx 16
#endif
#ifndef blockDimy
#define blockDimy 16
#endif
#ifndef blockDimz
#define blockDimz 1
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void __shrink1(
float2* src,
float2* dst,
int dimx,
int dimy,
int dimz,
float Lambda)
{
//3D global index
int3 idx = make_int3(
blockIdx.x*blockDim.x+threadIdx.x,
blockIdx.y*blockDim.y+threadIdx.y,
blockIdx.z*blockDim.z+threadIdx.z);
//1D global index
int index = idx.z*dimy*dimx
+ idx.y*dimx
+ idx.x;
//Check valid indices
if (idx.x >= dimx || idx.y >= dimy || idx.z >= dimz)
return;
//Do computing
float2 t = src[index];
float s = sqrtf(t.x*t.x + t.y*t.y);
float ss = s - Lambda;
ss = ss*(ss>0.0f);
dst[index] = (s==0.0f)?t:t*ss/s;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
void shrink1(
float2* src,
float2* dst,
int dimx,
int dimy,
int dimz,
float Lambda)
{
dim3 numBlocks(
(dimx/blockDimx + ((dimx%blockDimx)?1:0)),
(dimy/blockDimy + ((dimy%blockDimy)?1:0)),
(dimz/blockDimz + ((dimz%blockDimz)?1:0)) );
dim3 numThreads(blockDimx, blockDimy, blockDimz);
__shrink1<<<numBlocks, numThreads>>>(src, dst, dimx, dimy, dimz, Lambda);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void __shrink2(
float2* srcA,
float2* srcB,
float2* dstA,
float2* dstB,
int dimx,
int dimy,
int dimz,
float Lambda)
{
//3D global index
int3 idx = make_int3(
blockIdx.x*blockDim.x+threadIdx.x,
blockIdx.y*blockDim.y+threadIdx.y,
blockIdx.z*blockDim.z+threadIdx.z);
//1D global index
int index = idx.z*dimy*dimx
+ idx.y*dimx
+ idx.x;
//Check valid indices
if (idx.x >= dimx || idx.y >= dimy || idx.z >= dimz)
return;
//Do computing
float2 a = srcA[index];
float2 b = srcB[index];
float s = sqrtf(a.x*a.x + a.y*a.y + b.x*b.x +b.y*b.y);
float ss = s - Lambda;
ss = ss*(ss>0.0f);
dstA[index] = (s==0.0f)?a:a*ss/s;
dstB[index] = (s==0.0f)?b:b*ss/s;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
void shrink2(
float2* srcA, float2* srcB,
float2* dstA, float2* dstB,
int dimx,
int dimy,
int dimz,
float Lambda)
{
dim3 numBlocks(
(dimx/blockDimx + ((dimx%blockDimx)?1:0)),
(dimy/blockDimy + ((dimy%blockDimy)?1:0)),
(dimz/blockDimz + ((dimz%blockDimz)?1:0)) );
dim3 numThreads(blockDimx, blockDimy, blockDimz);
__shrink2<<<numBlocks, numThreads>>>(srcA, srcB, dstA, dstB, dimx, dimy, dimz, Lambda);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} |
03cb842f0fb12aa1af3fe88488171d3d501f90b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by root on 23/03/2020.
//
#include "../Matrix.cuh"
#include <iostream>
__global__ void matrixHadamard(double *a, double *b, double *c, int cr, int cc){
long x = blockIdx.x * blockDim.x + threadIdx.x; // col
long y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
c[y * cc + x] = a[y * cc + x] * b[y * cc + x];
}
}
Matrix Matrix::hadamard(Matrix m){
if(this->Columns != m.Columns || this->Rows != m.Rows){
std::cout << "Cannot multiply hadamard. Invalid size";
exit(-1);
}
static double* c;
c = (double*) calloc(this->Rows*m.Columns,sizeof(double));
//Define os endereoes da memria de vdeo
double *d_a, *d_b, *d_c;
//Define o tamanho de cada matriz na memria
int aSize = this->Rows*this->Columns*sizeof(double);
int bSize = m.Rows*m.Columns*sizeof(double);
int cSize = this->Rows*m.Columns*sizeof(double);
//Aloca espao na memria de vdeo
hipMalloc((void**)&d_a, aSize);
hipMalloc((void**)&d_b, bSize);
hipMalloc((void**)&d_c, cSize);
//Move as 2 matrizes para a memria de vdeo alocada
hipMemcpy(d_a, this->Value, aSize, hipMemcpyHostToDevice);
hipMemcpy(d_b, m.Value, bSize, hipMemcpyHostToDevice);
//Define as dimenses
dim3 dimBlock(32,32); // 32x32 -> 1024 Threads
dim3 dimGrid(this->Rows,m.Columns);
//Efetua a multiplicao
hipLaunchKernelGGL(( matrixHadamard), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, this->Rows, m.Columns);
//Copia o resultado de volta
hipMemcpy(c, d_c, cSize, hipMemcpyDeviceToHost);
//Limpa a memria de vdeo
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
//Salva
return {m.Columns, this->Rows, c};
}
| 03cb842f0fb12aa1af3fe88488171d3d501f90b7.cu | //
// Created by root on 23/03/2020.
//
#include "../Matrix.cuh"
#include <iostream>
__global__ void matrixHadamard(double *a, double *b, double *c, int cr, int cc){
long x = blockIdx.x * blockDim.x + threadIdx.x; // col
long y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
c[y * cc + x] = a[y * cc + x] * b[y * cc + x];
}
}
Matrix Matrix::hadamard(Matrix m){
if(this->Columns != m.Columns || this->Rows != m.Rows){
std::cout << "Cannot multiply hadamard. Invalid size";
exit(-1);
}
static double* c;
c = (double*) calloc(this->Rows*m.Columns,sizeof(double));
//Define os endereçoes da memória de vídeo
double *d_a, *d_b, *d_c;
//Define o tamanho de cada matriz na memória
int aSize = this->Rows*this->Columns*sizeof(double);
int bSize = m.Rows*m.Columns*sizeof(double);
int cSize = this->Rows*m.Columns*sizeof(double);
//Aloca espaço na memória de vídeo
cudaMalloc((void**)&d_a, aSize);
cudaMalloc((void**)&d_b, bSize);
cudaMalloc((void**)&d_c, cSize);
//Move as 2 matrizes para a memória de vídeo alocada
cudaMemcpy(d_a, this->Value, aSize, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, m.Value, bSize, cudaMemcpyHostToDevice);
//Define as dimensões
dim3 dimBlock(32,32); // 32x32 -> 1024 Threads
dim3 dimGrid(this->Rows,m.Columns);
//Efetua a multiplicação
matrixHadamard<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, this->Rows, m.Columns);
//Copia o resultado de volta
cudaMemcpy(c, d_c, cSize, cudaMemcpyDeviceToHost);
//Limpa a memória de vídeo
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
//Salva
return {m.Columns, this->Rows, c};
}
|
8a7d19d260e2bec704d0f70f67f125b44069ed5f.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (C) 2016 Yusuke Suzuki <yusuke.suzuki@sslab.ics.keio.ac.jp>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "dump_memory.cuh"
#include "utility.h"
#include "utility/util.cu.h"
#include <cstdio>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
namespace gloop {
__device__ void dumpMemoryInternal(const void* ptr, std::size_t size)
{
BEGIN_SINGLE_THREAD
{
const unsigned char* cursor = reinterpret_cast<const unsigned char*>(ptr);
for (std::size_t i = 0; i < size; ++i) {
if ((i + 1) == size) {
printf("%02x\n", (unsigned)(cursor[i]));
} else {
printf("%02x ", (unsigned)(cursor[i]));
}
}
}
END_SINGLE_THREAD
}
} // namespace gloop
| 8a7d19d260e2bec704d0f70f67f125b44069ed5f.cu | /*
Copyright (C) 2016 Yusuke Suzuki <yusuke.suzuki@sslab.ics.keio.ac.jp>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "dump_memory.cuh"
#include "utility.h"
#include "utility/util.cu.h"
#include <cstdio>
#include <cuda.h>
#include <cuda_runtime.h>
namespace gloop {
__device__ void dumpMemoryInternal(const void* ptr, std::size_t size)
{
BEGIN_SINGLE_THREAD
{
const unsigned char* cursor = reinterpret_cast<const unsigned char*>(ptr);
for (std::size_t i = 0; i < size; ++i) {
if ((i + 1) == size) {
printf("%02x\n", (unsigned)(cursor[i]));
} else {
printf("%02x ", (unsigned)(cursor[i]));
}
}
}
END_SINGLE_THREAD
}
} // namespace gloop
|
eab259940cc870246b8d35b36cdb433d22ff8915.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
/* -*- mode: c++ -*- */
#include "gg.h"
#include "ggcuda.h"
void kernel_sizing(CSRGraph &, dim3 &, dim3 &);
#define TB_SIZE 256
const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic";
#include "gen_hip.cuh"
static const int __tb_InitializeGraph2 = TB_SIZE;
static const int __tb_KCoreStep1 = TB_SIZE;
__global__ void InitializeGraph2(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_current_degree, DynamicBitset& bitset_current_degree)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_InitializeGraph2;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
// FP: "3 -> 4;
typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "6 -> 7;
bool pop = src < __end;
// FP: "7 -> 8;
if (pop)
{
}
// FP: "9 -> 10;
// FP: "12 -> 13;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "13 -> 14;
__shared__ struct { ; } _np_closure [TB_SIZE];
// FP: "14 -> 15;
// FP: "15 -> 16;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "18 -> 19;
// FP: "19 -> 20;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "20 -> 21;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "21 -> 22;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "24 -> 25;
__syncthreads();
// FP: "25 -> 26;
while (true)
{
// FP: "26 -> 27;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "29 -> 30;
__syncthreads();
// FP: "30 -> 31;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "31 -> 32;
__syncthreads();
// FP: "32 -> 33;
break;
}
// FP: "34 -> 35;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "37 -> 38;
__syncthreads();
// FP: "38 -> 39;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "39 -> 40;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "42 -> 43;
assert(nps.tb.src < __kernel_tb_size);
// FP: "43 -> 44;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type current_edge;
current_edge = ns +_np_j;
{
index_type dest_node;
dest_node = graph.getAbsDestination(current_edge);
atomicTestAdd(&p_current_degree[dest_node], (uint32_t)1);
bitset_current_degree.set(dest_node);
}
}
// FP: "51 -> 52;
__syncthreads();
}
// FP: "53 -> 54;
// FP: "54 -> 55;
{
const int warpid = threadIdx.x / 32;
// FP: "55 -> 56;
const int _np_laneid = cub::LaneId();
// FP: "56 -> 57;
while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type current_edge;
current_edge = _np_w_start +_np_ii;
{
index_type dest_node;
dest_node = graph.getAbsDestination(current_edge);
atomicTestAdd(&p_current_degree[dest_node], (uint32_t)1);
bitset_current_degree.set(dest_node);
}
}
}
// FP: "74 -> 75;
__syncthreads();
// FP: "75 -> 76;
}
// FP: "76 -> 77;
__syncthreads();
// FP: "77 -> 78;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "78 -> 79;
while (_np.work())
{
// FP: "79 -> 80;
int _np_i =0;
// FP: "80 -> 81;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "81 -> 82;
__syncthreads();
// FP: "82 -> 83;
// FP: "83 -> 84;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type current_edge;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
current_edge= nps.fg.itvalue[_np_i];
{
index_type dest_node;
dest_node = graph.getAbsDestination(current_edge);
atomicTestAdd(&p_current_degree[dest_node], (uint32_t)1);
bitset_current_degree.set(dest_node);
}
}
// FP: "92 -> 93;
_np.execute_round_done(ITSIZE);
// FP: "93 -> 94;
__syncthreads();
}
// FP: "95 -> 96;
assert(threadIdx.x < __kernel_tb_size);
}
// FP: "97 -> 98;
}
__global__ void InitializeGraph1(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_current_degree, uint8_t * p_flag, uint32_t * p_trim)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
index_type src_end;
// FP: "1 -> 2;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
p_flag[src] = true;
p_trim[src] = 0;
p_current_degree[src] = 0;
}
}
// FP: "9 -> 10;
}
__global__ void KCoreStep2(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_current_degree, uint8_t * p_flag, uint32_t * p_trim)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
index_type src_end;
// FP: "1 -> 2;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
if (p_flag[src])
{
if (p_trim[src] > 0)
{
p_current_degree[src] = p_current_degree[src] - p_trim[src];
}
}
p_trim[src] = 0;
}
}
// FP: "12 -> 13;
}
__global__ void KCoreStep1(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t local_k_core_num, uint32_t * p_current_degree, uint8_t * p_flag, uint32_t * p_trim, HGAccumulator<unsigned int> DGAccumulator_accum)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_KCoreStep1;
__shared__ hipcub::BlockReduce<unsigned int, TB_SIZE>::TempStorage DGAccumulator_accum_ts;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
// FP: "3 -> 4;
typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
// FP: "6 -> 7;
DGAccumulator_accum.thread_entry();
// FP: "7 -> 8;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "8 -> 9;
bool pop = src < __end;
// FP: "9 -> 10;
if (pop)
{
if (p_flag[src])
{
if (p_current_degree[src] < local_k_core_num)
{
p_flag[src] = false;
DGAccumulator_accum.reduce( 1);
}
else
{
pop = false;
}
}
else
{
pop = false;
}
}
// FP: "17 -> 18;
// FP: "20 -> 21;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "21 -> 22;
__shared__ struct { ; } _np_closure [TB_SIZE];
// FP: "22 -> 23;
// FP: "23 -> 24;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "26 -> 27;
// FP: "27 -> 28;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "28 -> 29;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "29 -> 30;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "32 -> 33;
__syncthreads();
// FP: "33 -> 34;
while (true)
{
// FP: "34 -> 35;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "37 -> 38;
__syncthreads();
// FP: "38 -> 39;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "39 -> 40;
__syncthreads();
// FP: "40 -> 41;
break;
}
// FP: "42 -> 43;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "45 -> 46;
__syncthreads();
// FP: "46 -> 47;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "47 -> 48;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "50 -> 51;
assert(nps.tb.src < __kernel_tb_size);
// FP: "51 -> 52;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type current_edge;
current_edge = ns +_np_j;
{
index_type dst;
dst = graph.getAbsDestination(current_edge);
atomicTestAdd(&p_trim[dst], (uint32_t)1);
}
}
// FP: "59 -> 60;
__syncthreads();
}
// FP: "61 -> 62;
// FP: "62 -> 63;
{
const int warpid = threadIdx.x / 32;
// FP: "63 -> 64;
const int _np_laneid = cub::LaneId();
// FP: "64 -> 65;
while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type current_edge;
current_edge = _np_w_start +_np_ii;
{
index_type dst;
dst = graph.getAbsDestination(current_edge);
atomicTestAdd(&p_trim[dst], (uint32_t)1);
}
}
}
// FP: "82 -> 83;
__syncthreads();
// FP: "83 -> 84;
}
// FP: "84 -> 85;
__syncthreads();
// FP: "85 -> 86;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "86 -> 87;
while (_np.work())
{
// FP: "87 -> 88;
int _np_i =0;
// FP: "88 -> 89;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "89 -> 90;
__syncthreads();
// FP: "90 -> 91;
// FP: "91 -> 92;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type current_edge;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
current_edge= nps.fg.itvalue[_np_i];
{
index_type dst;
dst = graph.getAbsDestination(current_edge);
atomicTestAdd(&p_trim[dst], (uint32_t)1);
}
}
// FP: "100 -> 101;
_np.execute_round_done(ITSIZE);
// FP: "101 -> 102;
__syncthreads();
}
// FP: "103 -> 104;
assert(threadIdx.x < __kernel_tb_size);
}
// FP: "107 -> 108;
DGAccumulator_accum.thread_exit<hipcub::BlockReduce<unsigned int, TB_SIZE> >(DGAccumulator_accum_ts);
// FP: "108 -> 109;
}
__global__ void KCoreSanityCheck(CSRGraph graph, unsigned int __begin, unsigned int __end, uint8_t * p_flag, HGAccumulator<uint64_t> DGAccumulator_accum)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
__shared__ hipcub::BlockReduce<uint64_t, TB_SIZE>::TempStorage DGAccumulator_accum_ts;
index_type src_end;
// FP: "1 -> 2;
// FP: "2 -> 3;
DGAccumulator_accum.thread_entry();
// FP: "3 -> 4;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
if (p_flag[src])
{
DGAccumulator_accum.reduce( 1);
}
}
}
// FP: "11 -> 12;
DGAccumulator_accum.thread_exit<hipcub::BlockReduce<uint64_t, TB_SIZE> >(DGAccumulator_accum_ts);
// FP: "12 -> 13;
}
void InitializeGraph2_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
hipLaunchKernelGGL(( InitializeGraph2) , dim3(blocks), dim3(__tb_InitializeGraph2), 0, 0, ctx->gg, __begin, __end, ctx->current_degree.data.gpu_wr_ptr(), *(ctx->current_degree.is_updated.gpu_rd_ptr()));
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void InitializeGraph2_allNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph2_cuda(0, ctx->gg.nnodes, ctx);
// FP: "2 -> 3;
}
void InitializeGraph2_masterNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph2_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
// FP: "2 -> 3;
}
void InitializeGraph2_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph2_cuda(0, ctx->numNodesWithEdges, ctx);
// FP: "2 -> 3;
}
void InitializeGraph1_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
hipLaunchKernelGGL(( InitializeGraph1) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, ctx->current_degree.data.gpu_wr_ptr(), ctx->flag.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr());
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void InitializeGraph1_allNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph1_cuda(0, ctx->gg.nnodes, ctx);
// FP: "2 -> 3;
}
void InitializeGraph1_masterNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph1_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
// FP: "2 -> 3;
}
void InitializeGraph1_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph1_cuda(0, ctx->numNodesWithEdges, ctx);
// FP: "2 -> 3;
}
void KCoreStep2_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
hipLaunchKernelGGL(( KCoreStep2) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, ctx->current_degree.data.gpu_wr_ptr(), ctx->flag.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr());
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void KCoreStep2_allNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
KCoreStep2_cuda(0, ctx->gg.nnodes, ctx);
// FP: "2 -> 3;
}
void KCoreStep2_masterNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
KCoreStep2_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
// FP: "2 -> 3;
}
void KCoreStep2_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
KCoreStep2_cuda(0, ctx->numNodesWithEdges, ctx);
// FP: "2 -> 3;
}
void KCoreStep1_cuda(unsigned int __begin, unsigned int __end, unsigned int & DGAccumulator_accum, uint32_t local_k_core_num, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
HGAccumulator<unsigned int> _DGAccumulator_accum;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
Shared<unsigned int> DGAccumulator_accumval = Shared<unsigned int>(1);
// FP: "5 -> 6;
// FP: "6 -> 7;
*(DGAccumulator_accumval.cpu_wr_ptr()) = 0;
// FP: "7 -> 8;
_DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr();
// FP: "8 -> 9;
hipLaunchKernelGGL(( KCoreStep1) , dim3(blocks), dim3(__tb_KCoreStep1), 0, 0, ctx->gg, __begin, __end, local_k_core_num, ctx->current_degree.data.gpu_wr_ptr(), ctx->flag.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr(), _DGAccumulator_accum);
// FP: "9 -> 10;
check_cuda_kernel;
// FP: "10 -> 11;
DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr());
// FP: "11 -> 12;
}
void KCoreStep1_allNodes_cuda(unsigned int & DGAccumulator_accum, uint32_t local_k_core_num, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
KCoreStep1_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, local_k_core_num, ctx);
// FP: "2 -> 3;
}
void KCoreStep1_masterNodes_cuda(unsigned int & DGAccumulator_accum, uint32_t local_k_core_num, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
KCoreStep1_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, local_k_core_num, ctx);
// FP: "2 -> 3;
}
void KCoreStep1_nodesWithEdges_cuda(unsigned int & DGAccumulator_accum, uint32_t local_k_core_num, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
KCoreStep1_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, local_k_core_num, ctx);
// FP: "2 -> 3;
}
void KCoreSanityCheck_cuda(unsigned int __begin, unsigned int __end, uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
HGAccumulator<uint64_t> _DGAccumulator_accum;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
Shared<uint64_t> DGAccumulator_accumval = Shared<uint64_t>(1);
// FP: "5 -> 6;
// FP: "6 -> 7;
*(DGAccumulator_accumval.cpu_wr_ptr()) = 0;
// FP: "7 -> 8;
_DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr();
// FP: "8 -> 9;
hipLaunchKernelGGL(( KCoreSanityCheck) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, ctx->flag.data.gpu_wr_ptr(), _DGAccumulator_accum);
// FP: "9 -> 10;
check_cuda_kernel;
// FP: "10 -> 11;
DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr());
// FP: "11 -> 12;
}
void KCoreSanityCheck_allNodes_cuda(uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
KCoreSanityCheck_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, ctx);
// FP: "2 -> 3;
}
void KCoreSanityCheck_masterNodes_cuda(uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
KCoreSanityCheck_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, ctx);
// FP: "2 -> 3;
}
void KCoreSanityCheck_nodesWithEdges_cuda(uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
KCoreSanityCheck_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, ctx);
// FP: "2 -> 3;
}
| eab259940cc870246b8d35b36cdb433d22ff8915.cu | /*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
/* -*- mode: c++ -*- */
#include "gg.h"
#include "ggcuda.h"
void kernel_sizing(CSRGraph &, dim3 &, dim3 &);
#define TB_SIZE 256
const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic";
#include "gen_cuda.cuh"
static const int __tb_InitializeGraph2 = TB_SIZE;
static const int __tb_KCoreStep1 = TB_SIZE;
__global__ void InitializeGraph2(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_current_degree, DynamicBitset& bitset_current_degree)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_InitializeGraph2;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
// FP: "3 -> 4;
typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "6 -> 7;
bool pop = src < __end;
// FP: "7 -> 8;
if (pop)
{
}
// FP: "9 -> 10;
// FP: "12 -> 13;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "13 -> 14;
__shared__ struct { ; } _np_closure [TB_SIZE];
// FP: "14 -> 15;
// FP: "15 -> 16;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "18 -> 19;
// FP: "19 -> 20;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "20 -> 21;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "21 -> 22;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "24 -> 25;
__syncthreads();
// FP: "25 -> 26;
while (true)
{
// FP: "26 -> 27;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "29 -> 30;
__syncthreads();
// FP: "30 -> 31;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "31 -> 32;
__syncthreads();
// FP: "32 -> 33;
break;
}
// FP: "34 -> 35;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "37 -> 38;
__syncthreads();
// FP: "38 -> 39;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "39 -> 40;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "42 -> 43;
assert(nps.tb.src < __kernel_tb_size);
// FP: "43 -> 44;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type current_edge;
current_edge = ns +_np_j;
{
index_type dest_node;
dest_node = graph.getAbsDestination(current_edge);
atomicTestAdd(&p_current_degree[dest_node], (uint32_t)1);
bitset_current_degree.set(dest_node);
}
}
// FP: "51 -> 52;
__syncthreads();
}
// FP: "53 -> 54;
// FP: "54 -> 55;
{
const int warpid = threadIdx.x / 32;
// FP: "55 -> 56;
const int _np_laneid = cub::LaneId();
// FP: "56 -> 57;
while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type current_edge;
current_edge = _np_w_start +_np_ii;
{
index_type dest_node;
dest_node = graph.getAbsDestination(current_edge);
atomicTestAdd(&p_current_degree[dest_node], (uint32_t)1);
bitset_current_degree.set(dest_node);
}
}
}
// FP: "74 -> 75;
__syncthreads();
// FP: "75 -> 76;
}
// FP: "76 -> 77;
__syncthreads();
// FP: "77 -> 78;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "78 -> 79;
while (_np.work())
{
// FP: "79 -> 80;
int _np_i =0;
// FP: "80 -> 81;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "81 -> 82;
__syncthreads();
// FP: "82 -> 83;
// FP: "83 -> 84;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type current_edge;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
current_edge= nps.fg.itvalue[_np_i];
{
index_type dest_node;
dest_node = graph.getAbsDestination(current_edge);
atomicTestAdd(&p_current_degree[dest_node], (uint32_t)1);
bitset_current_degree.set(dest_node);
}
}
// FP: "92 -> 93;
_np.execute_round_done(ITSIZE);
// FP: "93 -> 94;
__syncthreads();
}
// FP: "95 -> 96;
assert(threadIdx.x < __kernel_tb_size);
}
// FP: "97 -> 98;
}
__global__ void InitializeGraph1(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_current_degree, uint8_t * p_flag, uint32_t * p_trim)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
index_type src_end;
// FP: "1 -> 2;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
p_flag[src] = true;
p_trim[src] = 0;
p_current_degree[src] = 0;
}
}
// FP: "9 -> 10;
}
__global__ void KCoreStep2(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_current_degree, uint8_t * p_flag, uint32_t * p_trim)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
index_type src_end;
// FP: "1 -> 2;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
if (p_flag[src])
{
if (p_trim[src] > 0)
{
p_current_degree[src] = p_current_degree[src] - p_trim[src];
}
}
p_trim[src] = 0;
}
}
// FP: "12 -> 13;
}
__global__ void KCoreStep1(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t local_k_core_num, uint32_t * p_current_degree, uint8_t * p_flag, uint32_t * p_trim, HGAccumulator<unsigned int> DGAccumulator_accum)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_KCoreStep1;
__shared__ cub::BlockReduce<unsigned int, TB_SIZE>::TempStorage DGAccumulator_accum_ts;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
// FP: "3 -> 4;
typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
// FP: "6 -> 7;
DGAccumulator_accum.thread_entry();
// FP: "7 -> 8;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "8 -> 9;
bool pop = src < __end;
// FP: "9 -> 10;
if (pop)
{
if (p_flag[src])
{
if (p_current_degree[src] < local_k_core_num)
{
p_flag[src] = false;
DGAccumulator_accum.reduce( 1);
}
else
{
pop = false;
}
}
else
{
pop = false;
}
}
// FP: "17 -> 18;
// FP: "20 -> 21;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "21 -> 22;
__shared__ struct { ; } _np_closure [TB_SIZE];
// FP: "22 -> 23;
// FP: "23 -> 24;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "26 -> 27;
// FP: "27 -> 28;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "28 -> 29;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "29 -> 30;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "32 -> 33;
__syncthreads();
// FP: "33 -> 34;
while (true)
{
// FP: "34 -> 35;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "37 -> 38;
__syncthreads();
// FP: "38 -> 39;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "39 -> 40;
__syncthreads();
// FP: "40 -> 41;
break;
}
// FP: "42 -> 43;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "45 -> 46;
__syncthreads();
// FP: "46 -> 47;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "47 -> 48;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "50 -> 51;
assert(nps.tb.src < __kernel_tb_size);
// FP: "51 -> 52;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type current_edge;
current_edge = ns +_np_j;
{
index_type dst;
dst = graph.getAbsDestination(current_edge);
atomicTestAdd(&p_trim[dst], (uint32_t)1);
}
}
// FP: "59 -> 60;
__syncthreads();
}
// FP: "61 -> 62;
// FP: "62 -> 63;
{
const int warpid = threadIdx.x / 32;
// FP: "63 -> 64;
const int _np_laneid = cub::LaneId();
// FP: "64 -> 65;
while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type current_edge;
current_edge = _np_w_start +_np_ii;
{
index_type dst;
dst = graph.getAbsDestination(current_edge);
atomicTestAdd(&p_trim[dst], (uint32_t)1);
}
}
}
// FP: "82 -> 83;
__syncthreads();
// FP: "83 -> 84;
}
// FP: "84 -> 85;
__syncthreads();
// FP: "85 -> 86;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "86 -> 87;
while (_np.work())
{
// FP: "87 -> 88;
int _np_i =0;
// FP: "88 -> 89;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "89 -> 90;
__syncthreads();
// FP: "90 -> 91;
// FP: "91 -> 92;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type current_edge;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
current_edge= nps.fg.itvalue[_np_i];
{
index_type dst;
dst = graph.getAbsDestination(current_edge);
atomicTestAdd(&p_trim[dst], (uint32_t)1);
}
}
// FP: "100 -> 101;
_np.execute_round_done(ITSIZE);
// FP: "101 -> 102;
__syncthreads();
}
// FP: "103 -> 104;
assert(threadIdx.x < __kernel_tb_size);
}
// FP: "107 -> 108;
DGAccumulator_accum.thread_exit<cub::BlockReduce<unsigned int, TB_SIZE> >(DGAccumulator_accum_ts);
// FP: "108 -> 109;
}
__global__ void KCoreSanityCheck(CSRGraph graph, unsigned int __begin, unsigned int __end, uint8_t * p_flag, HGAccumulator<uint64_t> DGAccumulator_accum)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
__shared__ cub::BlockReduce<uint64_t, TB_SIZE>::TempStorage DGAccumulator_accum_ts;
index_type src_end;
// FP: "1 -> 2;
// FP: "2 -> 3;
DGAccumulator_accum.thread_entry();
// FP: "3 -> 4;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
if (p_flag[src])
{
DGAccumulator_accum.reduce( 1);
}
}
}
// FP: "11 -> 12;
DGAccumulator_accum.thread_exit<cub::BlockReduce<uint64_t, TB_SIZE> >(DGAccumulator_accum_ts);
// FP: "12 -> 13;
}
void InitializeGraph2_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
InitializeGraph2 <<<blocks, __tb_InitializeGraph2>>>(ctx->gg, __begin, __end, ctx->current_degree.data.gpu_wr_ptr(), *(ctx->current_degree.is_updated.gpu_rd_ptr()));
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void InitializeGraph2_allNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph2_cuda(0, ctx->gg.nnodes, ctx);
// FP: "2 -> 3;
}
void InitializeGraph2_masterNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph2_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
// FP: "2 -> 3;
}
void InitializeGraph2_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph2_cuda(0, ctx->numNodesWithEdges, ctx);
// FP: "2 -> 3;
}
void InitializeGraph1_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
InitializeGraph1 <<<blocks, threads>>>(ctx->gg, __begin, __end, ctx->current_degree.data.gpu_wr_ptr(), ctx->flag.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr());
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void InitializeGraph1_allNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph1_cuda(0, ctx->gg.nnodes, ctx);
// FP: "2 -> 3;
}
void InitializeGraph1_masterNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph1_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
// FP: "2 -> 3;
}
void InitializeGraph1_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph1_cuda(0, ctx->numNodesWithEdges, ctx);
// FP: "2 -> 3;
}
void KCoreStep2_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
KCoreStep2 <<<blocks, threads>>>(ctx->gg, __begin, __end, ctx->current_degree.data.gpu_wr_ptr(), ctx->flag.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr());
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void KCoreStep2_allNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
KCoreStep2_cuda(0, ctx->gg.nnodes, ctx);
// FP: "2 -> 3;
}
void KCoreStep2_masterNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
KCoreStep2_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
// FP: "2 -> 3;
}
void KCoreStep2_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
KCoreStep2_cuda(0, ctx->numNodesWithEdges, ctx);
// FP: "2 -> 3;
}
void KCoreStep1_cuda(unsigned int __begin, unsigned int __end, unsigned int & DGAccumulator_accum, uint32_t local_k_core_num, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
HGAccumulator<unsigned int> _DGAccumulator_accum;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
Shared<unsigned int> DGAccumulator_accumval = Shared<unsigned int>(1);
// FP: "5 -> 6;
// FP: "6 -> 7;
*(DGAccumulator_accumval.cpu_wr_ptr()) = 0;
// FP: "7 -> 8;
_DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr();
// FP: "8 -> 9;
KCoreStep1 <<<blocks, __tb_KCoreStep1>>>(ctx->gg, __begin, __end, local_k_core_num, ctx->current_degree.data.gpu_wr_ptr(), ctx->flag.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr(), _DGAccumulator_accum);
// FP: "9 -> 10;
check_cuda_kernel;
// FP: "10 -> 11;
DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr());
// FP: "11 -> 12;
}
void KCoreStep1_allNodes_cuda(unsigned int & DGAccumulator_accum, uint32_t local_k_core_num, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
KCoreStep1_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, local_k_core_num, ctx);
// FP: "2 -> 3;
}
void KCoreStep1_masterNodes_cuda(unsigned int & DGAccumulator_accum, uint32_t local_k_core_num, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
KCoreStep1_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, local_k_core_num, ctx);
// FP: "2 -> 3;
}
void KCoreStep1_nodesWithEdges_cuda(unsigned int & DGAccumulator_accum, uint32_t local_k_core_num, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
KCoreStep1_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, local_k_core_num, ctx);
// FP: "2 -> 3;
}
void KCoreSanityCheck_cuda(unsigned int __begin, unsigned int __end, uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
HGAccumulator<uint64_t> _DGAccumulator_accum;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
Shared<uint64_t> DGAccumulator_accumval = Shared<uint64_t>(1);
// FP: "5 -> 6;
// FP: "6 -> 7;
*(DGAccumulator_accumval.cpu_wr_ptr()) = 0;
// FP: "7 -> 8;
_DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr();
// FP: "8 -> 9;
KCoreSanityCheck <<<blocks, threads>>>(ctx->gg, __begin, __end, ctx->flag.data.gpu_wr_ptr(), _DGAccumulator_accum);
// FP: "9 -> 10;
check_cuda_kernel;
// FP: "10 -> 11;
DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr());
// FP: "11 -> 12;
}
void KCoreSanityCheck_allNodes_cuda(uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
KCoreSanityCheck_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, ctx);
// FP: "2 -> 3;
}
void KCoreSanityCheck_masterNodes_cuda(uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
KCoreSanityCheck_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, ctx);
// FP: "2 -> 3;
}
void KCoreSanityCheck_nodesWithEdges_cuda(uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
KCoreSanityCheck_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, ctx);
// FP: "2 -> 3;
}
|
48b86fa883b03841943db993a880a00fd8071ff0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void add(int *a,int *b,int *c)
{
int tID = blockIdx.x;
if(tID<N)
{
c[tID] = a[tID] + b[tID];
}
} | 48b86fa883b03841943db993a880a00fd8071ff0.cu | #include "includes.h"
__global__ void add(int *a,int *b,int *c)
{
int tID = blockIdx.x;
if(tID<N)
{
c[tID] = a[tID] + b[tID];
}
} |
11a05b7b40d8b17f24ef8f5c45e667ce76b6f51a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 17.05.2018
// @author raver119@gmail.com
//
#include <ops/declarable/helpers/percentile.h>
#include <array/NDArrayFactory.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/DebugHelper.h>
#include <array/ResultSet.h>
namespace sd {
namespace ops {
namespace helpers {
template <typename X>
static _CUDA_G void percentileKernel(void *vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xTadOffsets,
const Nd4jLong numTads, const Nd4jLong tadLength,
void *vz, const Nd4jLong *zShapeInfo, const Nd4jLong zLength,
const Nd4jLong position) {
for (int t = blockIdx.x; t < numTads; t += gridDim.x) {
auto x = reinterpret_cast<X*>(vx) + xTadOffsets[t];
auto z = reinterpret_cast<X*>(vz);
// sort tad
if (tadLength > 1) {
for (int m = 0; m < tadLength; m++) {
if (m % 2 == 0) {
for (int tid = threadIdx.x; tid < tadLength; tid += blockDim.x) {
auto top = 2 * tid + 1;
if (top < tadLength) {
auto t0 = shape::getIndexOffset(top - 1, xTadShapeInfo);
auto t1 = shape::getIndexOffset(top, xTadShapeInfo);
if (x[t0] > x[t1]) {
//swap values
X dz0 = x[t0];
x[t0] = x[t1];
x[t1] = dz0;
}
}
}
} else {
for (int tid = threadIdx.x; tid < tadLength; tid += blockDim.x) {
auto top = 2 * tid + 2;
if (top < tadLength) {
auto t0 = shape::getIndexOffset(top - 1, xTadShapeInfo);
auto t1 = shape::getIndexOffset(top, xTadShapeInfo);
if (x[t0] > x[t1]) {
//swap values
X dz0 = x[t0];
x[t0] = x[t1];
x[t1] = dz0;
}
}
}
}
__syncthreads();
}
}
// saving final value
if (threadIdx.x == 0)
z[shape::getIndexOffset(t, zShapeInfo)] = x[shape::getIndexOffset(position, xTadShapeInfo)];
__syncthreads();
}
}
template <typename T>
static void _percentile(sd::LaunchContext * context, const NDArray& input, NDArray& output, std::vector<int>& axis, const float q, const int interpolation) {
const int inputRank = input.rankOf();
if(axis.empty())
for(int i=0; i<inputRank; ++i)
axis.push_back(i);
else
shape::checkDimensions(inputRank, axis);
auto tempArray = input.dup();
auto packX = ConstantTadHelper::getInstance()->tadForDimensions(tempArray.shapeInfo(), axis);
auto tadLength = shape::length(packX.primaryShapeInfo());
const float fraction = 1.f - q / 100.;
Nd4jLong position = 0;
switch(interpolation) {
case 0: // lower
position = static_cast<Nd4jLong>(math::nd4j_ceil<float,T>((tadLength - 1) * fraction));
break;
case 1: // higher
position = static_cast<Nd4jLong>(math::nd4j_floor<float,T>((tadLength - 1) * fraction));
break;
case 2: // nearest
position = static_cast<Nd4jLong>(math::nd4j_round<float,T>((tadLength - 1) * fraction));
break;
}
position = tadLength - position - 1;
hipLaunchKernelGGL(( percentileKernel<T>), dim3(256), dim3(512), 1024, *context->getCudaStream(), tempArray.specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), packX.numberOfTads(), tadLength, output.specialBuffer(), output.specialShapeInfo(), output.lengthOf(), position);
sd::DebugHelper::checkErrorCode(context->getCudaStream(), "percentile");
}
void percentile(sd::LaunchContext * context, const NDArray& input, NDArray& output, std::vector<int>& axises, const float q, const int interpolation) {
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), _percentile, (context, input, output, axises, q, interpolation), LIBND4J_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
}
BUILD_SINGLE_TEMPLATE(template void _percentile, (sd::LaunchContext * context, const NDArray& input, NDArray& output, std::vector<int>& axises, const float q, const int interpolation), LIBND4J_TYPES);
}
}
} | 11a05b7b40d8b17f24ef8f5c45e667ce76b6f51a.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 17.05.2018
// @author raver119@gmail.com
//
#include <ops/declarable/helpers/percentile.h>
#include <array/NDArrayFactory.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/DebugHelper.h>
#include <array/ResultSet.h>
namespace sd {
namespace ops {
namespace helpers {
template <typename X>
static _CUDA_G void percentileKernel(void *vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xTadOffsets,
const Nd4jLong numTads, const Nd4jLong tadLength,
void *vz, const Nd4jLong *zShapeInfo, const Nd4jLong zLength,
const Nd4jLong position) {
for (int t = blockIdx.x; t < numTads; t += gridDim.x) {
auto x = reinterpret_cast<X*>(vx) + xTadOffsets[t];
auto z = reinterpret_cast<X*>(vz);
// sort tad
if (tadLength > 1) {
for (int m = 0; m < tadLength; m++) {
if (m % 2 == 0) {
for (int tid = threadIdx.x; tid < tadLength; tid += blockDim.x) {
auto top = 2 * tid + 1;
if (top < tadLength) {
auto t0 = shape::getIndexOffset(top - 1, xTadShapeInfo);
auto t1 = shape::getIndexOffset(top, xTadShapeInfo);
if (x[t0] > x[t1]) {
//swap values
X dz0 = x[t0];
x[t0] = x[t1];
x[t1] = dz0;
}
}
}
} else {
for (int tid = threadIdx.x; tid < tadLength; tid += blockDim.x) {
auto top = 2 * tid + 2;
if (top < tadLength) {
auto t0 = shape::getIndexOffset(top - 1, xTadShapeInfo);
auto t1 = shape::getIndexOffset(top, xTadShapeInfo);
if (x[t0] > x[t1]) {
//swap values
X dz0 = x[t0];
x[t0] = x[t1];
x[t1] = dz0;
}
}
}
}
__syncthreads();
}
}
// saving final value
if (threadIdx.x == 0)
z[shape::getIndexOffset(t, zShapeInfo)] = x[shape::getIndexOffset(position, xTadShapeInfo)];
__syncthreads();
}
}
template <typename T>
static void _percentile(sd::LaunchContext * context, const NDArray& input, NDArray& output, std::vector<int>& axis, const float q, const int interpolation) {
const int inputRank = input.rankOf();
if(axis.empty())
for(int i=0; i<inputRank; ++i)
axis.push_back(i);
else
shape::checkDimensions(inputRank, axis);
auto tempArray = input.dup();
auto packX = ConstantTadHelper::getInstance()->tadForDimensions(tempArray.shapeInfo(), axis);
auto tadLength = shape::length(packX.primaryShapeInfo());
const float fraction = 1.f - q / 100.;
Nd4jLong position = 0;
switch(interpolation) {
case 0: // lower
position = static_cast<Nd4jLong>(math::nd4j_ceil<float,T>((tadLength - 1) * fraction));
break;
case 1: // higher
position = static_cast<Nd4jLong>(math::nd4j_floor<float,T>((tadLength - 1) * fraction));
break;
case 2: // nearest
position = static_cast<Nd4jLong>(math::nd4j_round<float,T>((tadLength - 1) * fraction));
break;
}
position = tadLength - position - 1;
percentileKernel<T><<<256, 512, 1024, *context->getCudaStream()>>>(tempArray.specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), packX.numberOfTads(), tadLength, output.specialBuffer(), output.specialShapeInfo(), output.lengthOf(), position);
sd::DebugHelper::checkErrorCode(context->getCudaStream(), "percentile");
}
void percentile(sd::LaunchContext * context, const NDArray& input, NDArray& output, std::vector<int>& axises, const float q, const int interpolation) {
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), _percentile, (context, input, output, axises, q, interpolation), LIBND4J_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
}
BUILD_SINGLE_TEMPLATE(template void _percentile, (sd::LaunchContext * context, const NDArray& input, NDArray& output, std::vector<int>& axises, const float q, const int interpolation), LIBND4J_TYPES);
}
}
} |
217eaf76ea7750fea88c8979fb3b5faa49f1d156.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <arbor/fvm_types.hpp>
#include <arbor/gpu/gpu_api.hpp>
#include <arbor/gpu/gpu_common.hpp>
#include "matrix_common.hpp"
#include "matrix_fine.hpp"
namespace arb {
namespace gpu {
namespace kernels {
//
// gather and scatter kernels
//
// to[i] = from[p[i]]
template <typename T, typename I>
__global__
void gather(const T* __restrict__ const from,
            T* __restrict__ const to,
            const I* __restrict__ const p,
            unsigned n) {
    // Permuted load, contiguous store: to[gid] = from[p[gid]].
    const unsigned gid = blockIdx.x*blockDim.x + threadIdx.x;
    if (gid >= n) return;
    to[gid] = from[p[gid]];
}
// to[p[i]] = from[i]
template <typename T, typename I>
__global__
void scatter(const T* __restrict__ const from,
             T* __restrict__ const to,
             const I* __restrict__ const p,
             unsigned n) {
    // Contiguous load, permuted store: to[p[gid]] = from[gid].
    // If p maps two indices to the same target the writes would race.
    const unsigned gid = blockIdx.x*blockDim.x + threadIdx.x;
    if (gid >= n) return;
    to[p[gid]] = from[gid];
}
/// GPU implementation of Hines matrix assembly.
/// Fine layout.
/// For a given time step size dt:
/// - use the precomputed alpha and alpha_d values to construct the diagonal
/// and off diagonal of the symmetric Hines matrix.
/// - compute the RHS of the linear system to solve.
template <typename T, typename I>
__global__
void assemble_matrix_fine(
        T* __restrict__ const d,                  // [out] matrix diagonal, permuted layout
        T* __restrict__ const rhs,                // [out] right-hand side, permuted layout
        const T* __restrict__ const invariant_d,  // dt-independent part of the diagonal
        const T* __restrict__ const voltage,
        const T* __restrict__ const current,
        const T* __restrict__ const conductivity,
        const T* __restrict__ const cv_capacitance,
        const T* __restrict__ const area,
        const I* __restrict__ const cv_to_intdom, // CV index -> integration domain index
        const T* __restrict__ const dt_intdom,    // time step per integration domain
        const I* __restrict__ const perm,         // CV index -> position in permuted storage
        unsigned n)                               // number of CVs; one thread per CV
{
    const unsigned tid = threadIdx.x + blockDim.x*blockIdx.x;
    if (tid < n) {
        // The 1e-3 is a constant of proportionality required to ensure that the
        // conductance (gi) values have units μS (micro-Siemens).
        // See the model documentation in docs/model for more information.
        const auto dt = dt_intdom[cv_to_intdom[tid]];
        // dt <= 0 marks a domain with no work this step: write a zero diagonal so
        // the solver skips this matrix (see solve_matrix_fine), and pass the
        // voltage through unchanged on the rhs.
        const auto p = dt > 0;
        const auto pid = perm[tid];
        const auto area_factor = T(1e-3)*area[tid];
        const auto gi = T(1e-3)*cv_capacitance[tid]/dt + area_factor*conductivity[tid];
        const auto r_d = gi + invariant_d[tid];
        const auto r_rhs = gi*voltage[tid] - area_factor*current[tid];
        d[pid] = p ? r_d : 0;
        rhs[pid] = p ? r_rhs : voltage[tid];
    }
}
/// GPU implementation of Hines Matrix solver.
/// Fine-grained tree based solver.
/// Each block solves a set of matricesb iterating over the levels of matrix
/// and perfoming a backward and forward substitution. On each level one thread
/// gets assigned to one branch on this level of a matrix and solves and
/// performs the substitution. Afterwards all threads continue on the next
/// level.
/// To avoid idle threads, one should try that on each level, there is a similar
/// number of branches.
template <typename T>
__global__
void solve_matrix_fine(
    T* __restrict__ const rhs,
    T* __restrict__ const d,
    const T* __restrict__ const u,
    const level_metadata* __restrict__ const level_meta,
    const fvm_index_type* __restrict__ const level_lengths,
    const fvm_index_type* __restrict__ const level_parents,
    const fvm_index_type* __restrict__ const block_index,
    const fvm_index_type* __restrict__ const num_matrix) // number of packed matrices = number of cells
{
    const auto tid = threadIdx.x;
    const auto bid = blockIdx.x;
    // Each GPU block owns a contiguous range of levels: block_index[bid] is the
    // first level of this block and block_index[bid+1] bounds it from above.
    const auto first_level = block_index[bid];
    const auto num_levels = block_index[bid + 1] - first_level;
    const auto block_level_meta = &level_meta[first_level];
    // backward substitution
    for (unsigned l=0; l<num_levels-1; ++l) {
        // Metadata for this level and the next level
        const auto& lvl_meta = block_level_meta[l];
        const auto& next_lvl_meta = block_level_meta[l+1];
        // Addresses of the first elements of level_lengths and level_parents
        // that belong to this level
        const auto lvl_lengths = level_lengths + lvl_meta.level_data_index;
        const auto lvl_parents = level_parents + lvl_meta.level_data_index;
        const unsigned width = lvl_meta.num_branches;
        // Perform backward substitution for each branch on this level.
        // One thread per branch.
        if (tid < width) {
            const unsigned len = lvl_lengths[tid];
            unsigned pos = lvl_meta.matrix_data_index + tid;
            // Zero diagonal term implies dt==0; just leave rhs (for whole matrix)
            // alone in that case.
            // Each cell has a different `dt`, because we choose time step size
            // according to when the next event is arriving at a cell. So, some
            // cells require more time steps than others, but we have to solve
            // all the matrices at the same time. When a cell finishes, we put a
            // `0` on the diagonal to mark that it should not be solved for.
            if (d[pos]!=0) {
                // each branch perform substitution
                // Matrix data is interleaved: consecutive nodes of a branch are
                // `width` apart, so adjacent threads touch adjacent addresses.
                for (unsigned i=0; i<len-1; ++i) {
                    const unsigned next_pos = pos + width;
                    const auto d_next = d[next_pos];
                    const auto rhs_next = rhs[next_pos];
                    const T factor = -u[pos]/d[pos];
                    d[next_pos] = fma(factor, u[pos], d_next);
                    rhs[next_pos] = fma(factor, rhs[pos], rhs_next);
                    pos = next_pos;
                }
                // Update d and rhs at the parent node of this branch.
                // A parent may have more than one contributing to it, so we use
                // atomic updates to avoid races conditions.
                const unsigned parent_index = next_lvl_meta.matrix_data_index;
                const unsigned p = parent_index + lvl_parents[tid];
                const T factor = -u[pos] / d[pos];
                gpu_atomic_add(d + p, factor*u[pos]);
                gpu_atomic_add(rhs + p, factor*rhs[pos]);
            }
        }
        __syncthreads();
    }
    // Solve the root
    {
        // The levels are sorted such that the root is the last level
        const auto& last_lvl_meta = block_level_meta[num_levels-1];
        const auto lvl_lengths = level_lengths + last_lvl_meta.level_data_index;
        const unsigned width = num_matrix[bid];
        if (tid < width) {
            const unsigned len = lvl_lengths[tid];
            unsigned pos = last_lvl_meta.matrix_data_index + tid;
            if (d[pos]!=0) {
                // backward
                for (unsigned i=0; i<len-1; ++i) {
                    const unsigned next_pos = pos + width;
                    const T factor = -u[pos] / d[pos];
                    const auto rhs_next = rhs[next_pos];
                    const auto d_next = d[next_pos];
                    d[next_pos] = fma(factor, u[pos], d_next);
                    rhs[next_pos] = fma(factor, rhs[pos], rhs_next);
                    pos = next_pos;
                }
                // Solve the 1x1 system at the tip of the root branch, then
                // substitute back along the branch.
                auto rhsp = rhs[pos] / d[pos];
                rhs[pos] = rhsp;
                pos -= width;
                // forward
                for (unsigned i=0; i<len-1; ++i) {
                    rhsp = rhs[pos] - u[pos]*rhsp;
                    rhsp /= d[pos];
                    rhs[pos] = rhsp;
                    pos -= width;
                }
            }
        }
    }
    // forward substitution
    // take great care with loop limits decrementing unsigned counter l
    for (unsigned l=num_levels-1; l>0; --l) {
        const auto& lvl_meta = block_level_meta[l-1];
        // Addresses of the first elements of level_lengths and level_parents
        // that belong to this level
        const auto lvl_lengths = level_lengths + lvl_meta.level_data_index;
        const auto lvl_parents = level_parents + lvl_meta.level_data_index;
        const unsigned width = lvl_meta.num_branches;
        const unsigned parent_index = block_level_meta[l].matrix_data_index;
        // Barrier: the parent level, finished on the previous iteration, must be
        // complete before its rhs values are consumed below.
        __syncthreads();
        // Perform forward-substitution for each branch on this level.
        // One thread per branch.
        if (tid < width) {
            // Find the index of the first node in this branch.
            const unsigned len = lvl_lengths[tid];
            unsigned pos = lvl_meta.matrix_data_index + (len-1)*width + tid;
            if (d[pos]!=0) {
                // Load the rhs value for the parent node of this branch.
                const unsigned p = parent_index + lvl_parents[tid];
                T rhsp = rhs[p];
                // each branch perform substitution
                for (unsigned i=0; i<len; ++i) {
                    rhsp = rhs[pos] - u[pos]*rhsp;
                    rhsp /= d[pos];
                    rhs[pos] = rhsp;
                    pos -= width;
                }
            }
        }
    }
}
} // namespace kernels
// Host launcher: one thread per element, grid rounded up to cover the tail.
void gather(
    const fvm_value_type* from,
    fvm_value_type* to,
    const fvm_index_type* p,
    unsigned n)
{
    constexpr unsigned threads_per_block = 128;
    const unsigned nblocks = impl::block_count(n, threads_per_block);
    hipLaunchKernelGGL(( kernels::gather), dim3(nblocks), dim3(threads_per_block), 0, 0, from, to, p, n);
}
// Host launcher: one thread per element, grid rounded up to cover the tail.
void scatter(
    const fvm_value_type* from,
    fvm_value_type* to,
    const fvm_index_type* p,
    unsigned n)
{
    constexpr unsigned threads_per_block = 128;
    const unsigned nblocks = impl::block_count(n, threads_per_block);
    hipLaunchKernelGGL(( kernels::scatter), dim3(nblocks), dim3(threads_per_block), 0, 0, from, to, p, n);
}
// Host launcher for the fine-layout Hines matrix assembly kernel.
// One thread per CV; 128 threads per block with the grid rounded up.
void assemble_matrix_fine(
    fvm_value_type* d,
    fvm_value_type* rhs,
    const fvm_value_type* invariant_d,
    const fvm_value_type* voltage,
    const fvm_value_type* current,
    const fvm_value_type* conductivity,
    const fvm_value_type* cv_capacitance,
    const fvm_value_type* area,
    const fvm_index_type* cv_to_intdom,
    const fvm_value_type* dt_intdom,
    const fvm_index_type* perm,
    unsigned n)
{
    const unsigned block_dim = 128;
    const unsigned num_blocks = impl::block_count(n, block_dim);
    hipLaunchKernelGGL(( kernels::assemble_matrix_fine), dim3(num_blocks), dim3(block_dim), 0, 0,
        d, rhs, invariant_d, voltage, current, conductivity, cv_capacitance, area,
        cv_to_intdom, dt_intdom, perm, n);
}
// Example:
//
// block 0 block 1 block 2
// .~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~ ~ ~
//
// L0 \ / L5 \ /
// \/ \/
// L1 \ / \ / L3 \ / \ | / \ / L6 \ / . . .
// \ / \ / \ / \|/ \ / \ /
// L2 | | L4 | | | L7 |
// | | | | | |
//
// levels = [L0, L1, L2, L3, L4, L5, L6, L7, ... ]
// block_index = [0, 3, 5, 8, ...]
// num_levels = [3, 2, 3, ...]
// num_cells = [2, 3, ...]
// num_blocks = level_start.size() - 1 = num_levels.size() = num_cells.size()
// Host launcher for the fine-grained tree-based Hines solver.
// `num_cells` is forwarded to the kernel's `num_matrix` parameter.
void solve_matrix_fine(
    fvm_value_type* rhs,
    fvm_value_type* d, // diagonal values
    const fvm_value_type* u, // upper diagonal (and lower diagonal as the matrix is SPD)
    const level_metadata* level_meta, // information pertaining to each level
    const fvm_index_type* level_lengths, // lengths of branches of every level concatenated
    const fvm_index_type* level_parents, // parents of branches of every level concatenated
    const fvm_index_type* block_index, // start index into levels for each gpu block
    fvm_index_type* num_cells, // the number of cells packed into this single matrix
    fvm_index_type* padded_size, // length of rhs, d, u, including padding; unused in this launcher
    unsigned num_blocks, // number of blocks
    unsigned blocksize) // size of each block
{
    hipLaunchKernelGGL(( kernels::solve_matrix_fine), dim3(num_blocks), dim3(blocksize), 0, 0,
        rhs, d, u, level_meta, level_lengths, level_parents, block_index,
        num_cells);
}
} // namespace gpu
} // namespace arb
| 217eaf76ea7750fea88c8979fb3b5faa49f1d156.cu | #include <arbor/fvm_types.hpp>
#include <arbor/gpu/gpu_api.hpp>
#include <arbor/gpu/gpu_common.hpp>
#include "matrix_common.hpp"
#include "matrix_fine.hpp"
namespace arb {
namespace gpu {
namespace kernels {
//
// gather and scatter kernels
//
// to[i] = from[p[i]]
template <typename T, typename I>
__global__
void gather(const T* __restrict__ const from,
            T* __restrict__ const to,
            const I* __restrict__ const p,
            unsigned n) {
    // Permuted load, contiguous store: to[gid] = from[p[gid]].
    const unsigned gid = blockIdx.x*blockDim.x + threadIdx.x;
    if (gid >= n) return;
    to[gid] = from[p[gid]];
}
// to[p[i]] = from[i]
template <typename T, typename I>
__global__
void scatter(const T* __restrict__ const from,
             T* __restrict__ const to,
             const I* __restrict__ const p,
             unsigned n) {
    // Contiguous load, permuted store: to[p[gid]] = from[gid].
    // If p maps two indices to the same target the writes would race.
    const unsigned gid = blockIdx.x*blockDim.x + threadIdx.x;
    if (gid >= n) return;
    to[p[gid]] = from[gid];
}
/// GPU implementation of Hines matrix assembly.
/// Fine layout.
/// For a given time step size dt:
/// - use the precomputed alpha and alpha_d values to construct the diagonal
/// and off diagonal of the symmetric Hines matrix.
/// - compute the RHS of the linear system to solve.
template <typename T, typename I>
__global__
void assemble_matrix_fine(
        T* __restrict__ const d,                  // [out] matrix diagonal, permuted layout
        T* __restrict__ const rhs,                // [out] right-hand side, permuted layout
        const T* __restrict__ const invariant_d,  // dt-independent part of the diagonal
        const T* __restrict__ const voltage,
        const T* __restrict__ const current,
        const T* __restrict__ const conductivity,
        const T* __restrict__ const cv_capacitance,
        const T* __restrict__ const area,
        const I* __restrict__ const cv_to_intdom, // CV index -> integration domain index
        const T* __restrict__ const dt_intdom,    // time step per integration domain
        const I* __restrict__ const perm,         // CV index -> position in permuted storage
        unsigned n)                               // number of CVs; one thread per CV
{
    const unsigned tid = threadIdx.x + blockDim.x*blockIdx.x;
    if (tid < n) {
        // The 1e-3 is a constant of proportionality required to ensure that the
        // conductance (gi) values have units μS (micro-Siemens).
        // See the model documentation in docs/model for more information.
        const auto dt = dt_intdom[cv_to_intdom[tid]];
        // dt <= 0 marks a domain with no work this step: write a zero diagonal so
        // the solver skips this matrix (see solve_matrix_fine), and pass the
        // voltage through unchanged on the rhs.
        const auto p = dt > 0;
        const auto pid = perm[tid];
        const auto area_factor = T(1e-3)*area[tid];
        const auto gi = T(1e-3)*cv_capacitance[tid]/dt + area_factor*conductivity[tid];
        const auto r_d = gi + invariant_d[tid];
        const auto r_rhs = gi*voltage[tid] - area_factor*current[tid];
        d[pid] = p ? r_d : 0;
        rhs[pid] = p ? r_rhs : voltage[tid];
    }
}
/// GPU implementation of Hines Matrix solver.
/// Fine-grained tree based solver.
/// Each block solves a set of matricesb iterating over the levels of matrix
/// and perfoming a backward and forward substitution. On each level one thread
/// gets assigned to one branch on this level of a matrix and solves and
/// performs the substitution. Afterwards all threads continue on the next
/// level.
/// To avoid idle threads, one should try that on each level, there is a similar
/// number of branches.
template <typename T>
__global__
void solve_matrix_fine(
    T* __restrict__ const rhs,
    T* __restrict__ const d,
    const T* __restrict__ const u,
    const level_metadata* __restrict__ const level_meta,
    const fvm_index_type* __restrict__ const level_lengths,
    const fvm_index_type* __restrict__ const level_parents,
    const fvm_index_type* __restrict__ const block_index,
    const fvm_index_type* __restrict__ const num_matrix) // number of packed matrices = number of cells
{
    const auto tid = threadIdx.x;
    const auto bid = blockIdx.x;
    // Each GPU block owns a contiguous range of levels: block_index[bid] is the
    // first level of this block and block_index[bid+1] bounds it from above.
    const auto first_level = block_index[bid];
    const auto num_levels = block_index[bid + 1] - first_level;
    const auto block_level_meta = &level_meta[first_level];
    // backward substitution
    for (unsigned l=0; l<num_levels-1; ++l) {
        // Metadata for this level and the next level
        const auto& lvl_meta = block_level_meta[l];
        const auto& next_lvl_meta = block_level_meta[l+1];
        // Addresses of the first elements of level_lengths and level_parents
        // that belong to this level
        const auto lvl_lengths = level_lengths + lvl_meta.level_data_index;
        const auto lvl_parents = level_parents + lvl_meta.level_data_index;
        const unsigned width = lvl_meta.num_branches;
        // Perform backward substitution for each branch on this level.
        // One thread per branch.
        if (tid < width) {
            const unsigned len = lvl_lengths[tid];
            unsigned pos = lvl_meta.matrix_data_index + tid;
            // Zero diagonal term implies dt==0; just leave rhs (for whole matrix)
            // alone in that case.
            // Each cell has a different `dt`, because we choose time step size
            // according to when the next event is arriving at a cell. So, some
            // cells require more time steps than others, but we have to solve
            // all the matrices at the same time. When a cell finishes, we put a
            // `0` on the diagonal to mark that it should not be solved for.
            if (d[pos]!=0) {
                // each branch perform substitution
                // Matrix data is interleaved: consecutive nodes of a branch are
                // `width` apart, so adjacent threads touch adjacent addresses.
                for (unsigned i=0; i<len-1; ++i) {
                    const unsigned next_pos = pos + width;
                    const auto d_next = d[next_pos];
                    const auto rhs_next = rhs[next_pos];
                    const T factor = -u[pos]/d[pos];
                    d[next_pos] = fma(factor, u[pos], d_next);
                    rhs[next_pos] = fma(factor, rhs[pos], rhs_next);
                    pos = next_pos;
                }
                // Update d and rhs at the parent node of this branch.
                // A parent may have more than one contributing to it, so we use
                // atomic updates to avoid races conditions.
                const unsigned parent_index = next_lvl_meta.matrix_data_index;
                const unsigned p = parent_index + lvl_parents[tid];
                const T factor = -u[pos] / d[pos];
                gpu_atomic_add(d + p, factor*u[pos]);
                gpu_atomic_add(rhs + p, factor*rhs[pos]);
            }
        }
        __syncthreads();
    }
    // Solve the root
    {
        // The levels are sorted such that the root is the last level
        const auto& last_lvl_meta = block_level_meta[num_levels-1];
        const auto lvl_lengths = level_lengths + last_lvl_meta.level_data_index;
        const unsigned width = num_matrix[bid];
        if (tid < width) {
            const unsigned len = lvl_lengths[tid];
            unsigned pos = last_lvl_meta.matrix_data_index + tid;
            if (d[pos]!=0) {
                // backward
                for (unsigned i=0; i<len-1; ++i) {
                    const unsigned next_pos = pos + width;
                    const T factor = -u[pos] / d[pos];
                    const auto rhs_next = rhs[next_pos];
                    const auto d_next = d[next_pos];
                    d[next_pos] = fma(factor, u[pos], d_next);
                    rhs[next_pos] = fma(factor, rhs[pos], rhs_next);
                    pos = next_pos;
                }
                // Solve the 1x1 system at the tip of the root branch, then
                // substitute back along the branch.
                auto rhsp = rhs[pos] / d[pos];
                rhs[pos] = rhsp;
                pos -= width;
                // forward
                for (unsigned i=0; i<len-1; ++i) {
                    rhsp = rhs[pos] - u[pos]*rhsp;
                    rhsp /= d[pos];
                    rhs[pos] = rhsp;
                    pos -= width;
                }
            }
        }
    }
    // forward substitution
    // take great care with loop limits decrementing unsigned counter l
    for (unsigned l=num_levels-1; l>0; --l) {
        const auto& lvl_meta = block_level_meta[l-1];
        // Addresses of the first elements of level_lengths and level_parents
        // that belong to this level
        const auto lvl_lengths = level_lengths + lvl_meta.level_data_index;
        const auto lvl_parents = level_parents + lvl_meta.level_data_index;
        const unsigned width = lvl_meta.num_branches;
        const unsigned parent_index = block_level_meta[l].matrix_data_index;
        // Barrier: the parent level, finished on the previous iteration, must be
        // complete before its rhs values are consumed below.
        __syncthreads();
        // Perform forward-substitution for each branch on this level.
        // One thread per branch.
        if (tid < width) {
            // Find the index of the first node in this branch.
            const unsigned len = lvl_lengths[tid];
            unsigned pos = lvl_meta.matrix_data_index + (len-1)*width + tid;
            if (d[pos]!=0) {
                // Load the rhs value for the parent node of this branch.
                const unsigned p = parent_index + lvl_parents[tid];
                T rhsp = rhs[p];
                // each branch perform substitution
                for (unsigned i=0; i<len; ++i) {
                    rhsp = rhs[pos] - u[pos]*rhsp;
                    rhsp /= d[pos];
                    rhs[pos] = rhsp;
                    pos -= width;
                }
            }
        }
    }
}
} // namespace kernels
// Host launcher: one thread per element, grid rounded up to cover the tail.
void gather(
    const fvm_value_type* from,
    fvm_value_type* to,
    const fvm_index_type* p,
    unsigned n)
{
    constexpr unsigned threads_per_block = 128;
    const unsigned nblocks = impl::block_count(n, threads_per_block);
    kernels::gather<<<nblocks, threads_per_block>>>(from, to, p, n);
}
// Host launcher: one thread per element, grid rounded up to cover the tail.
void scatter(
    const fvm_value_type* from,
    fvm_value_type* to,
    const fvm_index_type* p,
    unsigned n)
{
    constexpr unsigned threads_per_block = 128;
    const unsigned nblocks = impl::block_count(n, threads_per_block);
    kernels::scatter<<<nblocks, threads_per_block>>>(from, to, p, n);
}
// Host launcher for the fine-layout Hines matrix assembly kernel.
// One thread per CV; 128 threads per block with the grid rounded up.
void assemble_matrix_fine(
    fvm_value_type* d,
    fvm_value_type* rhs,
    const fvm_value_type* invariant_d,
    const fvm_value_type* voltage,
    const fvm_value_type* current,
    const fvm_value_type* conductivity,
    const fvm_value_type* cv_capacitance,
    const fvm_value_type* area,
    const fvm_index_type* cv_to_intdom,
    const fvm_value_type* dt_intdom,
    const fvm_index_type* perm,
    unsigned n)
{
    const unsigned block_dim = 128;
    const unsigned num_blocks = impl::block_count(n, block_dim);
    kernels::assemble_matrix_fine<<<num_blocks, block_dim>>>(
        d, rhs, invariant_d, voltage, current, conductivity, cv_capacitance, area,
        cv_to_intdom, dt_intdom, perm, n);
}
// Example:
//
// block 0 block 1 block 2
// .~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~ ~ ~
//
// L0 \ / L5 \ /
// \/ \/
// L1 \ / \ / L3 \ / \ | / \ / L6 \ / . . .
// \ / \ / \ / \|/ \ / \ /
// L2 | | L4 | | | L7 |
// | | | | | |
//
// levels = [L0, L1, L2, L3, L4, L5, L6, L7, ... ]
// block_index = [0, 3, 5, 8, ...]
// num_levels = [3, 2, 3, ...]
// num_cells = [2, 3, ...]
// num_blocks = level_start.size() - 1 = num_levels.size() = num_cells.size()
// Host launcher for the fine-grained tree-based Hines solver.
// `num_cells` is forwarded to the kernel's `num_matrix` parameter.
void solve_matrix_fine(
    fvm_value_type* rhs,
    fvm_value_type* d, // diagonal values
    const fvm_value_type* u, // upper diagonal (and lower diagonal as the matrix is SPD)
    const level_metadata* level_meta, // information pertaining to each level
    const fvm_index_type* level_lengths, // lengths of branches of every level concatenated
    const fvm_index_type* level_parents, // parents of branches of every level concatenated
    const fvm_index_type* block_index, // start index into levels for each gpu block
    fvm_index_type* num_cells, // the number of cells packed into this single matrix
    fvm_index_type* padded_size, // length of rhs, d, u, including padding; unused in this launcher
    unsigned num_blocks, // number of blocks
    unsigned blocksize) // size of each block
{
    kernels::solve_matrix_fine<<<num_blocks, blocksize>>>(
        rhs, d, u, level_meta, level_lengths, level_parents, block_index,
        num_cells);
}
} // namespace gpu
} // namespace arb
|
1504798706a5190339c2880cffac1513f5f18fe7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/inner_product_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int dim,
    const Dtype* data_1, const Dtype* data_2,
    Dtype* channel_dot, Dtype epsilon) {
  // One thread per row: accumulate the dot product of the two length-`dim`
  // rows, then add epsilon so downstream pow(-0.5) never divides by zero.
  CUDA_KERNEL_LOOP(index, num) {
    Dtype acc = 0;
    for (int k = 0; k < dim; ++k) {
      acc += data_1[index * dim + k] * data_2[index * dim + k];
    }
    channel_dot[index] = acc + epsilon;
  }
}
template <typename Dtype>
__global__ void kernel_channel_scal(const int num, const int dim,
    const Dtype* norm_data,
    Dtype* input_output_data) {
  // Scale each length-`dim` row of the buffer in place by its per-row factor.
  CUDA_KERNEL_LOOP(index, num * dim) {
    const int row = index / dim;
    input_output_data[index] *= norm_data[row];
  }
}
template <typename Dtype>
void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  // Weights come from a second bottom blob when supplied, otherwise from the
  // layer's learned parameter blob.
  const Dtype* weight = bottom.size() >= 2 ? bottom[1]->gpu_data() : this->blobs_[0]->gpu_data();
  if (normalize_ && bottom.size() == 1) {
    // L2-normalise each of the N_ weight rows in place:
    // norm = (w·w + 1e-12)^(-1/2), then w *= norm.
    Dtype* mutable_weight = this->blobs_[0]->mutable_gpu_data();
    Dtype* weight_norm_data = weight_norm_.mutable_gpu_data();
    // NOLINT_NEXT_LINE(whitespace/operators)
    kernel_channel_dot<Dtype> << <CAFFE_GET_BLOCKS(N_),
      CAFFE_CUDA_NUM_THREADS >> >(N_, K_, weight, weight, weight_norm_data, 1e-12);
    caffe_gpu_powx(N_, weight_norm_data, Dtype(-0.5), weight_norm_data);
    // NOLINT_NEXT_LINE(whitespace/operators)
    kernel_channel_scal<Dtype> << <CAFFE_GET_BLOCKS(N_ * K_),
      CAFFE_CUDA_NUM_THREADS >> >(N_, K_, weight_norm_data, mutable_weight);
  }
  if (M_ == 1) {
    // Single sample: matrix-vector product is cheaper than GEMM.
    caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype)1.,
                          weight, bottom_data, (Dtype)0., top_data);
    if (bias_term_)
      caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0],
                            bottom.size() == 3 ? bottom[2]->gpu_data() : this->blobs_[1]->gpu_data(), top_data);
  } else {
    caffe_gpu_gemm<Dtype>(CblasNoTrans,
                          transpose_ ? CblasNoTrans : CblasTrans,
                          M_, N_, K_, (Dtype)1.,
                          bottom_data, weight, (Dtype)0., top_data);
    if (bias_term_)
      // Broadcast-add the bias: outer product of the all-ones multiplier and b.
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1.,
                            bias_multiplier_.gpu_data(),
                            bottom.size() == 3 ? bottom[2]->gpu_data() : this->blobs_[1]->gpu_data(), (Dtype)1., top_data);
  }
}
template <typename Dtype>
void InnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  // Weights may live in bottom[1] (dynamic weights) or in the layer blobs.
  const Dtype* weight = bottom.size() >= 2 ? bottom[1]->gpu_data() : this->blobs_[0]->gpu_data();
  if ((bottom.size() == 1 && this->param_propagate_down_[0]) ||
      (bottom.size() >= 2 && propagate_down[1])) {
    const Dtype* top_diff = top[0]->gpu_diff();
    const Dtype* bottom_data = bottom[0]->gpu_data();
    Dtype* weight_diff = bottom.size() >= 2 ? bottom[1]->mutable_gpu_diff() : this->blobs_[0]->mutable_gpu_diff();
    // Gradient with respect to weight (beta = 1, so gradients accumulate).
    if (transpose_) {
      caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
                            K_, N_, M_,
                            (Dtype)1., bottom_data, top_diff,
                            (Dtype)1., weight_diff);
    } else {
      caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
                            N_, K_, M_,
                            (Dtype)1., top_diff, bottom_data,
                            (Dtype)1., weight_diff);
    }
  }
  if (bias_term_ && (this->param_propagate_down_[1] ||
      (bottom.size() == 3 && propagate_down[2]))) {
    const Dtype* top_diff = top[0]->gpu_diff();
    // Gradient with respect to bias: sum top_diff over the batch dimension
    // via the all-ones bias multiplier.
    caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff,
                          bias_multiplier_.gpu_data(), (Dtype)1.,
                          bottom.size() == 3 ? bottom[2]->mutable_gpu_diff() : this->blobs_[1]->mutable_gpu_diff());
  }
  if (propagate_down[0]) {
    const Dtype* top_diff = top[0]->gpu_diff();
    // Gradient with respect to bottom data
    if (transpose_) {
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans,
                            M_, K_, N_,
                            (Dtype)1., top_diff, weight,
                            (Dtype)0., bottom[0]->mutable_gpu_diff());
    } else {
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
                            M_, K_, N_,
                            (Dtype)1., top_diff, weight,
                            (Dtype)0., bottom[0]->mutable_gpu_diff());
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(InnerProductLayer);
} // namespace caffe
| 1504798706a5190339c2880cffac1513f5f18fe7.cu | #include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/inner_product_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int dim,
    const Dtype* data_1, const Dtype* data_2,
    Dtype* channel_dot, Dtype epsilon) {
  // One thread per row: accumulate the dot product of the two length-`dim`
  // rows, then add epsilon so downstream pow(-0.5) never divides by zero.
  CUDA_KERNEL_LOOP(index, num) {
    Dtype acc = 0;
    for (int k = 0; k < dim; ++k) {
      acc += data_1[index * dim + k] * data_2[index * dim + k];
    }
    channel_dot[index] = acc + epsilon;
  }
}
template <typename Dtype>
__global__ void kernel_channel_scal(const int num, const int dim,
    const Dtype* norm_data,
    Dtype* input_output_data) {
  // Scale each length-`dim` row of the buffer in place by its per-row factor.
  CUDA_KERNEL_LOOP(index, num * dim) {
    const int row = index / dim;
    input_output_data[index] *= norm_data[row];
  }
}
template <typename Dtype>
void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  // Weights come from a second bottom blob when supplied, otherwise from the
  // layer's learned parameter blob.
  const Dtype* weight = bottom.size() >= 2 ? bottom[1]->gpu_data() : this->blobs_[0]->gpu_data();
  if (normalize_ && bottom.size() == 1) {
    // L2-normalise each of the N_ weight rows in place:
    // norm = (w·w + 1e-12)^(-1/2), then w *= norm.
    Dtype* mutable_weight = this->blobs_[0]->mutable_gpu_data();
    Dtype* weight_norm_data = weight_norm_.mutable_gpu_data();
    // NOLINT_NEXT_LINE(whitespace/operators)
    kernel_channel_dot<Dtype> << <CAFFE_GET_BLOCKS(N_),
      CAFFE_CUDA_NUM_THREADS >> >(N_, K_, weight, weight, weight_norm_data, 1e-12);
    caffe_gpu_powx(N_, weight_norm_data, Dtype(-0.5), weight_norm_data);
    // NOLINT_NEXT_LINE(whitespace/operators)
    kernel_channel_scal<Dtype> << <CAFFE_GET_BLOCKS(N_ * K_),
      CAFFE_CUDA_NUM_THREADS >> >(N_, K_, weight_norm_data, mutable_weight);
  }
  if (M_ == 1) {
    // Single sample: matrix-vector product is cheaper than GEMM.
    caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype)1.,
                          weight, bottom_data, (Dtype)0., top_data);
    if (bias_term_)
      caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0],
                            bottom.size() == 3 ? bottom[2]->gpu_data() : this->blobs_[1]->gpu_data(), top_data);
  } else {
    caffe_gpu_gemm<Dtype>(CblasNoTrans,
                          transpose_ ? CblasNoTrans : CblasTrans,
                          M_, N_, K_, (Dtype)1.,
                          bottom_data, weight, (Dtype)0., top_data);
    if (bias_term_)
      // Broadcast-add the bias: outer product of the all-ones multiplier and b.
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1.,
                            bias_multiplier_.gpu_data(),
                            bottom.size() == 3 ? bottom[2]->gpu_data() : this->blobs_[1]->gpu_data(), (Dtype)1., top_data);
  }
}
template <typename Dtype>
void InnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  // Weights may live in bottom[1] (dynamic weights) or in the layer blobs.
  const Dtype* weight = bottom.size() >= 2 ? bottom[1]->gpu_data() : this->blobs_[0]->gpu_data();
  if ((bottom.size() == 1 && this->param_propagate_down_[0]) ||
      (bottom.size() >= 2 && propagate_down[1])) {
    const Dtype* top_diff = top[0]->gpu_diff();
    const Dtype* bottom_data = bottom[0]->gpu_data();
    Dtype* weight_diff = bottom.size() >= 2 ? bottom[1]->mutable_gpu_diff() : this->blobs_[0]->mutable_gpu_diff();
    // Gradient with respect to weight (beta = 1, so gradients accumulate).
    if (transpose_) {
      caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
                            K_, N_, M_,
                            (Dtype)1., bottom_data, top_diff,
                            (Dtype)1., weight_diff);
    } else {
      caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
                            N_, K_, M_,
                            (Dtype)1., top_diff, bottom_data,
                            (Dtype)1., weight_diff);
    }
  }
  if (bias_term_ && (this->param_propagate_down_[1] ||
      (bottom.size() == 3 && propagate_down[2]))) {
    const Dtype* top_diff = top[0]->gpu_diff();
    // Gradient with respect to bias: sum top_diff over the batch dimension
    // via the all-ones bias multiplier.
    caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff,
                          bias_multiplier_.gpu_data(), (Dtype)1.,
                          bottom.size() == 3 ? bottom[2]->mutable_gpu_diff() : this->blobs_[1]->mutable_gpu_diff());
  }
  if (propagate_down[0]) {
    const Dtype* top_diff = top[0]->gpu_diff();
    // Gradient with respect to bottom data
    if (transpose_) {
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans,
                            M_, K_, N_,
                            (Dtype)1., top_diff, weight,
                            (Dtype)0., bottom[0]->mutable_gpu_diff());
    } else {
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
                            M_, K_, N_,
                            (Dtype)1., top_diff, weight,
                            (Dtype)0., bottom[0]->mutable_gpu_diff());
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(InnerProductLayer);
} // namespace caffe
|
51b32864de9c3d7c3456ee0963f2e695c6d107cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "selectNonZero.cuh"
__global__
void selectNonZeroKernel(float* kernel, const int m, const int n){
    // 2D launch over an m-by-n buffer with the x index varying fastest in
    // memory. Zeroes out non-positive entries in place, keeps positive values.
    const int ix = threadIdx.x + blockDim.x*blockIdx.x;
    const int iy = threadIdx.y + blockDim.y*blockIdx.y;
    if (ix >= m || iy >= n) return;
    const int id = ix + iy*m;
    const float v = kernel[id];
    kernel[id] = (v > 0) ? v : 0.0f;
}
void selectNonZeroGlobalMemCuda(float* kernel, const int m, const int n){
    // Guard against an unallocated buffer before launching.
    if (!kernel) {
        std::cout<< "Kernel not allocated"<<std::endl;
        return;
    }
    // 32x8 threads per block; grid rounded up to cover the full m x n range.
    dim3 threads(32, 8, 1);
    dim3 blocks = computeGrid2D(threads, m, n);
    hipLaunchKernelGGL(( selectNonZeroKernel) , dim3(blocks),dim3(threads), 0, 0, kernel, m, n);
}
| 51b32864de9c3d7c3456ee0963f2e695c6d107cb.cu | #include "selectNonZero.cuh"
__global__
void selectNonZeroKernel(float* kernel, const int m, const int n){
    // 2D launch over an m-by-n buffer with the x index varying fastest in
    // memory. Zeroes out non-positive entries in place, keeps positive values.
    const int ix = threadIdx.x + blockDim.x*blockIdx.x;
    const int iy = threadIdx.y + blockDim.y*blockIdx.y;
    if (ix >= m || iy >= n) return;
    const int id = ix + iy*m;
    const float v = kernel[id];
    kernel[id] = (v > 0) ? v : 0.0f;
}
void selectNonZeroGlobalMemCuda(float* kernel, const int m, const int n){
    // Guard against an unallocated buffer before launching.
    if (!kernel) {
        std::cout<< "Kernel not allocated"<<std::endl;
        return;
    }
    // 32x8 threads per block; grid rounded up to cover the full m x n range.
    dim3 threads(32, 8, 1);
    dim3 blocks = computeGrid2D(threads, m, n);
    selectNonZeroKernel <<<blocks,threads>>> (kernel, m, n);
}
|
76e56fe7117779f7b97f03310fe4fc5d8d756b40.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
// Sums the n device-resident doubles starting at c and writes the result to
// the host-side *sum. The original body left the TODO unimplemented, so *sum
// was never assigned; this completes it with thrust::reduce.
extern "C" void thrust_reduce_wrapper(
    double * __restrict__ c,
    int n,
    double * sum
) {
    thrust::device_ptr<double> c_ptr = thrust::device_pointer_cast(c);
    // thrust::reduce over [c_ptr, c_ptr + n) with the default init value 0 and
    // thrust::plus<double>; device_ptr iterators make thrust run on the device
    // and return the scalar result to the host.
    *sum = thrust::reduce(c_ptr, c_ptr + n);
}
| 76e56fe7117779f7b97f03310fe4fc5d8d756b40.cu | #include <cuda.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
// Sums the n device-resident doubles starting at c and writes the result to
// the host-side *sum. The original body left the TODO unimplemented, so *sum
// was never assigned; this completes it with thrust::reduce.
extern "C" void thrust_reduce_wrapper(
    double * __restrict__ c,
    int n,
    double * sum
) {
    thrust::device_ptr<double> c_ptr = thrust::device_pointer_cast(c);
    // thrust::reduce over [c_ptr, c_ptr + n) with the default init value 0 and
    // thrust::plus<double>; device_ptr iterators make thrust run on the device
    // and return the scalar result to the host.
    *sum = thrust::reduce(c_ptr, c_ptr + n);
}
|
b970ca77731f41eba4ab3dba45de736b3d00f664.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>
typedef double REAL;
#include "TridagKernel.cu.h"
#include "TridagPar.h"
/* Computes *result = *t2 - *t1 in (seconds, microseconds).
 * Returns 1 when the difference is negative (t2 earlier than t1), else 0. */
int timeval_subtract(struct timeval *result, struct timeval *t2, struct timeval *t1)
{
    const unsigned int usec_per_sec = 1000000;
    long int total2 = t2->tv_usec + usec_per_sec * t2->tv_sec;
    long int total1 = t1->tv_usec + usec_per_sec * t1->tv_sec;
    long int delta = total2 - total1;
    result->tv_sec  = delta / usec_per_sec;
    result->tv_usec = delta % usec_per_sec;
    return delta < 0;
}
/**
* solves a segmented tridag, i.e.,
* solves `n/sgm_size` independent tridag problems.
* Logically, the arrays should be of size [n/sgm_size][sgm_size],
* and the segmented tridag corresponds to a map on the outer
* dimension which is applying tridag to its inner dimension.
* This is the CUDA parallel implementation, which uses
* block-level segmented scans. This version assumes that
* `n` is a multiple of `sgm_sz` and also that `block_size` is
* a multiple of `sgm_size`, i.e., such that segments do not
* cross block boundaries.
*/
void tridagCUDAWrapper( const unsigned int block_size,
                        REAL* a,
                        REAL* b,
                        REAL* c,
                        REAL* r,
                        const unsigned int n,
                        const unsigned int sgm_sz,
                        REAL* u,
                        REAL* uu
                      ) {
    unsigned int num_blocks;
    // The kernel needs 8 REAL scratch values per thread in shared memory.
    unsigned int sh_mem_size = block_size * 8 * sizeof(REAL);
    // assumes sgm_sz divides block_size, so segments never straddle a block
    if((block_size % sgm_sz)!=0) {
        printf("Invalid segment or block size. Exiting!\n\n!");
        exit(1);   // was exit(0): an invalid-input abort must not report success
    }
    if((n % sgm_sz)!=0) {
        printf("Invalid total size (not a multiple of segment size). Exiting!\n\n!");
        exit(1);   // was exit(0)
    }
    // Ceiling division so the final partial block is still launched.
    num_blocks = (n + (block_size - 1)) / block_size;
    hipLaunchKernelGGL(( TRIDAG_SOLVER), dim3(num_blocks), dim3(block_size), sh_mem_size , 0, a, b, c, r, n, sgm_sz, u, uu);
    // Block until the kernel completes before the caller reads results.
    hipDeviceSynchronize();
}
/**
* solves a segmented tridag, i.e.,
* solves `n/sgm_size` independent tridag problems.
* Logically, the arrays should be of size [n/sgm_size][sgm_size],
* and the segmented tridag corresponds to a map on the outer
* dimension which is applying tridag to its inner dimension.
* This is the CPU sequential implementation, but morally the
* code is re-written to use (sequential) scans.
*/
void
goldenSeqTridagPar(
const REAL* a, // size [n]
const REAL* b, // size [n]
const REAL* c, // size [n]
const REAL* r, // size [n]
const int n,
const int sgm_size,
REAL* u, // size [n]
REAL* uu // size [n] temporary
) {
if((n % sgm_size)!=0) {
printf("Invalid total size (not a multiple of segment size). Exiting!\n\n!");
exit(0);
}
for(int i=0; i<n; i+=sgm_size) {
tridagPar(a+i, b+i, c+i, r+i, sgm_size, u+i, uu+i);
}
}
void init(int block, int n, REAL* a, REAL* b, REAL* c, REAL* d) {
srand(111);
// Tridag is numerically unstable if tried with random data,
// but still ... lets try. We allocate the same data for every block,
// otherwise we have good chances of hitting a bad case!
for(int i=0; i<block; i++) {
a[i] = ((REAL) rand()) / RAND_MAX;
b[i] = ((REAL) rand()) / RAND_MAX;
c[i] = ((REAL) rand()) / RAND_MAX;
d[i] = ((REAL) rand()) / RAND_MAX;
}
for(int i=block; i<n; i++) {
a[i] = a[i-block];
b[i] = b[i-block];
c[i] = c[i-block];
d[i] = d[i-block];
}
}
#define N (1024*1024*8)
#define SGM_SIZE 8
#define BLOCK_SIZE 256
#define EPS 0.002
void validate(int n, REAL* cpu, REAL* gpu) {
for(int i=0; i<n; i++) {
REAL div_fact = (fabs(cpu[i]) < 1.0) ? 1.0 : fabs(cpu[i]);
REAL diff = fabs(cpu[i]-gpu[i])/div_fact;
if( diff > EPS ) {
printf("INVALID Result at index %d, %f %f diff: %f. Exiting!\n\n", i, cpu[i], gpu[i], diff);
exit(0);
}
}
}
/*REAL initOperator( const vector<REAL>& x,
vector<vector<REAL> >& Dxx) {
const unsigned n = x.size();
REAL dxl, dxu;
// lower boundary
dxl = 0.0;
dxu = x[1] - x[0];
Dxx[0][0] = 0.0;
Dxx[0][1] = 0.0;
Dxx[0][2] = 0.0;
Dxx[0][3] = 0.0;
// standard case
for(unsigned i=1;i<n-1;i++)
{
dxl = x[i] - x[i-1];
dxu = x[i+1] - x[i];
Dxx[i][0] = 2.0/dxl/(dxl+dxu);
Dxx[i][1] = -2.0*(1.0/dxl + 1.0/dxu)/(dxl+dxu);
Dxx[i][2] = 2.0/dxu/(dxl+dxu);
Dxx[i][3] = 0.0;
}
// upper boundary
dxl = x[n-1] - x[n-2];
dxu = 0.0;
Dxx[n-1][0] = 0.0;
Dxx[n-1][1] = 0.0;
Dxx[n-1][2] = 0.0;
Dxx[n-1][3] = 0.0;
return Dxx;
}
void testInitOperator(REAL* x, REAL* Dxx, const unsigned n, REAL* MyResult){
const int block_size = 32;
unsigned int num_blocks = ( (n % block_size) == 0) ?
n / block_size :
n / block_size + 1 ;
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
srand(time(NULL));
// call CPU code
gettimeofday(&t_start, NULL);
MyResult = initOperator(x,Dxx);
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("Init Operator on CPU runs in: %lu microsecs", elapsed);
// allocate memory on GPU
float* d_x;
hipMalloc((void**)&d_x, n*sizeof(float));
float* d_Dxx;
hipMalloc((void**)&d_Dxx, n*sizeof(float));
// Copy data on CPU to GPU
hipMemcpy(d_x, x, n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Dxx, Dxx, n*sizeof(float), hipMemcpyHostToDevice);
// perform operation
parInitOperator(Real* x, Real* Dxx, const unsigned n)
// copy data back to CPU
}*/
int main(int argc, char** argv) {
const unsigned int mem_size = N * sizeof(REAL);
// allocate arrays on CPU:
REAL* a = (REAL*) malloc(mem_size);
REAL* b = (REAL*) malloc(mem_size);
REAL* c = (REAL*) malloc(mem_size);
REAL* r = (REAL*) malloc(mem_size);
REAL* gpu_u = (REAL*) malloc(mem_size);
REAL* cpu_u = (REAL*) malloc(mem_size);
REAL* gpu_uu = (REAL*) malloc(mem_size);
REAL* cpu_uu = (REAL*) malloc(mem_size);
// init a, b, c, y
init(BLOCK_SIZE, N, a, b, c, r);
// allocate gpu arrays
REAL *d_a, *d_b, *d_c, *d_r, *d_uu, *d_u;
hipMalloc((void**)&d_a, mem_size);
hipMalloc((void**)&d_b, mem_size);
hipMalloc((void**)&d_c, mem_size);
hipMalloc((void**)&d_r, mem_size);
hipMalloc((void**)&d_uu, mem_size);
hipMalloc((void**)&d_u, mem_size);
// Host-To-Device Copy
hipMemcpy(d_a, a, mem_size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, mem_size, hipMemcpyHostToDevice);
hipMemcpy(d_c, c, mem_size, hipMemcpyHostToDevice);
hipMemcpy(d_r, r, mem_size, hipMemcpyHostToDevice);
// execute on CPU
goldenSeqTridagPar(a,b,c,r, N,SGM_SIZE, cpu_u,cpu_uu);
// execute on GPU
tridagCUDAWrapper(BLOCK_SIZE, d_a,d_b,d_c,d_r, N,SGM_SIZE, d_u,d_uu);
hipError_t cudaReturnCode = hipPeekAtLastError();
if(cudaReturnCode != hipSuccess )
{
printf("\nCUDA ERROR: \"%i: %s\".\n", cudaReturnCode, hipGetErrorString(cudaReturnCode));
}
// transfer back to CPU
hipMemcpy(gpu_u, d_u, mem_size, hipMemcpyDeviceToHost);
hipMemcpy(gpu_uu, d_uu, mem_size, hipMemcpyDeviceToHost);
// free gpu memory
hipFree(d_a); hipFree(d_b); hipFree(d_c); hipFree(d_r);
hipFree(d_u); hipFree(d_uu);
// validate
validate(N, cpu_u, gpu_u);
printf("It Amazingly Validates!!!\n\n");
// deallocate cpu arrays
free(a); free(b); free(c); free(r);
free(gpu_uu); free(cpu_uu);
free(gpu_u); free(cpu_u);
return 0;
}
| b970ca77731f41eba4ab3dba45de736b3d00f664.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>
typedef double REAL;
#include "TridagKernel.cu.h"
#include "TridagPar.h"
int timeval_subtract(struct timeval *result, struct timeval *t2, struct timeval *t1)
{
unsigned int resolution=1000000;
long int diff = (t2->tv_usec + resolution * t2->tv_sec) - (t1->tv_usec + resolution * t1->tv_sec);
result->tv_sec = diff / resolution;
result->tv_usec = diff % resolution;
return (diff<0);
}
/**
* solves a segmented tridag, i.e.,
* solves `n/sgm_size` independent tridag problems.
* Logically, the arrays should be of size [n/sgm_size][sgm_size],
* and the segmented tridag corresponds to a map on the outer
* dimension which is applying tridag to its inner dimension.
* This is the CUDA parallel implementation, which uses
* block-level segmented scans. This version assumes that
* `n` is a multiple of `sgm_sz` and also that `block_size` is
* a multiple of `sgm_size`, i.e., such that segments do not
* cross block boundaries.
*/
void tridagCUDAWrapper( const unsigned int block_size,
REAL* a,
REAL* b,
REAL* c,
REAL* r,
const unsigned int n,
const unsigned int sgm_sz,
REAL* u,
REAL* uu
) {
unsigned int num_blocks;
unsigned int sh_mem_size = block_size * 8 * sizeof(REAL);
// assumes sgm_sz divides block_size
if((block_size % sgm_sz)!=0) {
printf("Invalid segment or block size. Exiting!\n\n!");
exit(0);
}
if((n % sgm_sz)!=0) {
printf("Invalid total size (not a multiple of segment size). Exiting!\n\n!");
exit(0);
}
num_blocks = (n + (block_size - 1)) / block_size;
TRIDAG_SOLVER<<< num_blocks, block_size, sh_mem_size >>>(a, b, c, r, n, sgm_sz, u, uu);
cudaThreadSynchronize();
}
/**
* solves a segmented tridag, i.e.,
* solves `n/sgm_size` independent tridag problems.
* Logically, the arrays should be of size [n/sgm_size][sgm_size],
* and the segmented tridag corresponds to a map on the outer
* dimension which is applying tridag to its inner dimension.
* This is the CPU sequential implementation, but morally the
* code is re-written to use (sequential) scans.
*/
void
goldenSeqTridagPar(
const REAL* a, // size [n]
const REAL* b, // size [n]
const REAL* c, // size [n]
const REAL* r, // size [n]
const int n,
const int sgm_size,
REAL* u, // size [n]
REAL* uu // size [n] temporary
) {
if((n % sgm_size)!=0) {
printf("Invalid total size (not a multiple of segment size). Exiting!\n\n!");
exit(0);
}
for(int i=0; i<n; i+=sgm_size) {
tridagPar(a+i, b+i, c+i, r+i, sgm_size, u+i, uu+i);
}
}
void init(int block, int n, REAL* a, REAL* b, REAL* c, REAL* d) {
srand(111);
// Tridag is numerically unstable if tried with random data,
// but still ... lets try. We allocate the same data for every block,
// otherwise we have good chances of hitting a bad case!
for(int i=0; i<block; i++) {
a[i] = ((REAL) rand()) / RAND_MAX;
b[i] = ((REAL) rand()) / RAND_MAX;
c[i] = ((REAL) rand()) / RAND_MAX;
d[i] = ((REAL) rand()) / RAND_MAX;
}
for(int i=block; i<n; i++) {
a[i] = a[i-block];
b[i] = b[i-block];
c[i] = c[i-block];
d[i] = d[i-block];
}
}
#define N (1024*1024*8)
#define SGM_SIZE 8
#define BLOCK_SIZE 256
#define EPS 0.002
void validate(int n, REAL* cpu, REAL* gpu) {
for(int i=0; i<n; i++) {
REAL div_fact = (fabs(cpu[i]) < 1.0) ? 1.0 : fabs(cpu[i]);
REAL diff = fabs(cpu[i]-gpu[i])/div_fact;
if( diff > EPS ) {
printf("INVALID Result at index %d, %f %f diff: %f. Exiting!\n\n", i, cpu[i], gpu[i], diff);
exit(0);
}
}
}
/*REAL initOperator( const vector<REAL>& x,
vector<vector<REAL> >& Dxx) {
const unsigned n = x.size();
REAL dxl, dxu;
// lower boundary
dxl = 0.0;
dxu = x[1] - x[0];
Dxx[0][0] = 0.0;
Dxx[0][1] = 0.0;
Dxx[0][2] = 0.0;
Dxx[0][3] = 0.0;
// standard case
for(unsigned i=1;i<n-1;i++)
{
dxl = x[i] - x[i-1];
dxu = x[i+1] - x[i];
Dxx[i][0] = 2.0/dxl/(dxl+dxu);
Dxx[i][1] = -2.0*(1.0/dxl + 1.0/dxu)/(dxl+dxu);
Dxx[i][2] = 2.0/dxu/(dxl+dxu);
Dxx[i][3] = 0.0;
}
// upper boundary
dxl = x[n-1] - x[n-2];
dxu = 0.0;
Dxx[n-1][0] = 0.0;
Dxx[n-1][1] = 0.0;
Dxx[n-1][2] = 0.0;
Dxx[n-1][3] = 0.0;
return Dxx;
}
void testInitOperator(REAL* x, REAL* Dxx, const unsigned n, REAL* MyResult){
const int block_size = 32;
unsigned int num_blocks = ( (n % block_size) == 0) ?
n / block_size :
n / block_size + 1 ;
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
srand(time(NULL));
// call CPU code
gettimeofday(&t_start, NULL);
MyResult = initOperator(x,Dxx);
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("Init Operator on CPU runs in: %lu microsecs", elapsed);
// allocate memory on GPU
float* d_x;
cudaMalloc((void**)&d_x, n*sizeof(float));
float* d_Dxx;
cudaMalloc((void**)&d_Dxx, n*sizeof(float));
// Copy data on CPU to GPU
cudaMemcpy(d_x, x, n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Dxx, Dxx, n*sizeof(float), cudaMemcpyHostToDevice);
// perform operation
parInitOperator(Real* x, Real* Dxx, const unsigned n)
// copy data back to CPU
}*/
int main(int argc, char** argv) {
const unsigned int mem_size = N * sizeof(REAL);
// allocate arrays on CPU:
REAL* a = (REAL*) malloc(mem_size);
REAL* b = (REAL*) malloc(mem_size);
REAL* c = (REAL*) malloc(mem_size);
REAL* r = (REAL*) malloc(mem_size);
REAL* gpu_u = (REAL*) malloc(mem_size);
REAL* cpu_u = (REAL*) malloc(mem_size);
REAL* gpu_uu = (REAL*) malloc(mem_size);
REAL* cpu_uu = (REAL*) malloc(mem_size);
// init a, b, c, y
init(BLOCK_SIZE, N, a, b, c, r);
// allocate gpu arrays
REAL *d_a, *d_b, *d_c, *d_r, *d_uu, *d_u;
cudaMalloc((void**)&d_a, mem_size);
cudaMalloc((void**)&d_b, mem_size);
cudaMalloc((void**)&d_c, mem_size);
cudaMalloc((void**)&d_r, mem_size);
cudaMalloc((void**)&d_uu, mem_size);
cudaMalloc((void**)&d_u, mem_size);
// Host-To-Device Copy
cudaMemcpy(d_a, a, mem_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, mem_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_c, c, mem_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_r, r, mem_size, cudaMemcpyHostToDevice);
// execute on CPU
goldenSeqTridagPar(a,b,c,r, N,SGM_SIZE, cpu_u,cpu_uu);
// execute on GPU
tridagCUDAWrapper(BLOCK_SIZE, d_a,d_b,d_c,d_r, N,SGM_SIZE, d_u,d_uu);
cudaError_t cudaReturnCode = cudaPeekAtLastError();
if(cudaReturnCode != cudaSuccess )
{
printf("\nCUDA ERROR: \"%i: %s\".\n", cudaReturnCode, cudaGetErrorString(cudaReturnCode));
}
// transfer back to CPU
cudaMemcpy(gpu_u, d_u, mem_size, cudaMemcpyDeviceToHost);
cudaMemcpy(gpu_uu, d_uu, mem_size, cudaMemcpyDeviceToHost);
// free gpu memory
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFree(d_r);
cudaFree(d_u); cudaFree(d_uu);
// validate
validate(N, cpu_u, gpu_u);
printf("It Amazingly Validates!!!\n\n");
// deallocate cpu arrays
free(a); free(b); free(c); free(r);
free(gpu_uu); free(cpu_uu);
free(gpu_u); free(cpu_u);
return 0;
}
|
3724e4615821036c7cec43030bde8d60bbf27bcc.hip | // !!! This is a file automatically generated by hipify!!!
/**************************************************************/
/* Class: skymap */
/* Generate the skymap. */
/* Author: Lin Yang 03/07/2012 */
/**************************************************************/
#include <cmath>
#include <fstream>
#include <string>
#include "mapparticle.h"
#include "info.h"
#include "structures.h"
#include "info.h"
#include "VL2_debug.h"
#include <iostream>
#include <vector>
#include "skymap.h"
#include <ctime>
#include "kernel.h"
#include <sys/time.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <cstdio>
using namespace std;;
void Skymap::clock_ini(){
gettimeofday(&initialtime,NULL);
}
long Skymap::clock_get_msec(){
struct timeval endtime;
gettimeofday(&endtime, NULL);
long seconds = endtime.tv_sec - initialtime.tv_sec;
long useconds = endtime.tv_usec - initialtime.tv_usec;
long mtime =(long)(((seconds) * 1000 + useconds/1000.0) + 0.5);
return mtime;
}
Skymap::Skymap(){
//if set _reload, then reload
_reload = NULL;
//if set _rotate, then rotate
//if not set, not doing rotate
_rotate = NULL;
}
bool Skymap::creat_map(){
if(_reload == NULL) reload = false;
else reload = *_reload;
if(_rotate == NULL) rotate = false;
else rotate = *_rotate;
//allocate memory for map
//const int LP = 10000;
//the particle numbers
int Nparts = 0;
//get rotation matrix
if ( rotate ){
for(int _i = 0; _i < 3; _i++){
rotmatrix[0 + _i] = master->rotmatrix[0][_i];
rotmatrix[3 + _i] = master->rotmatrix[1][_i];
rotmatrix[6 + _i] = master->rotmatrix[2][_i];
}
}else{
for(int _i = 0; _i < 3; _i++){
rotmatrix[0 + _i] = 0;
rotmatrix[3 + _i] = 0;
rotmatrix[6 + _i] = 0;
}
rotmatrix[0] = 1.0;
rotmatrix[4] = 1.0;
rotmatrix[8] = 1.0;
}
#ifdef _DEBUG__LY__
//cout << "good1" <<endl;
#endif
// Read particle_numbers
ifstream data_input_file((*datafile).c_str(), ios::binary);
if(data_input_file.bad()){
cout << "Data Error!!!" << endl;
exit(0);
}
data_input_file.read((char*)&Nparts, sizeof(Nparts));
cout << "Particles: " << Nparts << endl;
Np = Nparts;
//setup cpu-memory for particles
num_p = 0;
particles = new MapParticle[CPU_chunk];
//sorted cpu-memory for particles
MapParticle * sorted_particles = new MapParticle[CPU_chunk];
//setup observation position
Real * opos = master->params.opos;
//setup Nside of Healpix map
long Nside = master->map.Nside;
//setup Total Pix number of Healpix map
long Npix_in_map = master->map.Npix;
//this guy is the same of Npix_in_map
//int Npix_map = 12 * Nside * Nside;
//the omega element of each pix
Real dOmega = 4.0 * PI / Npix_in_map;
Real theta0 = acos( 1.0 - dOmega/(2.0*PI) );
//alloc and initialize the allksymap array
Real * allskymap = (Real *) calloc(Npix_in_map, sizeof(Real));
//pointers to the rotmatrix in the GPU memory
Real * dev_rotm = 0; //(should be constant)/
Real * dev_opos = 0; //(should be constant)/
//final factor for the flux
double fluxfactor = master->codeunits.annihilation_flux_to_cgs;
//pointers to the particle memory in the GPU memory
MapParticle * dev_par = 0;
//key and values for sorting
//int * host_keys;
//int * dev_keys;
//int * dev_values;
//checkout is there any GPU
hipError_t cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
return false;
}
//copy rotmatrix into GPU
cudaStatus = hipMalloc((void**)&dev_rotm, sizeof(Real) * 9);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return false;
}
cudaStatus = hipMemcpy(dev_rotm, rotmatrix, sizeof(Real) * 9, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return false;
}
//copy o_pos into GPU
cudaStatus = hipMalloc((void**)&dev_opos, sizeof(Real) * 3);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return false;
}
cudaStatus = hipMemcpy(dev_opos, opos, sizeof(Real) * 3, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return false;
}
//allocate particle memery into GPU
int parsize = PRE_chunk > GPU_chunk ? PRE_chunk : GPU_chunk;
cudaStatus = hipMalloc((void**)&dev_par, sizeof(MapParticle) * parsize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return false;
}
//use for sorting
thrust::device_vector<int> dev_key(CPU_chunk);
thrust::device_vector<int> dev_val(CPU_chunk);
thrust::host_vector<int> host_val(CPU_chunk);
// obtain raw pointer to device vectors memory
int * pd_key = thrust::raw_pointer_cast(&dev_key[0]);
int * pd_val = thrust::raw_pointer_cast(&dev_val[0]);
cout << "Creating map!!!" << endl;
//int rec = Nparts / GPU_chunk / 50;
//recording time
long time_start;
time_start = clock_get_msec();
#ifdef _DEBUG__LY__
for(int _ip = 0, _jp=0 ; _ip < Nparts, _jp<1; _jp ++ ){
#else
for(int _ip = 0, _jp=0 ; _ip < Nparts; _jp ++ ){
#endif
/*****************************************************************************************************************/
long time_loop_start = clock_get_msec();
cout << ">>>>CPU_chunk " << _jp << "--- Particles: " << CPU_chunk + _ip << "/"<< Nparts << "..." << endl;
long time_step_start = clock_get_msec();
int nmax = 0;
//tnmax is the number of particles read into the CPU memory from the hard drive
int tnmax = 0;
//read to CPU chunk
if( (Nparts - _ip) >= CPU_chunk ){//read a block of data
data_input_file.read((char*)particles, sizeof(MapParticle) * CPU_chunk);
_ip += CPU_chunk;
tnmax = CPU_chunk;
}else{
tnmax = (Nparts - _ip);
data_input_file.read((char*)particles, sizeof(MapParticle) * tnmax);
_ip += tnmax;
}
std::cout <<"1) read from disk cost: " << (clock_get_msec() - time_step_start) / 1000.0 << " secs. "<< std::endl;
//step 1: pre-deal with particles
//get the start point of pre-process data
time_step_start = clock_get_msec();
for(int _pt =0; _pt < CPU_chunk; ){
if( (Nparts - _pt) >= PRE_chunk ){//read a block of data
nmax = PRE_chunk;
}else{
nmax = (CPU_chunk - _pt);
}
cudaStatus = hipMemcpy(dev_par, particles + _pt, sizeof(MapParticle) * nmax, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return false;
}
cudaStatus = doWithCuda_pre(PRE_chunk, Nside, theta0, 1, nmax, allskymap,
dev_par, particles + _pt, dev_rotm, dev_opos, pd_key + _pt, pd_val + _pt, _pt);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "pre_calculation Kernel failed!");
return false;
}
_pt += nmax;
}
//hipFree(dev_par);
std::cout <<"2) pre-calculating cost: " << (clock_get_msec() - time_step_start) / 1000.0 << " secs. "<< std::endl;
//step 2: sort
time_step_start = clock_get_msec();
// interface to CUDA code
thrust::sort_by_key(dev_key.begin(), dev_key.end(), dev_val.begin());
thrust::copy(dev_val.begin(), dev_val.end(),host_val.begin());
//actually sort on the particles
for(int _pkk = CPU_chunk - 1; _pkk >=0; _pkk --){
int pg =host_val[_pkk];
sorted_particles[_pkk] = particles[pg];
}
{//swape the sorted particles with the unsorted particles
MapParticle * temp;
temp = particles;
particles = sorted_particles;
sorted_particles = temp;
}
std::cout <<"3) sort cost: " << (clock_get_msec() - time_step_start) / 1000.0 << " secs. "<< std::endl;
//step3: calculate flux
time_step_start = clock_get_msec();
for(int _pt =0, _ptn = 0; _pt < CPU_chunk; _ptn++ ){
if( (Nparts - _pt) >= GPU_chunk ){//read a block of data
nmax = GPU_chunk;
}else{
nmax = (CPU_chunk - _pt);
}
//if(_pt < 2031616){ _pt += nmax; continue;}
cudaStatus = hipMemcpy(dev_par, particles + _pt, sizeof(MapParticle) * nmax, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return false;
}
//decide whether or not do step 1
//if > 5000 exceeds
cudaStatus = doWithCuda_Par(GPU_chunk, Nside, theta0, 1, nmax, allskymap,
dev_par, particles + _pt, dev_rotm, dev_opos);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "map calculation Kernel failed!");
return false;
}
_pt += nmax;
if(_ptn % 10 ==0 ){
std::cout << ".";// << _pt << "/" << CPU_chunk << endl;
std::cout.flush();
}
}
std::cout << endl;
std::cout <<"4) flux calculating cost: " << (clock_get_msec() - time_step_start) / 1000.0 << " secs. "<< std::endl;
std::cout << ">>>>chunk " << _jp << ": "<< (float)_ip / Nparts *100<<"% finished, costs " << (Real)(clock_get_msec() - time_loop_start) / 1000.0 <<
" secs, escaped: " << (Real)(clock_get_msec() - time_start) / 1000.0 << " secs\n" << endl;
_jp =_jp;
/*****************************************************************************************************************/
}
cout << endl;
cout << "Time cosumed: " << (Real)(clock_get_msec() - time_start) / 1000.0 << " seconds" << endl;
#ifdef _DEBUG__LY__
print_out_master(master);
#endif
//divide by solid angle of map's pixels
//conversion of allsky from g^2 cm^-5 to Gev^2 cm^-6 kpc
Real unit_factor = pow(pow((master -> natconst.c_in_cgs), 2) /
(master->units.eV_in_cgs * 1.0e9), 2) / (master->units.Mpc_in_cgs * 1.0e-3);
for(int i = 0; i < Npix_in_map; i++){
float amap = (float)((double)(unit_factor) /
(double)(master->map.dOmega) * double (fluxfactor) * (double) allskymap[i]);
allskymap[i] = amap;
}
#ifdef _DEBUG__LY__
Real rot1[9];
cudaStatus = hipMemcpy(rot1, dev_rotm, 9*sizeof(Real), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return false;
}
// cout << "flux" << fluxes << endl;
cout << "unit_factor: " << unit_factor << endl;
cout << "dOmega: " << master->map.dOmega << endl;
cout << "All Skymap:" <<endl;
cout << "skymap[0]: " << allskymap[0] << endl;
cout << "skymap[1]: " << allskymap[1] << endl;
cout << "skymap[2]: " << allskymap[2] << endl;
cout << "skymap[100]: " << allskymap[100] << endl;
cout << "skymap[10000]: " << allskymap[10000] << endl;
cout << "skymap[1000000]: " << allskymap[1000000] << endl;
cout << "skymap[2977220]: " << allskymap[2977220] << endl;
/*for(int i =0; i < 12*512*512; i++){
if(allskymap[i] !=0 ){
cout << i <<"-----"<< allskymap[i]<<endl;
}
}*/
#endif
cout << "Writing to file \"" << *fits_filename << "\":" << endl;
ofstream output_file (fits_filename -> c_str(), ios::out | ios::binary);
if(output_file.good()){
output_file.write ((char *)allskymap, Npix_in_map * sizeof(Real));
}else{
cout << "Writing Error!";
}
output_file.close();
cout << "success!" << endl;
free(allskymap);
data_input_file.close();
//free(newskymap);
hipFree(dev_rotm);
hipFree(dev_opos);
hipFree(dev_par);
//hipFree(dev_allskymap);
return true;
}
| 3724e4615821036c7cec43030bde8d60bbf27bcc.cu | /**************************************************************/
/* Class: skymap */
/* Generate the skymap. */
/* Author: Lin Yang 03/07/2012 */
/**************************************************************/
#include <cmath>
#include <fstream>
#include <string>
#include "mapparticle.h"
#include "info.h"
#include "structures.h"
#include "info.h"
#include "VL2_debug.h"
#include <iostream>
#include <vector>
#include "skymap.h"
#include <ctime>
#include "kernel.h"
#include <sys/time.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <cstdio>
using namespace std;;
void Skymap::clock_ini(){
gettimeofday(&initialtime,NULL);
}
long Skymap::clock_get_msec(){
struct timeval endtime;
gettimeofday(&endtime, NULL);
long seconds = endtime.tv_sec - initialtime.tv_sec;
long useconds = endtime.tv_usec - initialtime.tv_usec;
long mtime =(long)(((seconds) * 1000 + useconds/1000.0) + 0.5);
return mtime;
}
Skymap::Skymap(){
//if set _reload, then reload
_reload = NULL;
//if set _rotate, then rotate
//if not set, not doing rotate
_rotate = NULL;
}
bool Skymap::creat_map(){
if(_reload == NULL) reload = false;
else reload = *_reload;
if(_rotate == NULL) rotate = false;
else rotate = *_rotate;
//allocate memory for map
//const int LP = 10000;
//the particle numbers
int Nparts = 0;
//get rotation matrix
if ( rotate ){
for(int _i = 0; _i < 3; _i++){
rotmatrix[0 + _i] = master->rotmatrix[0][_i];
rotmatrix[3 + _i] = master->rotmatrix[1][_i];
rotmatrix[6 + _i] = master->rotmatrix[2][_i];
}
}else{
for(int _i = 0; _i < 3; _i++){
rotmatrix[0 + _i] = 0;
rotmatrix[3 + _i] = 0;
rotmatrix[6 + _i] = 0;
}
rotmatrix[0] = 1.0;
rotmatrix[4] = 1.0;
rotmatrix[8] = 1.0;
}
#ifdef _DEBUG__LY__
//cout << "good1" <<endl;
#endif
// Read particle_numbers
ifstream data_input_file((*datafile).c_str(), ios::binary);
if(data_input_file.bad()){
cout << "Data Error!!!" << endl;
exit(0);
}
data_input_file.read((char*)&Nparts, sizeof(Nparts));
cout << "Particles: " << Nparts << endl;
Np = Nparts;
//setup cpu-memory for particles
num_p = 0;
particles = new MapParticle[CPU_chunk];
//sorted cpu-memory for particles
MapParticle * sorted_particles = new MapParticle[CPU_chunk];
//setup observation position
Real * opos = master->params.opos;
//setup Nside of Healpix map
long Nside = master->map.Nside;
//setup Total Pix number of Healpix map
long Npix_in_map = master->map.Npix;
//this guy is the same of Npix_in_map
//int Npix_map = 12 * Nside * Nside;
//the omega element of each pix
Real dOmega = 4.0 * PI / Npix_in_map;
Real theta0 = acos( 1.0 - dOmega/(2.0*PI) );
//alloc and initialize the allksymap array
Real * allskymap = (Real *) calloc(Npix_in_map, sizeof(Real));
//pointers to the rotmatrix in the GPU memory
Real * dev_rotm = 0; //(should be constant)/
Real * dev_opos = 0; //(should be constant)/
//final factor for the flux
double fluxfactor = master->codeunits.annihilation_flux_to_cgs;
//pointers to the particle memory in the GPU memory
MapParticle * dev_par = 0;
//key and values for sorting
//int * host_keys;
//int * dev_keys;
//int * dev_values;
//checkout is there any GPU
cudaError_t cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
return false;
}
//copy rotmatrix into GPU
cudaStatus = cudaMalloc((void**)&dev_rotm, sizeof(Real) * 9);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return false;
}
cudaStatus = cudaMemcpy(dev_rotm, rotmatrix, sizeof(Real) * 9, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return false;
}
//copy o_pos into GPU
cudaStatus = cudaMalloc((void**)&dev_opos, sizeof(Real) * 3);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return false;
}
cudaStatus = cudaMemcpy(dev_opos, opos, sizeof(Real) * 3, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return false;
}
//allocate particle memery into GPU
int parsize = PRE_chunk > GPU_chunk ? PRE_chunk : GPU_chunk;
cudaStatus = cudaMalloc((void**)&dev_par, sizeof(MapParticle) * parsize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return false;
}
//use for sorting
thrust::device_vector<int> dev_key(CPU_chunk);
thrust::device_vector<int> dev_val(CPU_chunk);
thrust::host_vector<int> host_val(CPU_chunk);
// obtain raw pointer to device vectors memory
int * pd_key = thrust::raw_pointer_cast(&dev_key[0]);
int * pd_val = thrust::raw_pointer_cast(&dev_val[0]);
cout << "Creating map!!!" << endl;
//int rec = Nparts / GPU_chunk / 50;
//recording time
long time_start;
time_start = clock_get_msec();
#ifdef _DEBUG__LY__
for(int _ip = 0, _jp=0 ; _ip < Nparts, _jp<1; _jp ++ ){
#else
for(int _ip = 0, _jp=0 ; _ip < Nparts; _jp ++ ){
#endif
/*****************************************************************************************************************/
long time_loop_start = clock_get_msec();
cout << ">>>>CPU_chunk " << _jp << "--- Particles: " << CPU_chunk + _ip << "/"<< Nparts << "..." << endl;
long time_step_start = clock_get_msec();
int nmax = 0;
//tnmax is the number of particles read into the CPU memory from the hard drive
int tnmax = 0;
//read to CPU chunk
if( (Nparts - _ip) >= CPU_chunk ){//read a block of data
data_input_file.read((char*)particles, sizeof(MapParticle) * CPU_chunk);
_ip += CPU_chunk;
tnmax = CPU_chunk;
}else{
tnmax = (Nparts - _ip);
data_input_file.read((char*)particles, sizeof(MapParticle) * tnmax);
_ip += tnmax;
}
std::cout <<"1) read from disk cost: " << (clock_get_msec() - time_step_start) / 1000.0 << " secs. "<< std::endl;
//step 1: pre-deal with particles
//get the start point of pre-process data
time_step_start = clock_get_msec();
for(int _pt =0; _pt < CPU_chunk; ){
if( (Nparts - _pt) >= PRE_chunk ){//read a block of data
nmax = PRE_chunk;
}else{
nmax = (CPU_chunk - _pt);
}
cudaStatus = cudaMemcpy(dev_par, particles + _pt, sizeof(MapParticle) * nmax, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return false;
}
cudaStatus = doWithCuda_pre(PRE_chunk, Nside, theta0, 1, nmax, allskymap,
dev_par, particles + _pt, dev_rotm, dev_opos, pd_key + _pt, pd_val + _pt, _pt);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "pre_calculation Kernel failed!");
return false;
}
_pt += nmax;
}
//cudaFree(dev_par);
std::cout <<"2) pre-calculating cost: " << (clock_get_msec() - time_step_start) / 1000.0 << " secs. "<< std::endl;
//step 2: sort
time_step_start = clock_get_msec();
// interface to CUDA code
thrust::sort_by_key(dev_key.begin(), dev_key.end(), dev_val.begin());
thrust::copy(dev_val.begin(), dev_val.end(),host_val.begin());
//actually sort on the particles
for(int _pkk = CPU_chunk - 1; _pkk >=0; _pkk --){
int pg =host_val[_pkk];
sorted_particles[_pkk] = particles[pg];
}
{//swape the sorted particles with the unsorted particles
MapParticle * temp;
temp = particles;
particles = sorted_particles;
sorted_particles = temp;
}
std::cout <<"3) sort cost: " << (clock_get_msec() - time_step_start) / 1000.0 << " secs. "<< std::endl;
//step3: calculate flux
time_step_start = clock_get_msec();
for(int _pt =0, _ptn = 0; _pt < CPU_chunk; _ptn++ ){
if( (Nparts - _pt) >= GPU_chunk ){//read a block of data
nmax = GPU_chunk;
}else{
nmax = (CPU_chunk - _pt);
}
//if(_pt < 2031616){ _pt += nmax; continue;}
cudaStatus = cudaMemcpy(dev_par, particles + _pt, sizeof(MapParticle) * nmax, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return false;
}
//decide whether or not do step 1
//if > 5000 exceeds
cudaStatus = doWithCuda_Par(GPU_chunk, Nside, theta0, 1, nmax, allskymap,
dev_par, particles + _pt, dev_rotm, dev_opos);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "map calculation Kernel failed!");
return false;
}
_pt += nmax;
if(_ptn % 10 ==0 ){
std::cout << ".";// << _pt << "/" << CPU_chunk << endl;
std::cout.flush();
}
}
std::cout << endl;
std::cout <<"4) flux calculating cost: " << (clock_get_msec() - time_step_start) / 1000.0 << " secs. "<< std::endl;
std::cout << ">>>>chunk " << _jp << ": "<< (float)_ip / Nparts *100<<"% finished, costs " << (Real)(clock_get_msec() - time_loop_start) / 1000.0 <<
" secs, escaped: " << (Real)(clock_get_msec() - time_start) / 1000.0 << " secs\n" << endl;
_jp =_jp;
/*****************************************************************************************************************/
}
cout << endl;
cout << "Time cosumed: " << (Real)(clock_get_msec() - time_start) / 1000.0 << " seconds" << endl;
#ifdef _DEBUG__LY__
print_out_master(master);
#endif
//divide by solid angle of map's pixels
//conversion of allsky from g^2 cm^-5 to Gev^2 cm^-6 kpc
Real unit_factor = pow(pow((master -> natconst.c_in_cgs), 2) /
(master->units.eV_in_cgs * 1.0e9), 2) / (master->units.Mpc_in_cgs * 1.0e-3);
for(int i = 0; i < Npix_in_map; i++){
float amap = (float)((double)(unit_factor) /
(double)(master->map.dOmega) * double (fluxfactor) * (double) allskymap[i]);
allskymap[i] = amap;
}
#ifdef _DEBUG__LY__
Real rot1[9];
cudaStatus = cudaMemcpy(rot1, dev_rotm, 9*sizeof(Real), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return false;
}
// cout << "flux" << fluxes << endl;
cout << "unit_factor: " << unit_factor << endl;
cout << "dOmega: " << master->map.dOmega << endl;
cout << "All Skymap:" <<endl;
cout << "skymap[0]: " << allskymap[0] << endl;
cout << "skymap[1]: " << allskymap[1] << endl;
cout << "skymap[2]: " << allskymap[2] << endl;
cout << "skymap[100]: " << allskymap[100] << endl;
cout << "skymap[10000]: " << allskymap[10000] << endl;
cout << "skymap[1000000]: " << allskymap[1000000] << endl;
cout << "skymap[2977220]: " << allskymap[2977220] << endl;
/*for(int i =0; i < 12*512*512; i++){
if(allskymap[i] !=0 ){
cout << i <<"-----"<< allskymap[i]<<endl;
}
}*/
#endif
cout << "Writing to file \"" << *fits_filename << "\":" << endl;
ofstream output_file (fits_filename -> c_str(), ios::out | ios::binary);
if(output_file.good()){
output_file.write ((char *)allskymap, Npix_in_map * sizeof(Real));
}else{
cout << "Writing Error!";
}
output_file.close();
cout << "success!" << endl;
free(allskymap);
data_input_file.close();
//free(newskymap);
cudaFree(dev_rotm);
cudaFree(dev_opos);
cudaFree(dev_par);
//cudaFree(dev_allskymap);
return true;
}
|
a3b43bca75b5a14b1ecd8053426e2c1f41459aa3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Fills data[0..size) with zeros.
// Uses a grid-stride loop, so any 1D grid/block configuration covers the
// whole buffer regardless of how `size` relates to the launch shape.
__global__ void cudaUZeroInit_kernel(unsigned int size, unsigned int* data)
{
    // global linear thread id and total number of launched threads
    const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;

    for (unsigned int i = index; i < size; i += stride)
        data[i] = 0U;
} | a3b43bca75b5a14b1ecd8053426e2c1f41459aa3.cu | #include "includes.h"
// Fills data[0..size) with zeros.
// Uses a grid-stride loop, so any 1D grid/block configuration covers the
// whole buffer regardless of how `size` relates to the launch shape.
__global__ void cudaUZeroInit_kernel(unsigned int size, unsigned int* data)
{
    // global linear thread id and total number of launched threads
    const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;

    for (unsigned int i = index; i < size; i += stride)
        data[i] = 0U;
} |
ecd5ffc3ba664b33d4bd84272db0bc4d8a336f96.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "3rdParty\cuda-helper\helper_cuda.h"
#include "util\global.h"
#include "util\cuda_util.h"
#include "sourcing_types.h"
//------------------------------------------------------------------------
// Radial
//------------------------------------------------------------------------
// Adds `scalar` to every cell of the sub-field that lies inside a circle of
// `radius` (measured in normalised 0..1 field coordinates) around `srcPos`.
// With `falloff` set, the contribution is scaled linearly from
// hardness*scalar at the centre down to 0 at the rim.  Negative scalars act
// as sinks; the result is clamped at 0 so heights never become negative.
// Launch layout: one thread per (i,j) cell of the N.x x N.y sub-region.
__global__ void addSourcingToSubfieldRadial( float* origField ,size_t origPitch, size_2D origSize ,
										    float scalar, size_2D subOffset,
											float radius, size_2D srcPos,
											float hardness, bool falloff, dim3 N)
{
	DEFINE_DEFAULT_KERNEL_WORKID_2D(i,j);
	RETURN_IF_OUTSIDE_2D(i,j,N);

	size_t origFieldIdxX = i+ subOffset.x;
	size_t origFieldIdxZ = j+ subOffset.z;

	// signed distance from the source position in cell indices
	int diffx = (srcPos.x - origFieldIdxX);
	int diffz = (srcPos.z - origFieldIdxZ);

	// Normalise to 0..1 and compute the euclidean distance, preserving the
	// aspect ratio of non-square fields.  BUGFIX: the aspect ratio must be
	// computed in floating point -- the previous integer division
	// (origSize.z / origSize.x) truncated non-integral ratios and distorted
	// the source circle.
	float dist = 0.0f;
	if(origSize.x < origSize.z )
	{
		float aspect = (float)origSize.z / (float)origSize.x;
		float dx = diffx / (float)(origSize.x-1);
		float dz = (diffz / (float)(origSize.z-1)) * aspect;
		dist = sqrtf( dx*dx + dz*dz );
	}
	else
	{
		float aspect = (float)origSize.x / (float)origSize.z;
		float dx = (diffx / (float)(origSize.x-1)) * aspect;
		float dz = diffz / (float)(origSize.z-1);
		dist = sqrtf( dx*dx + dz*dz );
	}

	if( dist > radius)
		return;

	// pitched 2D addressing of the affected cell
	float *cell =  (float*)((char*)origField + origFieldIdxX * origPitch) + origFieldIdxZ;

	if(falloff)
	{
		// linear falloff towards the rim; note the clamp tests the
		// un-scaled scalar (behaviour kept from the original).
		*cell = ( *cell + scalar > 0.0f ?  *cell +
					scalar * ( hardness * (1.0f - ( dist/radius)) )
					: 0.0f);
	}
	else
	{
		*cell = ( *cell + scalar > 0.0f ?  *cell + scalar : 0.0f);
	}
}
// Host wrapper: applies radial sourcing to the inclusive index range
// [fromIdx, toIdx] of the height field, then checks for launch errors.
extern void cw_radialSourcing(floatMem& heights, size_2D gridSize, 
						float perCellAmount, 
						size_2D fromIdx, size_2D toIdx, size_2D posIdx, 
						float radius, float hardness, bool useFalloff)
{
	// extent of the (inclusive) sub-region to be processed
	size_2D extent;
	extent.x = toIdx.x - fromIdx.x + 1;
	extent.z = toIdx.z - fromIdx.z + 1;

	hipLaunchKernelGGL(( addSourcingToSubfieldRadial) , dim3(getNumBlocks2D(extent)), dim3(getThreadsPerBlock2D()) , 0, 0, 
		heights.devPtr, heights.pitch, gridSize,
		perCellAmount, fromIdx,radius ,posIdx,hardness, useFalloff,
		dim3FromSize_2D(extent));
	checkCudaErrors(hipGetLastError());
}
//------------------------------------------------------------------------
// Rectangular
//------------------------------------------------------------------------
// Adds `scalar` to every cell of the rectangular sub-region, clamping the
// result at zero so sinks can never drive a height negative.
// Launch layout: one thread per (i,j) cell of the N.x x N.y sub-region.
__global__ void addSourcingToSubfield( float* origField ,size_t origPitch, 
									  float scalar, size_2D subOffset, dim3 N)
{
	DEFINE_DEFAULT_KERNEL_WORKID_2D(i,j);
	RETURN_IF_OUTSIDE_2D(i,j,N);

	// pitched 2D addressing of the target cell
	char* row = (char*)origField + (i + subOffset.x) * origPitch;
	float* cell = (float*)row + (j + subOffset.z);

	// avoid negative heights if it is a sink
	float updated = *cell + scalar;
	*cell = (updated > 0.0f) ? updated : 0.0f;
}
// Host wrapper: adds `perCellAmount` to every cell of the inclusive index
// range [fromIdx, toIdx] of the height field, then checks for launch errors.
extern void cw_rectSourcing(floatMem& heights, size_2D gridSize, 
					   float perCellAmount, 
					   size_2D fromIdx, size_2D toIdx)
{
	// extent of the (inclusive) sub-region to be processed
	size_2D size;
	size.x = (toIdx.x + 1) - fromIdx.x;
	size.z = (toIdx.z + 1) - fromIdx.z;
	hipLaunchKernelGGL(( addSourcingToSubfield) , dim3(getNumBlocks2D(size)), dim3(getThreadsPerBlock2D()) , 0, 0, 
		heights.devPtr, heights.pitch,
		perCellAmount, fromIdx , dim3FromSize_2D(size));
	checkCudaErrors(hipGetLastError());
} | ecd5ffc3ba664b33d4bd84272db0bc4d8a336f96.cu | #include <cuda_runtime.h>
#include "3rdParty\cuda-helper\helper_cuda.h"
#include "util\global.h"
#include "util\cuda_util.h"
#include "sourcing_types.h"
//------------------------------------------------------------------------
// Radial
//------------------------------------------------------------------------
// Adds `scalar` to every cell of the sub-field that lies inside a circle of
// `radius` (measured in normalised 0..1 field coordinates) around `srcPos`.
// With `falloff` set, the contribution is scaled linearly from
// hardness*scalar at the centre down to 0 at the rim.  Negative scalars act
// as sinks; the result is clamped at 0 so heights never become negative.
// Launch layout: one thread per (i,j) cell of the N.x x N.y sub-region.
__global__ void addSourcingToSubfieldRadial( float* origField ,size_t origPitch, size_2D origSize ,
										    float scalar, size_2D subOffset,
											float radius, size_2D srcPos,
											float hardness, bool falloff, dim3 N)
{
	DEFINE_DEFAULT_KERNEL_WORKID_2D(i,j);
	RETURN_IF_OUTSIDE_2D(i,j,N);

	size_t origFieldIdxX = i+ subOffset.x;
	size_t origFieldIdxZ = j+ subOffset.z;

	// signed distance from the source position in cell indices
	int diffx = (srcPos.x - origFieldIdxX);
	int diffz = (srcPos.z - origFieldIdxZ);

	// Normalise to 0..1 and compute the euclidean distance, preserving the
	// aspect ratio of non-square fields.  BUGFIX: the aspect ratio must be
	// computed in floating point -- the previous integer division
	// (origSize.z / origSize.x) truncated non-integral ratios and distorted
	// the source circle.
	float dist = 0.0f;
	if(origSize.x < origSize.z )
	{
		float aspect = (float)origSize.z / (float)origSize.x;
		float dx = diffx / (float)(origSize.x-1);
		float dz = (diffz / (float)(origSize.z-1)) * aspect;
		dist = sqrtf( dx*dx + dz*dz );
	}
	else
	{
		float aspect = (float)origSize.x / (float)origSize.z;
		float dx = (diffx / (float)(origSize.x-1)) * aspect;
		float dz = diffz / (float)(origSize.z-1);
		dist = sqrtf( dx*dx + dz*dz );
	}

	if( dist > radius)
		return;

	// pitched 2D addressing of the affected cell
	float *cell =  (float*)((char*)origField + origFieldIdxX * origPitch) + origFieldIdxZ;

	if(falloff)
	{
		// linear falloff towards the rim; note the clamp tests the
		// un-scaled scalar (behaviour kept from the original).
		*cell = ( *cell + scalar > 0.0f ?  *cell +
					scalar * ( hardness * (1.0f - ( dist/radius)) )
					: 0.0f);
	}
	else
	{
		*cell = ( *cell + scalar > 0.0f ?  *cell + scalar : 0.0f);
	}
}
// Host wrapper: applies radial sourcing to the inclusive index range
// [fromIdx, toIdx] of the height field, then checks for launch errors.
extern void cw_radialSourcing(floatMem& heights, size_2D gridSize, 
						float perCellAmount, 
						size_2D fromIdx, size_2D toIdx, size_2D posIdx, 
						float radius, float hardness, bool useFalloff)
{
	// extent of the (inclusive) sub-region to be processed
	size_2D extent;
	extent.x = toIdx.x - fromIdx.x + 1;
	extent.z = toIdx.z - fromIdx.z + 1;

	addSourcingToSubfieldRadial <<< getNumBlocks2D(extent), getThreadsPerBlock2D() >>> 
		(heights.devPtr, heights.pitch, gridSize,
		perCellAmount, fromIdx,radius ,posIdx,hardness, useFalloff,
		dim3FromSize_2D(extent));
	checkCudaErrors(cudaGetLastError());
}
//------------------------------------------------------------------------
// Rectangular
//------------------------------------------------------------------------
// Adds `scalar` to every cell of the rectangular sub-region, clamping the
// result at zero so sinks can never drive a height negative.
// Launch layout: one thread per (i,j) cell of the N.x x N.y sub-region.
__global__ void addSourcingToSubfield( float* origField ,size_t origPitch, 
									  float scalar, size_2D subOffset, dim3 N)
{
	DEFINE_DEFAULT_KERNEL_WORKID_2D(i,j);
	RETURN_IF_OUTSIDE_2D(i,j,N);

	// pitched 2D addressing of the target cell
	char* row = (char*)origField + (i + subOffset.x) * origPitch;
	float* cell = (float*)row + (j + subOffset.z);

	// avoid negative heights if it is a sink
	float updated = *cell + scalar;
	*cell = (updated > 0.0f) ? updated : 0.0f;
}
// Host wrapper: adds `perCellAmount` to every cell of the inclusive index
// range [fromIdx, toIdx] of the height field, then checks for launch errors.
extern void cw_rectSourcing(floatMem& heights, size_2D gridSize, 
					   float perCellAmount, 
					   size_2D fromIdx, size_2D toIdx)
{
	// extent of the (inclusive) sub-region to be processed
	size_2D size;
	size.x = (toIdx.x + 1) - fromIdx.x;
	size.z = (toIdx.z + 1) - fromIdx.z;
	addSourcingToSubfield <<< getNumBlocks2D(size), getThreadsPerBlock2D() >>> 
		(heights.devPtr, heights.pitch,
		perCellAmount, fromIdx , dim3FromSize_2D(size));
	checkCudaErrors(cudaGetLastError());
} |
466693fe5272878d36943209098cb484326b13d7.hip | // !!! This is a file automatically generated by hipify!!!
// Must be done...
#include <assert.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include <string>
#include <sstream>
#include <cstdlib>
#include <limits>
// #include "../../GPUOperations.h"
// #include "../initialisation/initialisation.cuh"
#include "statisticOperationsKernel.cuh"
#include "../../GpuMatrix.hpp"
#include "../generalInformation/generalInformation.hpp"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define assertm(exp, msg) assert(((void)msg, exp))
#define THREADS_PER_BLOCK_DIM 16
#define carre(x) (x*x)
#define minHost(a, b) (((a) < (b)) ? (a) : (b))
// Error-check helper behind the gpuErrchk macro: reports a failing HIP call
// (translated error string, source file and line) on stderr and, unless
// `abort` is false, terminates the process with the error code as status.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
	if (code == hipSuccess)
		return;

	std::cerr << hipGetErrorString(code) << " file : " << file << " line : " << line << std::endl;
	if (abort)
		exit(code);
}
// Returns the minimum element of the matrix, computed on the GPU by the
// minGPU reduction kernel (declared in statisticOperationsKernel.cuh).
// Only floating point element types are supported (guarded through
// has_infinity); the process exits(1) otherwise -- same contract as before.
template <typename T>
T GpuMatrix<T>::minGpuMatrix(void){
	const size_t SIZE = this->ROWS*this->COLUMNS*sizeof(T);
	int* mutex = 0;
	T *dmin, *da;
	T minValue;  // stack local: the previous `new T` was never freed (leak)

	gpuErrchk(hipMalloc((void**)&da, SIZE));
	gpuErrchk(hipMalloc((void**)&dmin, sizeof(T)));
	gpuErrchk(hipMalloc((void**)&mutex, sizeof(int)));

	if (std::numeric_limits<T>::has_infinity){
		// Seed the reduction with +max.  BUGFIX: hipMemset replicates a
		// single byte and cannot encode an arbitrary floating point value,
		// so the sentinel is copied from the host instead.
		const T seed = std::numeric_limits<T>::max();
		gpuErrchk(hipMemcpy(dmin, &seed, sizeof(T), hipMemcpyHostToDevice));
	}
	else {
		exit(1);
	}
	gpuErrchk(hipMemset(mutex, 0, sizeof(int)));
	gpuErrchk(hipMemcpy(da, this->data, SIZE, hipMemcpyHostToDevice));

	// 2D launch covering the ROWS x COLUMNS matrix, clamped to the grid limits
	dim3 blocksPerGrid(minHost(ceil((float)this->ROWS/(float)THREADS_PER_BLOCK_DIM), deviceProps.maxGridSize[0]) , minHost(ceil((float)this->COLUMNS/(float)THREADS_PER_BLOCK_DIM), deviceProps.maxGridSize[1]));
	dim3 threadsPerBlock(THREADS_PER_BLOCK_DIM, THREADS_PER_BLOCK_DIM);

	// per-block shared memory sized to the elements handled per thread
	float sharedMemorySize = (float)(this->ROWS*this->COLUMNS)/(float)(carre(THREADS_PER_BLOCK_DIM)* blocksPerGrid.x * blocksPerGrid.y);
	hipLaunchKernelGGL(( minGPU), dim3(blocksPerGrid), dim3(threadsPerBlock), ceil(sharedMemorySize)*carre(THREADS_PER_BLOCK_DIM)*sizeof(T), 0, da, dmin, this->ROWS, this->COLUMNS, mutex);
	gpuErrchk(hipPeekAtLastError());
	gpuErrchk(hipDeviceSynchronize());

	gpuErrchk(hipMemcpy(&minValue, dmin, sizeof(T), hipMemcpyDeviceToHost));
	gpuErrchk(hipFree(dmin));
	gpuErrchk(hipFree(da));
	gpuErrchk(hipFree(mutex));
	return minValue;
}
// Returns the maximum element of the matrix, computed on the GPU by the
// maxGPU reduction kernel (declared in statisticOperationsKernel.cuh).
// Only floating point element types are supported (guarded through
// has_infinity); the process exits(1) otherwise -- same contract as before.
template <typename T>
T GpuMatrix<T>::maxGpuMatrix(void){
	const size_t SIZE = this->ROWS*this->COLUMNS*sizeof(T);
	int* mutex = 0;
	T *dmax, *da;
	T maxValue;  // stack local: the previous `new T` was never freed (leak)

	gpuErrchk(hipMalloc((void**)&da, SIZE));
	gpuErrchk(hipMalloc((void**)&dmax, sizeof(T)));
	gpuErrchk(hipMalloc((void**)&mutex, sizeof(int)));

	if (std::numeric_limits<T>::has_infinity){
		// Seed the reduction with the most negative representable value.
		// BUGFIX: numeric_limits<T>::min() is the smallest POSITIVE normal
		// for floating point types, which breaks all-negative inputs; use
		// lowest() instead.  Also, hipMemset replicates a single byte and
		// cannot encode the value, so copy the sentinel from the host.
		const T seed = std::numeric_limits<T>::lowest();
		gpuErrchk(hipMemcpy(dmax, &seed, sizeof(T), hipMemcpyHostToDevice));
	}
	else {
		exit(1);
	}
	gpuErrchk(hipMemset(mutex, 0, sizeof(int)));
	gpuErrchk(hipMemcpy(da, this->data, SIZE, hipMemcpyHostToDevice));

	// 2D launch covering the ROWS x COLUMNS matrix, clamped to the grid limits
	dim3 blocksPerGrid(minHost(ceil((float)this->ROWS/(float)THREADS_PER_BLOCK_DIM), deviceProps.maxGridSize[0]) , minHost(ceil((float)this->COLUMNS/(float)THREADS_PER_BLOCK_DIM), deviceProps.maxGridSize[1]));
	dim3 threadsPerBlock(THREADS_PER_BLOCK_DIM, THREADS_PER_BLOCK_DIM);

	// per-block shared memory sized to the elements handled per thread
	float sharedMemorySize = (float)(this->ROWS*this->COLUMNS)/(float)(carre(THREADS_PER_BLOCK_DIM)* blocksPerGrid.x * blocksPerGrid.y);
	hipLaunchKernelGGL(( maxGPU), dim3(blocksPerGrid), dim3(threadsPerBlock), ceil(sharedMemorySize)*carre(THREADS_PER_BLOCK_DIM)*sizeof(T), 0, da, dmax, this->ROWS, this->COLUMNS, mutex);
	gpuErrchk(hipPeekAtLastError());
	gpuErrchk(hipDeviceSynchronize());

	gpuErrchk(hipMemcpy(&maxValue, dmax, sizeof(T), hipMemcpyDeviceToHost));
	gpuErrchk(hipFree(dmax));
	gpuErrchk(hipFree(da));
	gpuErrchk(hipFree(mutex));
	return maxValue;
}
// Returns the mean of all matrix elements: the meanGPU kernel (declared in
// statisticOperationsKernel.cuh) accumulates the sum on the device, the
// division by the element count happens on the host.
template <typename T>
T GpuMatrix<T>::meanGpuMatrix(void){
	const size_t SIZE = this->ROWS*this->COLUMNS*sizeof(T);
	int* mutex = 0;
	T *dmean, *da;
	T meanValue;  // stack local: the previous `new T` was never freed (leak)

	gpuErrchk(hipMalloc((void**)&da, SIZE));
	gpuErrchk(hipMalloc((void**)&dmean, sizeof(T)));
	gpuErrchk(hipMalloc((void**)&mutex, sizeof(int)));

	gpuErrchk(hipMemset(mutex, 0, sizeof(int)));
	// all-zero bytes encode T(0) for the supported types, so memset is fine here
	gpuErrchk(hipMemset(dmean, 0, sizeof(T)));
	gpuErrchk(hipMemcpy(da, this->data, SIZE, hipMemcpyHostToDevice));

	// 2D launch covering the ROWS x COLUMNS matrix, clamped to the grid limits
	dim3 blocksPerGrid(minHost(ceil((float)this->ROWS/(float)THREADS_PER_BLOCK_DIM), deviceProps.maxGridSize[0]) , minHost(ceil((float)this->COLUMNS/(float)THREADS_PER_BLOCK_DIM), deviceProps.maxGridSize[1]));
	dim3 threadsPerBlock(THREADS_PER_BLOCK_DIM, THREADS_PER_BLOCK_DIM);

	// per-block shared memory sized to the elements handled per thread
	float sharedMemorySize = (float)(this->ROWS*this->COLUMNS)/(float)(carre(THREADS_PER_BLOCK_DIM)* blocksPerGrid.x * blocksPerGrid.y);
	hipLaunchKernelGGL(( meanGPU), dim3(blocksPerGrid), dim3(threadsPerBlock), ceil(sharedMemorySize)*carre(THREADS_PER_BLOCK_DIM)*sizeof(T), 0, da, dmean, this->ROWS, this->COLUMNS, mutex);
	gpuErrchk(hipPeekAtLastError());
	gpuErrchk(hipDeviceSynchronize());

	gpuErrchk(hipMemcpy(&meanValue, dmean, sizeof(T), hipMemcpyDeviceToHost));
	gpuErrchk(hipFree(dmean));
	gpuErrchk(hipFree(da));
	gpuErrchk(hipFree(mutex));
	return meanValue/(this->ROWS*this->COLUMNS);
}
// int main(void){
// double sum;
// struct Matrix<double> matrix= Matrix<double>{10000, 10000, new double[10000*10000]};
// for (unsigned int i = 0; i<matrix.ROWS*matrix.COLUMNS; i++){
// matrix.data[i] = (rand() % 100)+5;
// sum += matrix.data[i];
// // std::cout << "Value " << i << " : " << matrix.data[i] << " ---" << std::flush;
// }
// double minGPU = maxGPUMatrixFunction(matrix);
// std::cout << "Max GPU : " << minGPU << std::endl;
// std::cout << "Moyenne CPU : " << sum/(matrix.COLUMNS*matrix.ROWS) << std::endl;
// delete [] matrix.data;
// return 0;
// }
// template int GpuMatrix<int>::minGpuMatrix(void);
// template float GpuMatrix<float>::minGpuMatrix(void);
// template double GpuMatrix<double>::minGpuMatrix(void); | 466693fe5272878d36943209098cb484326b13d7.cu | // Must be done...
#include <assert.h>
#include <iostream>
#include <cuda.h>
#include <stdio.h>
#include <math.h>
#include <string>
#include <sstream>
#include <cstdlib>
#include <limits>
// #include "../../GPUOperations.h"
// #include "../initialisation/initialisation.cuh"
#include "statisticOperationsKernel.cuh"
#include "../../GpuMatrix.hpp"
#include "../generalInformation/generalInformation.hpp"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define assertm(exp, msg) assert(((void)msg, exp))
#define THREADS_PER_BLOCK_DIM 16
#define carre(x) (x*x)
#define minHost(a, b) (((a) < (b)) ? (a) : (b))
// Error-check helper behind the gpuErrchk macro: reports a failing CUDA call
// (translated error string, source file and line) on stderr and, unless
// `abort` is false, terminates the process with the error code as status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
	if (code == cudaSuccess)
		return;

	std::cerr << cudaGetErrorString(code) << " file : " << file << " line : " << line << std::endl;
	if (abort)
		exit(code);
}
// Returns the minimum element of the matrix, computed on the GPU by the
// minGPU reduction kernel (declared in statisticOperationsKernel.cuh).
// Only floating point element types are supported (guarded through
// has_infinity); the process exits(1) otherwise -- same contract as before.
template <typename T>
T GpuMatrix<T>::minGpuMatrix(void){
	const size_t SIZE = this->ROWS*this->COLUMNS*sizeof(T);
	int* mutex = 0;
	T *dmin, *da;
	T minValue;  // stack local: the previous `new T` was never freed (leak)

	gpuErrchk(cudaMalloc((void**)&da, SIZE));
	gpuErrchk(cudaMalloc((void**)&dmin, sizeof(T)));
	gpuErrchk(cudaMalloc((void**)&mutex, sizeof(int)));

	if (std::numeric_limits<T>::has_infinity){
		// Seed the reduction with +max.  BUGFIX: cudaMemset replicates a
		// single byte and cannot encode an arbitrary floating point value,
		// so the sentinel is copied from the host instead.
		const T seed = std::numeric_limits<T>::max();
		gpuErrchk(cudaMemcpy(dmin, &seed, sizeof(T), cudaMemcpyHostToDevice));
	}
	else {
		exit(1);
	}
	gpuErrchk(cudaMemset(mutex, 0, sizeof(int)));
	gpuErrchk(cudaMemcpy(da, this->data, SIZE, cudaMemcpyHostToDevice));

	// 2D launch covering the ROWS x COLUMNS matrix, clamped to the grid limits
	dim3 blocksPerGrid(minHost(ceil((float)this->ROWS/(float)THREADS_PER_BLOCK_DIM), deviceProps.maxGridSize[0]) , minHost(ceil((float)this->COLUMNS/(float)THREADS_PER_BLOCK_DIM), deviceProps.maxGridSize[1]));
	dim3 threadsPerBlock(THREADS_PER_BLOCK_DIM, THREADS_PER_BLOCK_DIM);

	// per-block shared memory sized to the elements handled per thread
	float sharedMemorySize = (float)(this->ROWS*this->COLUMNS)/(float)(carre(THREADS_PER_BLOCK_DIM)* blocksPerGrid.x * blocksPerGrid.y);
	minGPU<<<blocksPerGrid, threadsPerBlock, ceil(sharedMemorySize)*carre(THREADS_PER_BLOCK_DIM)*sizeof(T)>>>(da, dmin, this->ROWS, this->COLUMNS, mutex);
	gpuErrchk(cudaPeekAtLastError());
	gpuErrchk(cudaDeviceSynchronize());

	gpuErrchk(cudaMemcpy(&minValue, dmin, sizeof(T), cudaMemcpyDeviceToHost));
	gpuErrchk(cudaFree(dmin));
	gpuErrchk(cudaFree(da));
	gpuErrchk(cudaFree(mutex));
	return minValue;
}
// Returns the maximum element of the matrix, computed on the GPU by the
// maxGPU reduction kernel (declared in statisticOperationsKernel.cuh).
// Only floating point element types are supported (guarded through
// has_infinity); the process exits(1) otherwise -- same contract as before.
template <typename T>
T GpuMatrix<T>::maxGpuMatrix(void){
	const size_t SIZE = this->ROWS*this->COLUMNS*sizeof(T);
	int* mutex = 0;
	T *dmax, *da;
	T maxValue;  // stack local: the previous `new T` was never freed (leak)

	gpuErrchk(cudaMalloc((void**)&da, SIZE));
	gpuErrchk(cudaMalloc((void**)&dmax, sizeof(T)));
	gpuErrchk(cudaMalloc((void**)&mutex, sizeof(int)));

	if (std::numeric_limits<T>::has_infinity){
		// Seed the reduction with the most negative representable value.
		// BUGFIX: numeric_limits<T>::min() is the smallest POSITIVE normal
		// for floating point types, which breaks all-negative inputs; use
		// lowest() instead.  Also, cudaMemset replicates a single byte and
		// cannot encode the value, so copy the sentinel from the host.
		const T seed = std::numeric_limits<T>::lowest();
		gpuErrchk(cudaMemcpy(dmax, &seed, sizeof(T), cudaMemcpyHostToDevice));
	}
	else {
		exit(1);
	}
	gpuErrchk(cudaMemset(mutex, 0, sizeof(int)));
	gpuErrchk(cudaMemcpy(da, this->data, SIZE, cudaMemcpyHostToDevice));

	// 2D launch covering the ROWS x COLUMNS matrix, clamped to the grid limits
	dim3 blocksPerGrid(minHost(ceil((float)this->ROWS/(float)THREADS_PER_BLOCK_DIM), deviceProps.maxGridSize[0]) , minHost(ceil((float)this->COLUMNS/(float)THREADS_PER_BLOCK_DIM), deviceProps.maxGridSize[1]));
	dim3 threadsPerBlock(THREADS_PER_BLOCK_DIM, THREADS_PER_BLOCK_DIM);

	// per-block shared memory sized to the elements handled per thread
	float sharedMemorySize = (float)(this->ROWS*this->COLUMNS)/(float)(carre(THREADS_PER_BLOCK_DIM)* blocksPerGrid.x * blocksPerGrid.y);
	maxGPU<<<blocksPerGrid, threadsPerBlock, ceil(sharedMemorySize)*carre(THREADS_PER_BLOCK_DIM)*sizeof(T)>>>(da, dmax, this->ROWS, this->COLUMNS, mutex);
	gpuErrchk(cudaPeekAtLastError());
	gpuErrchk(cudaDeviceSynchronize());

	gpuErrchk(cudaMemcpy(&maxValue, dmax, sizeof(T), cudaMemcpyDeviceToHost));
	gpuErrchk(cudaFree(dmax));
	gpuErrchk(cudaFree(da));
	gpuErrchk(cudaFree(mutex));
	return maxValue;
}
// Returns the mean of all matrix elements: the meanGPU kernel (declared in
// statisticOperationsKernel.cuh) accumulates the sum on the device, the
// division by the element count happens on the host.
template <typename T>
T GpuMatrix<T>::meanGpuMatrix(void){
	const size_t SIZE = this->ROWS*this->COLUMNS*sizeof(T);
	int* mutex = 0;
	T *dmean, *da;
	T meanValue;  // stack local: the previous `new T` was never freed (leak)

	gpuErrchk(cudaMalloc((void**)&da, SIZE));
	gpuErrchk(cudaMalloc((void**)&dmean, sizeof(T)));
	gpuErrchk(cudaMalloc((void**)&mutex, sizeof(int)));

	gpuErrchk(cudaMemset(mutex, 0, sizeof(int)));
	// all-zero bytes encode T(0) for the supported types, so memset is fine here
	gpuErrchk(cudaMemset(dmean, 0, sizeof(T)));
	gpuErrchk(cudaMemcpy(da, this->data, SIZE, cudaMemcpyHostToDevice));

	// 2D launch covering the ROWS x COLUMNS matrix, clamped to the grid limits
	dim3 blocksPerGrid(minHost(ceil((float)this->ROWS/(float)THREADS_PER_BLOCK_DIM), deviceProps.maxGridSize[0]) , minHost(ceil((float)this->COLUMNS/(float)THREADS_PER_BLOCK_DIM), deviceProps.maxGridSize[1]));
	dim3 threadsPerBlock(THREADS_PER_BLOCK_DIM, THREADS_PER_BLOCK_DIM);

	// per-block shared memory sized to the elements handled per thread
	float sharedMemorySize = (float)(this->ROWS*this->COLUMNS)/(float)(carre(THREADS_PER_BLOCK_DIM)* blocksPerGrid.x * blocksPerGrid.y);
	meanGPU<<<blocksPerGrid, threadsPerBlock, ceil(sharedMemorySize)*carre(THREADS_PER_BLOCK_DIM)*sizeof(T)>>>(da, dmean, this->ROWS, this->COLUMNS, mutex);
	gpuErrchk(cudaPeekAtLastError());
	gpuErrchk(cudaDeviceSynchronize());

	gpuErrchk(cudaMemcpy(&meanValue, dmean, sizeof(T), cudaMemcpyDeviceToHost));
	gpuErrchk(cudaFree(dmean));
	gpuErrchk(cudaFree(da));
	gpuErrchk(cudaFree(mutex));
	return meanValue/(this->ROWS*this->COLUMNS);
}
// int main(void){
// double sum;
// struct Matrix<double> matrix= Matrix<double>{10000, 10000, new double[10000*10000]};
// for (unsigned int i = 0; i<matrix.ROWS*matrix.COLUMNS; i++){
// matrix.data[i] = (rand() % 100)+5;
// sum += matrix.data[i];
// // std::cout << "Value " << i << " : " << matrix.data[i] << " ---" << std::flush;
// }
// double minGPU = maxGPUMatrixFunction(matrix);
// std::cout << "Max GPU : " << minGPU << std::endl;
// std::cout << "Moyenne CPU : " << sum/(matrix.COLUMNS*matrix.ROWS) << std::endl;
// delete [] matrix.data;
// return 0;
// }
// template int GpuMatrix<int>::minGpuMatrix(void);
// template float GpuMatrix<float>::minGpuMatrix(void);
// template double GpuMatrix<double>::minGpuMatrix(void); |
348ba5aa48f79e6ffa9e87d228fafae34b1b9683.hip | // !!! This is a file automatically generated by hipify!!!
#include <omp.h>
#include <stdio.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <crtdbg.h>
#include <hip/hip_runtime.h>
#include <afxwin.h>
#include <iostream>
#include <vector>
#include <fstream>
using namespace std;
#define CRTDBG_MAP_ALLOC
const short int ND = 1;
const short int ND3 = 1;
const short int NI = 56;
const short int NJ = 56;
const short int NK = 90;
const short int NL = 344;
const short int NPARM = 35;
const short int NCELL = 14;
const short int INFTIME = 9999;
const short int ANISO = 1; // aniso switch
const short int NCYCL = 20; // max cycle num
const short int TSTEP = 2000;
const short int NENDO = 4000;
char flg_thread=1,flg_display,flg_calculate,flg_stop;
char flg_calcu_option;
float HRTscale,HRTx0,HRTy0,HRTz0,phai,pusai,theta;
short int ipttl[4][NI*ND*ND],nttl,idist,iHB[4][50*ND3],iBB[3][50*ND3];
short int kTop,kBtm,kVtr,nHB,nBB;
short int ic,ibbDLY,idltt;
short int ibbSTEP;
char flag_flop;
char *mapCell[NK];
short int nPos,nv[3],maxXctStep;
short int *iparm;
float *ydata[NCELL];
float tmswf[3][6],alp;
short int la012[NCELL],la0123[NCELL];
float *r[3],*rn[3];
float *aw[NL], *bw;
short int *kmin, *kmax;
short int *mag[4];
short int *mapAPD[NK];
short int *mapACT[NK];
short int nbbSTEP;
short int *mapSpeed[NK];
short int *mapXCTm[NCYCL]; // store the exciting time
int NendoB, NendoC;
short int endoBx[NENDO*ND3];
short int endoBy[NENDO*ND3];
short int endoBz[NENDO*ND3];
short int endoCx[NENDO*ND3];
short int endoCy[NENDO*ND3];
short int endoCz[NENDO*ND3];
// epicardial variable
const short int Nepic=NI*NJ*2;//short int Nepic
vector<short int> epicX; // x
vector<short int> epicY; // y
vector<short int> epicZ; // z
short int epicX_old[Nepic];
short int epicY_old[Nepic];
short int epicZ_old[Nepic];
float *POTi;
float *POT[NL],*POT_reduce[NL];//*POT_reduce[NL] by sf 090622
float VCG[3],bufVCG[2][3],bufGRD;
short int nTimeStep,itbuf,nextStep;
short int *iStep;
char answer;
long mNub;
long *locXCT[NK];
long totalCell;
// anisotropy variables
short int maxlay;
float *fibdir[3];
float vl2[10], vt2[10], rrat1;
float planedir[3][30];
float prx[12][12], pry[12][12], prz[12][12];
float xaxis[3],yaxis[3],zaxis[3];
short int mBCL,miBN,mxcycle,idltc,mS2ST,mS2CL,mS2BN;
short int ipstm[3][NI*ND*ND];
short int vHB[NCYCL][50*ND3];
short int excited=0;
CString dataPath="E:\\chuan50\\";
const short int useGPU=1,gpuspeed=17;//by sf 090403 useCPU 1--yes 0--no gpuspeed 1 or 17
short int GPUnum=1,corenum=0;//by sf 090823 the number of GPU device,allnumGPUnum,corenum
//
short int threadnum=4;//by sf 090403 threadnum<0 auto >0 set number of thread=threadnum
short int iTimebegin=1,iTimeend;
float **gatheralldpl;//by sf 090408 for write dpl[3] in BSPitmm
int **gatherallijk,*countallijk,*countallijk_reduce,*itask[2],*iloops[3],isumdipoles=0;//,*iTimetid;//by sf 090408 for write the ijk of dpl[3] in BSPitmm
double starttime,endtime;
double bsptime[4] = {0.0,0.0,0.0,0.0};
int BSPitmmcount(short int iTime0);
void rdHRT(void);
void rdpos(void);
void rdnod(void);
void rdmtx(void);
void rdelc(void);
void locfile(void);
void ECGcal(void);
void geoinfc(void);
void setaniso(void);
void neibdir(void);
void stminvx(short int);
void XCTinvcm(void);
void fibplane(void);
void fibdirct(void);
void savACT(void);
//void savACT(int myid);
void freeFibdir(void);
void freemapAPDcs(void);
void freemapAPD(void);
void freebrs(void);
void freemagcs(void);
void freePOTcs(void);
float *d_r,*d_rn,*d_tm;
short int *d_tnd;
float *d_POTi=0, *d_der=0,*d_endoHnnA=0,*d_surfPOTi=0;
short int *d_endoBx=0;
short int *d_endoBy=0;
short int *d_endoBz=0;
short int *d_endoCx=0;
short int *d_endoCy=0;
short int *d_endoCz=0;
short int *d_epicX=0;
short int *d_epicY=0;
short int *d_epicZ=0;
float *d_epicPOTold=0;
//------------ 2009-2-6-16 BY SWF---------
// comment:
extern "C" short int cudamain(int argc, char** argv);
extern "C" void gpu_freetransdata();
extern "C" void gpu_transdata(short int epicX[Nepic],short int epicY[Nepic],short int epicZ[Nepic],short int *g_tnd[3],float *g_r[3],float *g_rn[3],short int g_endoBx[NENDO*ND3],short int g_endoBy[NENDO*ND3],short int g_endoBz[NENDO*ND3],short int g_endoCx[NENDO*ND3],short int g_endoCy[NENDO*ND3],short int g_endoCz[NENDO*ND3],float g_tm[3][6]);
extern "C" void gpu_BSPitmm_Malloc(float *g_POTi,float g_der[NL],float *g_endoHnnA,float *g_surfPOTi);
extern "C" void gpu_BSPitmm_HostToDevice(float *g_POTi,float g_der[NL],float *g_endoHnnA,float *g_surfPOTi);
extern "C" void gpu_BSPitmm_DeviceToHost(float *g_epicPOTold,float *g_POTi,float g_der[NL],float *g_endoHnnA,float *g_surfPOTi);
extern "C" void gpu_dpl_all(short int do_epicPOT,float g_posi,float g_posj,float g_posk,short int g_nPos,float g_dpl[3],float *g_POTi,float g_der[NL],
float g_HRTx0,float g_HRTy0,float g_HRTz0,int g_NendoB,int g_NendoC,
float *g_endoHnnA,short int *g_endoBx,short int *g_endoBy,short int *g_endoBz,float g_tm[3][6],float *g_epicPOTold);
extern "C" void gpu_dpl_nPos(float g_posi,float g_posj,float g_posk,short int g_nPos,float g_dpl[3],float *g_POTi,float g_der[NL]);
extern "C" void gpu_dpl_nPos_2(float g_posi,float g_posj,float g_posk,float g_dpl[3]);
extern "C" void gpu_dpl_Nendo(float g_posi,float g_posj,float g_posk,float g_HRTx0,float g_HRTy0,float g_HRTz0,
int g_NendoBC,int g_offset,float g_dpl[3],float *g_endoHnnA,
short int *g_endoBx,short int *g_endoBy,short int *g_endoBz,float g_tm[3][6]);
extern "C" void gpu_dpl_Nepic(float g_posi,float g_posj,float g_posk,float g_HRTx0,float g_HRTy0,float g_HRTz0,
float g_dpl[3],float g_tm[3][6],float *g_epicPOTold);
//extern "C" void dplpro(float *POTi,const short int NL, const float **r);
//------------ 2009-2-6-16 BY SWF---------
//int main(int argc,char *argv[])
//void hpc(int argc, char** argv)
void main(int argc, char** argv)
{
int myid, numprocs;
int namelen;
//------------ 2009-2-6-16 BY SWF---------
// comment:
FILE *fptime;
//------------ 2009-2-6-16 BY SWF---------
short int ipttl[4][56];
HFILE hFp;
short int nVCG,BSPm,mTime,iTime,i,j,k;
short int nsnrt;
float *VCGs[3];
float eff;
float *endoHnnA;
float *endoPOT[TSTEP];
short int index;
int nn,n0,n1,n2,ni;
float pi=3.14159;
short int *tnd[3];
int li;
void XCTcalm(void);
//void XCTcalm(int myid);
void BSPcalm(void);
void rdAPDm(void);
void freeXCTm(void);
fprintf(stdout, "Begin computing. %f\n", clock());
for(i=0;i<NK;i++) {
mapCell[i] = (char *) malloc(NI*NJ);
mapAPD[i] = (short int *) malloc(NI*NJ*2);
mapSpeed[i] = (short int *) malloc(NI*NJ*2);
mapACT[i] = (short int *) malloc(NI*NJ*2);
locXCT[i] = (long *) malloc(NI*NJ*4);
if((mapCell[i]==NULL)||(mapAPD[i]==NULL)||(mapACT[i]==NULL)||(locXCT[i]==NULL)) {
fprintf(stdout,"out of memory\n");
fflush(stdout);
return;
}
}
iparm = (short int *) malloc(NCELL*NPARM*2);
kmin = (short int *) malloc(NI*NJ*2);
kmax = (short int *) malloc(NI*NJ*2);
iStep = (short int *) malloc(TSTEP*2);
if((iparm==NULL)||(kmin==NULL)||(kmax==NULL)||(iStep==NULL)) {
fprintf(stdout,"out of memory\n");
fflush(stdout);
return;
}
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
return;
}
for(i=0;i<3;i++) {
r[i] = (float *) malloc(NL*4);
rn[i] = (float *) malloc(NL*4);
if((r[i]==NULL)||(rn[i]==NULL)) {
fprintf(stdout,"out of memory\n");
fflush(stdout);
return;
}
}
for(i=0;i<NCELL;i++) {
ydata[i] = (float *) malloc(1000*ND*4);
if(ydata[i]==NULL) {
fprintf(stdout,"out of memory\n");
fflush(stdout);
return; }
}
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
return;
}
for(i=0;i<4;i++) {
mag[i] = (short int *) malloc(50000*ND3*2);
if(mag[i]==NULL) {
fprintf(stdout,"out of memory\n");
fflush(stdout);
return; }
}
for(k=0;k<NK;k++) {
for(j=0;j<NJ;j++) {
for(i=0;i<NI;i++) {
*(mapAPD[k]+j*NI+i)=0;
*(mapSpeed[k]+j*NI+i)=0;
}
}
}
for(i=0;i<4;i++) {
for(li=0;li<50000*ND3;li++) {
*(mag[i]+li)=0;
}
}
//TRACE("\nReading HRT file ...");
rdHRT();
if(flag_flop||(flg_thread==0)) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
return;
}
//TRACE("\nReading APD file ...");
rdAPDm();
if(flag_flop||(flg_thread==0)) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
return;
}
//TRACE("\nReading POS file ...");
rdpos();
if(flag_flop||(flg_thread==0)) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
return;
}
//TRACE("\nReading NOD file ...");
rdnod();
if(flag_flop||(flg_thread==0)) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
return;
}
//TRACE("\nReading ELC file ...");
rdelc();
if(flag_flop||(flg_thread==0)) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
return;
}
//TRACE("\nLocating Cell Sequence ...");
locfile();
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
return;
}
//TRACE("\nFinding Geometric Info ...");
geoinfc();
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
return;
}
if (ANISO==1) {
//TRACE("\nCalculating Rotating Anisotropy ...");
for (i=0; i<3; i++) {
fibdir[i] = (float *) malloc(50000*ND3*4);
if (fibdir[i]==NULL) {
fprintf(stdout,"Out of memory ! !\n");
fflush(stdout);
return;// 0;
}
}
for(i=0;i<3;i++) {
for(li=0;li<50000*ND3;li++) {
*(fibdir[i]+li)=0.;
}
}
//TRACE("\nCalculating setaniso ...");
setaniso();
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
freeFibdir();
return;
}
//TRACE("\nCalculating neibdir ...");
neibdir();
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
freeFibdir();
return;
}
//TRACE("\nCalculating stminvx ...");
stminvx(50);
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
freeFibdir();
return;
}
//TRACE("\nCalculating XCTinvcm ...");
XCTinvcm();
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
freeFibdir();
return;
}
//TRACE("\nCalculating fibplane ...");
fibplane();
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
freeFibdir();
return;
}
//TRACE("\nCalculating fibdirct ...");
fibdirct();
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
freeFibdir();
return;
}
//TRACE("\nCompleting Rotating Anisotropy ...");
}
//TRACE("\nStimulus calculating ...");
stminvx(20*ND);
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
freeFibdir();
return;
}
//TRACE("\nExcitation estimating ...");
XCTinvcm();
savACT();
fflush(stdout);
freemagcs();
for(i=0;i<NCYCL;i++) {
mapXCTm[i]=(short int *) malloc(50000*ND3*2);
if((mapXCTm[i]==NULL)) {
fprintf(stdout,"Out of memory ! !\n");
fflush(stdout);
return;// 0;
}
}
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freeXCTm();
freeFibdir();
return;
}
//TRACE("\nExcitation calculating ...");
XCTcalm();
//fprintf(stdout,"XCTcalm()ok=;myid=%d \n",myid);
fflush(stdout);
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freeXCTm();
freeFibdir();
return;
}
if(!flg_calcu_option) {
for(i=0;i<NL;i++) {
POT[i]=(float *) malloc(TSTEP*4);
POT_reduce[i]=(float *) malloc(TSTEP*4);//by sf 090622
aw[i]=(float *) malloc(NL*4);
if((POT[i]==NULL)||(aw[i]==NULL)) {
// MessageBox(NULL,"Out of memory !",NULL,MB_OK);
cout<<"Out of memory !"<<endl;
exit(0);
}
}
for(i=0;i<NL;i++) {
for(j=0;j<TSTEP;j++) {
*(POT[i]+j)=(float)0;
*(POT_reduce[i]+j)=(float)0;//by sf 090622
}
}
bw=(float *) malloc(NL*4);
POTi=(float *) malloc(NL*4);
if((POTi==NULL)||(bw==NULL)) {
// MessageBox(NULL,"Out of memory !",NULL,MB_OK);
cout<<"Out of memory !"<<endl;
exit(0);
}
for(i=0;i<NL;i++) *(POTi+i)=(float)0;
if(flg_thread==0) {
freemapAPD();
freemapAPDcs();
freebrs();
freeXCTm();
freePOTcs();
freeFibdir();
return;
}
//TRACE("\nReading MTX file ...");
rdmtx();
if(flag_flop||(flg_thread==0)) {
freemapAPD();
freemapAPDcs();
freebrs();
freeXCTm();
freePOTcs();
freeFibdir();
return;
}
//TRACE("\nBSPM calculating ...");
//------------ 2009-2-4-15 BY SWF---------
// comment: test data trans
//int mydata[2]={20,60};
//printf("my%d,%d\n", mydata[0],mydata[1]);
//printf("aa%f,%f\n", *POTi,*(POTi+1));
//dplpro(POTi,NL,r);
//printf("aa%f,%f\n", *POTi,*(POTi+1));
//printf("my%d,%d\n", mydata[0],mydata[1]);
//------------ 2009-2-4-15 BY SWF---------
//------------ 2009-2-6-16 BY SWF---------
// comment:
starttime = clock();
fprintf(stdout,"starttime = %f\n", starttime);
//------------ 2009-2-6-16 BY SWF---------
if (useGPU==1)
{
GPUnum=cudamain(argc, argv);
fprintf(stdout,"GPUnum = %d", GPUnum);
};
BSPcalm();
//------------ 2009-2-6-16 BY SWF---------
// comment:
endtime = clock();
fprintf(stdout,"\nendtime = %f\n", endtime);
/*if (myid==0)
{
//fprintf(stdout,"sd test- endtime = %f,all-time = %f,threadnum=%d,useGPU=%d,numprocs=%d,nTimeStep=%d\n", starttime,(endtime-starttime)/CLK_TCK,threadnum,useGPU,numprocs,nTimeStep);
fprintf(stdout,"sd test all-time=%f,useGPU=%d,threadnum=%d,numprocs=%d,nTimeStep=%d\n",(endtime-starttime)/CLK_TCK,useGPU,threadnum,numprocs,nTimeStep);
fptime=fopen(dataPath+"gputime.txt","a") ;
fprintf(fptime,"sd test all-time=%f,useGPU=%d,threadnum=%d,numprocs=%d,nTimeStep=%d\n",(endtime-starttime)/CLK_TCK,useGPU,threadnum,numprocs,nTimeStep);
fclose(fptime);
//fptime=fopen(dataPath+"task.txt","a") ;
//fprintf(fptime,"sd test all-time=%f,useGPU=%d,threadnum=%d,numprocs=%d,nTimeStep=%d\n",(endtime-starttime)/CLK_TCK,useGPU,threadnum,numprocs,nTimeStep);
//for(i=0;i<2;i=i+1)
//{
// for(j=0;j<=nTimeStep;j=j+1)
// {
// fprintf(fptime,"itask[%d][%d]=%d\n",i,j,*(itask[i]+j));
// }
//};
//for(i=0;i<3;i=i+1)
//{
// for(j=0;j<=nTimeStep;j=j+1)
// {
// fprintf(fptime,"iloops[%d][%d]=%d\n",i,j,*(iloops[i]+j));
// }
//};
//fclose(fptime);
}*/
//------------ 2009-2-6-16 BY SWF---------
if(flag_flop||(flg_thread==0)) {
freemapAPD();
freemapAPDcs();
freebrs();
freeXCTm();
freePOTcs();
freeFibdir();
return;
}
//TRACE("\nECG and VCG calculating ...");
ECGcal();
}
freemapAPD();
freemapAPDcs();
freebrs();
freeXCTm();
freeFibdir();
if(!flg_calcu_option) {
freePOTcs();
}
fprintf(stdout,"Simulation End !\n");
fflush(stdout);
flg_thread=0;
flg_display=0;
flg_calculate=0;
flg_stop=0;
return;
}
// Read the heart anatomy file "tour.hrt": first the SA/stimulus cell
// positions (nttl entries, each i,j,k,type as 16-bit ints), then the
// coarse cell-type volume. Coarse coordinates are scaled by ND and
// replicated into the fine grid (ND3 = ND^3 sub-cells per coarse cell).
// Sets flag_flop on open failure.
void rdHRT(void) {
    HFILE hFp;
    short int i, j, k, nCell, index;
    hFp=_lopen(dataPath+"tour.hrt ",OF_READ);
    if (hFp==HFILE_ERROR) {
        // fixed: message previously said "nod file" though this opens tour.hrt
        fprintf(stdout,"Can not open hrt file ! !\n");
        fflush(stdout);
        flag_flop=1;
        return;
    }
    _lread(hFp,&nttl,2);
    if (nttl>NI/ND) nttl=NI/ND;  // clamp to array capacity
    /**
     * read stimulation cell's position
     */
    for (i=0;i<nttl;i++) {
        _lread(hFp,&ipttl[0][i*ND3],2);
        _lread(hFp,&ipttl[1][i*ND3],2);
        _lread(hFp,&ipttl[2][i*ND3],2);
        _lread(hFp,&ipttl[3][i*ND3],2);
    }
    // Expand each coarse stimulus point into its ND^3 fine-grid sub-cells:
    // scale the base coordinate by ND, copy it to every sub-slot, then
    // offset the slots to cover the ND x ND x ND neighborhood.
    if (ND == 2) {
        for (i=0;i<nttl;i++) {
            ipttl[0][i*ND3] *= ND;
            ipttl[1][i*ND3] *= ND;
            ipttl[2][i*ND3] *= ND;
            ipttl[3][i*ND3] *= ND;
            for (j = 1; j < ND3; j++) {
                ipttl[0][i*ND3+j] = ipttl[0][i*ND3];
                ipttl[1][i*ND3+j] = ipttl[1][i*ND3];
                ipttl[2][i*ND3+j] = ipttl[2][i*ND3];
                ipttl[3][i*ND3+j] = ipttl[3][i*ND3];
            }
            ipttl[0][i*ND3+1] = ipttl[0][i*ND3]+1;
            ipttl[1][i*ND3+2] = ipttl[1][i*ND3]+1;
            ipttl[0][i*ND3+3] = ipttl[0][i*ND3]+1;
            ipttl[1][i*ND3+3] = ipttl[1][i*ND3]+1;
            ipttl[2][i*ND3+4] = ipttl[2][i*ND3]+1;
            ipttl[0][i*ND3+5] = ipttl[0][i*ND3]+1;
            ipttl[2][i*ND3+5] = ipttl[2][i*ND3]+1;
            ipttl[1][i*ND3+6] = ipttl[1][i*ND3]+1;
            ipttl[2][i*ND3+6] = ipttl[2][i*ND3]+1;
            ipttl[0][i*ND3+7] = ipttl[0][i*ND3]+1;
            ipttl[1][i*ND3+7] = ipttl[1][i*ND3]+1;
            ipttl[2][i*ND3+7] = ipttl[2][i*ND3]+1;
        }
    }
    if (ND == 3) {
        for (i=0;i<nttl;i++) {
            ipttl[0][i*ND3] *= ND;
            ipttl[1][i*ND3] *= ND;
            ipttl[2][i*ND3] *= ND;
            //ipttl[3][i*ND3] *= ND;
            for (j = 1; j < ND3; j++) {
                ipttl[0][i*ND3+j] = ipttl[0][i*ND3];
                ipttl[1][i*ND3+j] = ipttl[1][i*ND3];
                ipttl[2][i*ND3+j] = ipttl[2][i*ND3];
                ipttl[3][i*ND3+j] = ipttl[3][i*ND3];
            }
            // offsets below enumerate the 3x3x3 sub-cube; block labels
            // "ab(c...)" give the (di,dj) plane and the dz offsets applied
            // 00(1,2)
            ipttl[2][i*ND3+1] = ipttl[2][i*ND3]+1;
            ipttl[2][i*ND3+2] = ipttl[2][i*ND3]+2;
            // 01(0,1,2)
            ipttl[1][i*ND3+3] = ipttl[1][i*ND3]+1;
            ipttl[1][i*ND3+4] = ipttl[1][i*ND3]+1;
            ipttl[2][i*ND3+4] = ipttl[2][i*ND3]+1;
            ipttl[1][i*ND3+5] = ipttl[1][i*ND3]+1;
            ipttl[2][i*ND3+5] = ipttl[2][i*ND3]+2;
            // 02(0,1,2)
            ipttl[1][i*ND3+6] = ipttl[1][i*ND3]+2;
            ipttl[1][i*ND3+7] = ipttl[1][i*ND3]+2;
            ipttl[2][i*ND3+7] = ipttl[2][i*ND3]+1;
            ipttl[1][i*ND3+8] = ipttl[1][i*ND3]+2;
            ipttl[2][i*ND3+8] = ipttl[2][i*ND3]+2;
            // 10(0,1,2)
            ipttl[0][i*ND3+9] = ipttl[0][i*ND3]+1;
            ipttl[0][i*ND3+10] = ipttl[0][i*ND3]+1;
            ipttl[2][i*ND3+10] = ipttl[2][i*ND3]+1;
            ipttl[0][i*ND3+11] = ipttl[0][i*ND3]+1;
            ipttl[2][i*ND3+11] = ipttl[2][i*ND3]+2;
            // 11(0,1,2)
            ipttl[0][i*ND3+12] = ipttl[0][i*ND3]+1;
            ipttl[1][i*ND3+12] = ipttl[1][i*ND3]+1;
            ipttl[0][i*ND3+13] = ipttl[0][i*ND3]+1;
            ipttl[1][i*ND3+13] = ipttl[1][i*ND3]+1;
            ipttl[2][i*ND3+13] = ipttl[2][i*ND3]+1;
            ipttl[0][i*ND3+14] = ipttl[0][i*ND3]+1;
            ipttl[1][i*ND3+14] = ipttl[1][i*ND3]+1;
            ipttl[2][i*ND3+14] = ipttl[2][i*ND3]+2;
            // 12(0,1,2)
            ipttl[0][i*ND3+15] = ipttl[0][i*ND3]+1;
            ipttl[1][i*ND3+15] = ipttl[1][i*ND3]+2;
            ipttl[0][i*ND3+16] = ipttl[0][i*ND3]+1;
            ipttl[1][i*ND3+16] = ipttl[1][i*ND3]+2;
            ipttl[2][i*ND3+16] = ipttl[2][i*ND3]+1;
            ipttl[0][i*ND3+17] = ipttl[0][i*ND3]+1;
            ipttl[1][i*ND3+17] = ipttl[1][i*ND3]+2;
            ipttl[2][i*ND3+17] = ipttl[2][i*ND3]+2;
            // 20(0,1,2)
            ipttl[0][i*ND3+18] = ipttl[0][i*ND3]+2;
            ipttl[0][i*ND3+19] = ipttl[0][i*ND3]+2;
            ipttl[2][i*ND3+19] = ipttl[2][i*ND3]+1;
            ipttl[0][i*ND3+20] = ipttl[0][i*ND3]+2;
            ipttl[2][i*ND3+20] = ipttl[2][i*ND3]+2;
            // 21(0,1,2)
            ipttl[0][i*ND3+21] = ipttl[0][i*ND3]+2;
            ipttl[1][i*ND3+21] = ipttl[1][i*ND3]+1;
            ipttl[0][i*ND3+22] = ipttl[0][i*ND3]+2;
            ipttl[1][i*ND3+22] = ipttl[1][i*ND3]+1;
            ipttl[2][i*ND3+22] = ipttl[2][i*ND3]+1;
            ipttl[0][i*ND3+23] = ipttl[0][i*ND3]+2;
            ipttl[1][i*ND3+23] = ipttl[1][i*ND3]+1;
            ipttl[2][i*ND3+23] = ipttl[2][i*ND3]+2;
            // 22(0,1,2)
            ipttl[0][i*ND3+24] = ipttl[0][i*ND3]+2;
            ipttl[1][i*ND3+24] = ipttl[1][i*ND3]+2;
            ipttl[0][i*ND3+25] = ipttl[0][i*ND3]+2;
            ipttl[1][i*ND3+25] = ipttl[1][i*ND3]+2;
            ipttl[2][i*ND3+25] = ipttl[2][i*ND3]+1;
            ipttl[0][i*ND3+26] = ipttl[0][i*ND3]+2;
            ipttl[1][i*ND3+26] = ipttl[1][i*ND3]+2;
            ipttl[2][i*ND3+26] = ipttl[2][i*ND3]+2;
        }
    }
    nttl *= ND3;  // nttl now counts fine-grid stimulus cells
    /**
     * read cell type of each cell
     */
    // Coarse slices are read into the start of every ND-th fine slice,
    // then expanded in place below (reverse scan so reads stay ahead of
    // writes within the same buffer).
    for (i=0;i<NK/ND;i++) {
        _lread(hFp,mapCell[i*ND],NI*NJ/ND/ND);
    }
    if (ND == 2) {
        for (k=0;k<NK/ND;k++) {
            for (j=NJ/ND-1;j>=0;j--) {
                for (i=NI/ND-1;i>=0;i--) {
                    nCell = *(mapCell[ND*k]+j*NI/ND+i);
                    *(mapCell[ND*k]+ND*j*NI+ND*i)= nCell;
                    *(mapCell[ND*k]+ND*j*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k]+(ND*j+1)*NI+ND*i) = nCell;
                    *(mapCell[ND*k]+(ND*j+1)*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k+1]+ND*j*NI+ND*i) = nCell;
                    *(mapCell[ND*k+1]+ND*j*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k+1]+(ND*j+1)*NI+ND*i) = nCell;
                    *(mapCell[ND*k+1]+(ND*j+1)*NI+ND*i+1) = nCell;
                }
            }
        }
        // NOTE(review): these fixed-point overrides index with NJ as the row
        // stride, unlike the j*NI+i convention used everywhere else — looks
        // suspicious if NI != NJ; confirm against the grid dimensions.
        *(mapCell[32*ND]+25*ND*NJ+32*ND) = 3;
        *(mapCell[32*ND]+(25*ND+1)*NJ+32*ND) = 3;
        *(mapCell[32*ND+1]+25*ND*NJ+32*ND) = 3;
        *(mapCell[32*ND+1]+(25*ND+1)*NJ+32*ND) = 3;
    }
    if (ND == 3) {
        for (k=0;k<NK/ND;k++) {
            for (j=NJ/ND-1;j>=0;j--) {
                for (i=NI/ND-1;i>=0;i--) {
                    nCell = *(mapCell[ND*k]+j*NI/ND+i);
                    *(mapCell[ND*k]+ND*j*NI+ND*i)= nCell;
                    *(mapCell[ND*k]+ND*j*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k]+ND*j*NI+ND*i+2) = nCell;
                    *(mapCell[ND*k]+(ND*j+1)*NI+ND*i) = nCell;
                    *(mapCell[ND*k]+(ND*j+1)*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k]+(ND*j+1)*NI+ND*i+2) = nCell;
                    *(mapCell[ND*k]+(ND*j+2)*NI+ND*i) = nCell;
                    *(mapCell[ND*k]+(ND*j+2)*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k]+(ND*j+2)*NI+ND*i+2) = nCell;
                    *(mapCell[ND*k+1]+ND*j*NI+ND*i)= nCell;
                    *(mapCell[ND*k+1]+ND*j*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k+1]+ND*j*NI+ND*i+2) = nCell;
                    *(mapCell[ND*k+1]+(ND*j+1)*NI+ND*i) = nCell;
                    *(mapCell[ND*k+1]+(ND*j+1)*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k+1]+(ND*j+1)*NI+ND*i+2) = nCell;
                    *(mapCell[ND*k+1]+(ND*j+2)*NI+ND*i) = nCell;
                    *(mapCell[ND*k+1]+(ND*j+2)*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k+1]+(ND*j+2)*NI+ND*i+2) = nCell;
                    *(mapCell[ND*k+2]+ND*j*NI+ND*i)= nCell;
                    *(mapCell[ND*k+2]+ND*j*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k+2]+ND*j*NI+ND*i+2) = nCell;
                    *(mapCell[ND*k+2]+(ND*j+1)*NI+ND*i) = nCell;
                    *(mapCell[ND*k+2]+(ND*j+1)*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k+2]+(ND*j+1)*NI+ND*i+2) = nCell;
                    *(mapCell[ND*k+2]+(ND*j+2)*NI+ND*i) = nCell;
                    *(mapCell[ND*k+2]+(ND*j+2)*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k+2]+(ND*j+2)*NI+ND*i+2) = nCell;
                }
            }
        }
        // NOTE(review): same NJ-stride question as the ND==2 branch above.
        *(mapCell[32*ND]+25*ND*NJ+32*ND) = 3;
        *(mapCell[32*ND]+(25*ND+1)*NJ+32*ND) = 3;
        *(mapCell[32*ND]+(25*ND+2)*NJ+32*ND) = 3;
        *(mapCell[32*ND+1]+25*ND*NJ+32*ND) = 3;
        *(mapCell[32*ND+1]+(25*ND+1)*NJ+32*ND) = 3;
        *(mapCell[32*ND+1]+(25*ND+2)*NJ+32*ND) = 3;
        *(mapCell[32*ND+2]+25*ND*NJ+32*ND) = 3;
        *(mapCell[32*ND+2]+(25*ND+1)*NJ+32*ND) = 3;
        *(mapCell[32*ND+2]+(25*ND+2)*NJ+32*ND) = 3;
    }
    _lclose(hFp);
}
// Release the per-slice activation, cell-type and location maps together
// with the global parameter and margin arrays of the heart model.
void freemapAPD(void) {
    short int slice;
    for (slice = 0; slice < NK; slice++) {
        free(mapACT[slice]);
        free(mapCell[slice]);
        free(locXCT[slice]);
    }
    free(iparm);
    free(kmin);
    free(kmax);
    free(iStep);
}
void freebrs(void) {
short int i = 0;
for (i=0;i<3;i++) {
free(r[i]);
free(rn[i]);
}
for (i=0;i<NCELL;i++) {
free(ydata[i]);
}
}
// Release the per-slice APD and conduction-speed maps.
void freemapAPDcs(void) {
    for (short int slice = 0; slice < NK; slice++) {
        free(mapAPD[slice]);
        free(mapSpeed[slice]); // added by Zhu
    }
}
// Release the four magnetic-field component buffers.
void freemagcs(void) {
    for (short int c = 0; c < 4; c++)
        free(mag[c]);
}
void freePOTcs(void) {
for(short i=0;i<NL;i++) {
free(POT_reduce[i]);//by sf 090622
free(POT[i]);
free(aw[i]);
}
free(POTi);
free(bw);
}
// Release the excitation-time maps, one buffer per simulated cycle.
void freeXCTm(void)
{
    for (short int cyc = 0; cyc < NCYCL; cyc++)
        free(mapXCTm[cyc]);
}
// Release the fiber-direction component buffers; these are only
// allocated when the run is anisotropic (ANISO == 1).
void freeFibdir(void)
{
    if (ANISO != 1) return;
    for (short int c = 0; c < 3; c++)
        free(fibdir[c]);
}
// read position parameter of heart & call transfer matrix ----
// Loads "tour.pos" (scale, origin x/y/z, three pose angles — seven
// 4-byte fields in fixed order) and rebuilds the lattice->torso
// transform. Sets flag_flop on open failure.
void rdpos(void) {
    void transf(void);
    HFILE hFp;
    hFp = _lopen(dataPath+"tour.pos ",OF_READ);
    if (hFp == HFILE_ERROR) {
        fprintf(stdout,"Can not open pos file ! !\n");
        fflush(stdout);
        flag_flop = 1;
        return;
    }
    // destinations in the exact order the fields appear in the file
    float *field[7] = { &HRTscale, &HRTx0, &HRTy0, &HRTz0,
                        &phai, &pusai, &theta };
    for (short int n = 0; n < 7; n++)
        _lread(hFp, field[n], 4);
    _lclose(hFp);
    transf();  // recompute tmswf from the freshly loaded pose
}
// Read heart
/**
* normal cell's para
*/
// APD parameters
// Cell sn atr anv hb bb pkj vtr ab1 ab2 ab3 ab4 ab5 ab6 ab7
// Parm 1 2 3 4 5 6 7 8 9 10 11 12 13 14
// 1 T0 30 10 0 0 0 0 0 0 0 0 0 0 0 0
// 2 T1 0 0 5 5 5 5 5 5 5 5 5 5 5 5
// 3 T2 0 0 100 100 100 105 75 75 75 75 75 75 75 75
// 4 T3 175 120 175 175 175 195 175 175 175 175 175 175 175 175
// 5 APR 170 100 210 210 210 250 200 200 200 200 200 200 200 200
// 6 FRT 205 140 320 320 320 345 295 295 295 295 295 295 295 295
// 7 V0 -90 -90 -90 -90 -90 -90 -90 -90 -90 -90 -90 -90 -90 -90
// 8 V1 30 -20 40 40 40 40 40 40 40 40 40 40 40 40
// 9 V2 30 -20 30 30 30 30 30 30 30 30 30 30 30 30
// 10 GRD 250 0 0 0 0 5 5 5 5 5 5 5 5 5
// 11 DCS 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 12 DVT 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 13 ECF 100 100 100 100 100 100 100 0 0 0 0 0 0 0
// 14 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 15 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 16 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 18 BCL 800 0 0 0 0 0 0 0 0 0 0 0 0 0
// 19 BN 1 0 0 0 0 0 0 0 0 0 0 0 0 0
// 20 inc 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 21 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 22 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 23 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 24 ICL 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 25 PRT 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 26 DLY 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 27 ACC 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 28 PBP 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 29 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 30 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 31 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 32 CS 50 100 10 250 250 250 50 -12 0 0 0 0 0 0
// 33 DC 0 0 0 0 0 0 0 1 0 0 0 0 0 0
// 34 0 0 0 0 0 1 0 0 0 0 0 0 0 0
// 35 0 0 0 0 0 0 0 0 0 0 0 0 0 0
//
// Read the action-potential definition file "tour.apd": per cell type,
// NPARM 16-bit parameters plus a sampled repolarization curve
// (npoint x {x,y} pairs). Parameters are rescaled for the fine grid
// (ND), global maxima (mBCL, miBN, mS2*, maxXctStep) are accumulated,
// and a per-type AP waveform is synthesized into ydata[] by piecewise
// linear segments (phases 0-2) plus 3-point Lagrange interpolation of
// the sampled phase-3 tail. Sets flag_flop on open failure.
void rdAPDm(void)
{
    short int npoint[NCELL],ixsmp[NCELL][100],num;
    float ysmp[NCELL][100];
    short int iT0,iT01,iT012,iT0123;
    short int incr,iBN0,iBCL,iBN,ntstp,iS2ST,iS2CL,iS2BN;
    float dx0,dx1,dx2,dx01,dx02,dx10,dx12,dx20,dx21,a,b;
    HFILE hFp;
    short int icell,i,j,k,icurv,index;
    // hFp=_lopen("f:/apd/apdapd.5",READ);
    //index=filepath.FindOneOf(".");
    //filepath.SetAt(index+1,'a');
    //filepath.SetAt(index+2,'p');
    //filepath.SetAt(index+3,'d');
    //hFp=_lopen(filepath,OF_READ);
    hFp=_lopen(dataPath+"tour.apd ",OF_READ);
    if(hFp==HFILE_ERROR) {
        fprintf(stdout,"Can not open apd file ! !\n");
        fflush(stdout);
        flag_flop=1;
        return;
    }
    // Per cell type: read NPARM parameters, then the sampled curve.
    // Time-valued parameters (indices 0..5) are scaled by ND so one
    // step of the fine grid keeps the same physical duration.
    for(icell=0;icell<NCELL;icell++) {
        _lread(hFp,iparm+icell*NPARM,NPARM*2);
        *(iparm+icell*35+0) = *(iparm+icell*35+0)*ND;
        *(iparm+icell*35+1) = *(iparm+icell*35+1)*ND;
        *(iparm+icell*35+2) = *(iparm+icell*35+2)*ND;
        *(iparm+icell*35+3) = *(iparm+icell*35+3)*ND;
        *(iparm+icell*35+4) = *(iparm+icell*35+4)*ND;
        *(iparm+icell*35+5) = *(iparm+icell*35+5)*ND;
        //*(iparm+icell*35+10) = *(iparm+icell*35+10)*ND;
        _lread(hFp,&npoint[icell],2);
        for(j=0;j<npoint[icell];j++) {
            _lread(hFp,&ixsmp[icell][j],2);
            ixsmp[icell][j] = ixsmp[icell][j]*ND;  // sample x also in fine steps
            _lread(hFp,&ysmp[icell][j],4);
        }
    }
    _lclose(hFp);
    // global maxima over all cell types, reset before the scan below
    mBCL=0;
    miBN=0;
    mS2ST=0;
    mS2CL=0;
    mS2BN=0;
    maxXctStep=0;
    for(icell=0;icell<NCELL;icell++) {
        incr=*(iparm+icell*NPARM+19);  // cycle-length increment, percent
        if(incr<0) {
            // decremental pacing: cap beat number so CL never reaches zero
            iBN0=1-100/incr;
            if(*(iparm+icell*NPARM+18)>iBN0) *(iparm+icell*NPARM+18)=iBN0;
        }
        //iBCL=*(iparm+icell*NPARM+17); //basic cycle length
        *(iparm+icell*NPARM+17) = *(iparm+icell*NPARM+17)*ND;
        // S2, additional stimulus
        *(iparm+icell*NPARM+14) = *(iparm+icell*NPARM+14)*ND;
        *(iparm+icell*NPARM+15) = *(iparm+icell*NPARM+15)*ND;
        iS2ST=*(iparm+icell*NPARM+14);
        iS2CL=*(iparm+icell*NPARM+15);
        iS2BN=*(iparm+icell*NPARM+16);
        iBCL=*(iparm+icell*NPARM+17); //basic cycle length
        iBN=*(iparm+icell*NPARM+18); // beat number
        // total pacing time in time steps (3 ms per step; arithmetic sum
        // of iBN cycle lengths with per-beat percentage increment)
        ntstp=(iBN*iBCL+iBN*(iBN-1)*iBCL*incr/200)/3;
        if(iBCL>mBCL) mBCL=iBCL;
        if(iBN>miBN) miBN=iBN;
        if(iS2ST>mS2ST) mS2ST=iS2ST;
        if(iS2CL>mS2CL) mS2CL=iS2CL;
        if(iS2BN>mS2BN) mS2BN=iS2BN;
        if(ntstp>maxXctStep) maxXctStep=ntstp;
        // CL increment: % --> TS
        *(iparm+icell*NPARM+19)=iBCL*incr*ND/300;
        *(iparm+icell*NPARM+17)=iBCL/3;
        // iparm(18) <-- total pacing time
        *(iparm+icell*NPARM+18)=ntstp;
        // FRP <-- FRP-ARP
        *(iparm+icell*NPARM+5)=*(iparm+icell*NPARM+5)-*(iparm+icell*NPARM+4);
        // intrinsic CL: ms --> TS
        *(iparm+icell*NPARM+23)=*(iparm+icell*NPARM+23)/3;
        //-- conduction speed(100*)CS:
        //  CS(m/s) --> CS(2*1.5 mm/3ms) --> CS*2(cell/Step)  ----
        /*<Comment by ALF> why 100*/
        *(iparm+icell*NPARM+31)=*(iparm+icell*NPARM+31)*2;
        // we only have two points to represent his bundle and bundle branches
        //if (icell == 3)  *(iparm+(icell-1)*NPARM+31)=*(iparm+(icell-1)*NPARM+31)/11;
        //if (icell == 3)  *(iparm+(icell-1)*NPARM+31)=100;
        //if (icell == 5)  *(iparm+(icell-1)*NPARM+31)=*(iparm+(icell-1)*NPARM+31)/ND;
        // initialize ydata to the resting potential V0 (iparm index 6)
        for(short int n=0;n<1000*ND;n++)
            *(ydata[icell]+n)=(float)*(iparm+icell*NPARM+6);
        //for (int ii=0; ii < NPARM; ii++ )
        //TRACE("\nCell %2d %2d %d", icell, ii, *(iparm+icell*NPARM+ii));
    }
    // --- data set ---
    // Build the AP waveform per cell type from phase durations T0..T3
    // (iparm 0..3) and voltages V0/V1/V2 (iparm 6..8).
    for(icurv=0;icurv<NCELL;icurv++) {
        num=npoint[icurv];
        iT0=*(iparm+icurv*NPARM);
        iT01=iT0+*(iparm+icurv*NPARM+1);
        iT012=iT01+*(iparm+icurv*NPARM+2);
        iT0123=iT012+*(iparm+icurv*NPARM+3);
        //---- lenth of APD ------
        la012[icurv]=iT012;
        la0123[icurv]=iT0123;
        // --- t = phased 0 --- linear rise V0 -> V1 over T0
        for(i=0;i<=(iT0-1);i++) { // < ? July 4, 1996
            // +++++ iparm(icurv,6), the real value +++++
            a=(float)(-*(iparm+icurv*NPARM+6)+*(iparm+icurv*NPARM+7))
              /(float)*(iparm+icurv*NPARM);
            b=(float)*(iparm+icurv*NPARM+6);
            *(ydata[icurv]+i)=a*i+b;
        }
        *(ydata[icurv]+iT0)=(float)*(iparm+icurv*NPARM+7);
        // --- t = phase 1 --- linear V1 -> V2 over T1 (if T1 > 0)
        if(iT01>iT0) {
            for(i=(iT0+1);i<=(iT01-1);i++) {
                a=(float)(*(iparm+icurv*NPARM+8)-*(iparm+icurv*NPARM+7))
                  /(float)*(iparm+icurv*NPARM+1);
                b=(float)*(iparm+icurv*NPARM+7)-a*iT0;
                *(ydata[icurv]+i)=a*i+b;
            }
        }
        // --- t = phase 2 --- plateau at V2
        for(i=iT01;i<=iT012;i++)
            *(ydata[icurv]+i)=(float)*(iparm+NPARM*icurv+8);
        //---- t= phase 3 ---- repolarization tail: 3-point Lagrange
        // interpolation through the sampled (ixsmp, ysmp) curve; samples
        // are stored with decreasing x (index num-1 is the smallest x).
        for(i=(iT012+1);i<=iT0123;i++) {
            if((i<ixsmp[icurv][num-3])&&(i>ixsmp[icurv][num-2])) {
                dx0=(float)(i-ixsmp[icurv][num-1]);
                dx1=(float)(i-ixsmp[icurv][num-2]);
                dx2=(float)(i-ixsmp[icurv][num-3]);
                dx01=(float)(ixsmp[icurv][num-1]-ixsmp[icurv][num-2]);
                dx02=(float)(ixsmp[icurv][num-1]-ixsmp[icurv][num-3]);
                dx10=(float)(ixsmp[icurv][num-2]-ixsmp[icurv][num-1]);
                dx12=(float)(ixsmp[icurv][num-2]-ixsmp[icurv][num-3]);
                dx20=(float)(ixsmp[icurv][num-3]-ixsmp[icurv][num-1]);
                dx21=(float)(ixsmp[icurv][num-3]-ixsmp[icurv][num-2]);
                *(ydata[icurv]+i)=dx1*dx2*ysmp[icurv][num-1]/dx01/dx02
                                  +dx0*dx2*ysmp[icurv][num-2]/dx10/dx12
                                  +dx0*dx1*ysmp[icurv][num-3]/dx20/dx21;
            }
            for(k=2;k<num-3;k++) {
                if(i==ixsmp[icurv][k+1])
                    *(ydata[icurv]+i)=ysmp[icurv][k+1];
                else if(i==ixsmp[icurv][k])
                    *(ydata[icurv]+i)=ysmp[icurv][k];
                else if((i<ixsmp[icurv][k])&&(i>ixsmp[icurv][k+1])) {
                    dx0=(float)(i-ixsmp[icurv][k+1]);
                    dx1=(float)(i-ixsmp[icurv][k]);
                    dx2=(float)(i-ixsmp[icurv][k-1]);
                    dx01=(float)(ixsmp[icurv][k+1]-ixsmp[icurv][k]);
                    dx02=(float)(ixsmp[icurv][k+1]-ixsmp[icurv][k-1]);
                    dx10=(float)(ixsmp[icurv][k]-ixsmp[icurv][k+1]);
                    dx12=(float)(ixsmp[icurv][k]-ixsmp[icurv][k-1]);
                    dx20=(float)(ixsmp[icurv][k-1]-ixsmp[icurv][k+1]);
                    dx21=(float)(ixsmp[icurv][k-1]-ixsmp[icurv][k]);
                    *(ydata[icurv]+i)=dx1*dx2*ysmp[icurv][k+1]/dx01/dx02
                                      +dx0*dx2*ysmp[icurv][k]/dx10/dx12
                                      +dx0*dx1*ysmp[icurv][k-1]/dx20/dx21;
                }
            }
        }
    }
}
/**
* transform matrix for (i,j,k) -> (x, y, z)
*/
// transf: coordinate transformation
// transf: coordinate transformation. Builds tmswf, the 3x6 matrix that
// maps the six lattice basis directions of the cell grid into torso
// coordinates, by rotating a fixed basis with the pose angles
// (phai, pusai, theta, in degrees) and scaling by HRTscale/ND.
void transf(void) {
    short int row, col, n;
    float acc;
    float rot[3][3];
    // unrotated lattice basis directions (column per direction)
    float basis[3][6]={
        1.0,  0.5,  0.5,  -0.5,  -0.5,  0.0,
        0.0,0.866, 0.2886,0.866, 0.2886,-0.5773,
        0.0,  0.0,-0.8165, 0.0,-0.8165,-0.8165};
    float rd=1.745329252e-2;  // degrees -> radians
    float ph=rd*phai;
    float ps=rd*pusai;
    float th=rd*theta;
    float cph=cos(ph);
    float sph=sin(ph);
    float cps=cos(ps);
    float sps=sin(ps);
    float cth=cos(th);
    float sth=sin(th);
    // rotation matrix assembled from the three pose angles
    rot[0][0]=cps*cph-cth*sps*sph;
    rot[0][1]=-sps*cph-cth*cps*sph;
    rot[0][2]=sth*sph;
    rot[1][0]=cps*sph+cth*sps*cph;
    rot[1][1]=-sps*sph+cth*cps*cph;
    rot[1][2]=-sth*cph;
    rot[2][0]=sps*sth;
    rot[2][1]=cps*sth;
    rot[2][2]=cth;
    // tmswf = (scale/ND) * rot * basis
    for (row=0;row<3;row++) {
        for (col=0;col<6;col++) {
            acc=0;
            for (n=0;n<3;n++)
                acc=acc+rot[row][n]*basis[n][col];
            tmswf[row][col]=(float)(acc*HRTscale/ND);
        }
    }
}
/**
* torso position
*/
// Read the data of nodes and derivatives
void rdnod(void) {
short int i;
HFILE hFp;
//short int index;
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'n');
//filepath.SetAt(index+2,'o');
//filepath.SetAt(index+3,'d');
//hFp=_lopen(filepath,OF_READ);
hFp=_lopen(dataPath+"tour.nod ",OF_READ);
if (hFp==HFILE_ERROR) {
fprintf(stdout,"Can not open nod file ! !\n");
flag_flop=1;
return;
}
_lread(hFp,&nPos,2);
if (nPos>NL) nPos=NL;
for (i=0;i<nPos;i++) {
_lread(hFp,r[0]+i,4);
_lread(hFp,r[1]+i,4);
_lread(hFp,r[2]+i,4);
}
for (i=0;i<nPos;i++) {
_lread(hFp,rn[0]+i,4);
_lread(hFp,rn[1]+i,4);
_lread(hFp,rn[2]+i,4);
}
_lclose(hFp);
}
// Read electrode position file
void rdelc(void) {
short int i;
float eps[3][6],weight[3][6];
short int member[3][6];
HFILE hFp;
//short int index;
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'e');
//filepath.SetAt(index+2,'l');
//filepath.SetAt(index+3,'c');
//hFp=_lopen(filepath,OF_READ);
hFp=_lopen(dataPath+"tour.elc ",OF_READ);
if (hFp==HFILE_ERROR) {
fprintf(stdout,"Can not open elc file ! !\n");
flag_flop=1;
return;
}
for (i=0;i<6;i++) {
_lread(hFp,&eps[0][i],4);
_lread(hFp,&eps[1][i],4);
_lread(hFp,&eps[2][i],4);
}
for (i=0;i<6;i++) {
_lread(hFp,&weight[0][i],4);
_lread(hFp,&weight[1][i],4);
_lread(hFp,&weight[2][i],4);
}
for(i=0;i<6;i++) {
_lread(hFp,&member[0][i],2);
_lread(hFp,&member[1][i],2);
_lread(hFp,&member[2][i],2);
}
_lread(hFp,&nv[0],2);
_lread(hFp,&nv[1],2);
_lread(hFp,&nv[2],2);
_lclose(hFp);
}
// Assign every valid myocardial voxel (cell type 1..NCELL) a sequential
// index in locXCT, scanning k-major then j then i; all other voxels are
// marked -1. Leaves the total count in the global totalCell.
void locfile(void) {
    short int i,j,k;
    totalCell = 0;
    for (k = 0; k < NK; k++)
        for (j = 0; j < NJ; j++)
            for (i = 0; i < NI; i++) {
                int off = j*NI + i;
                if ((*(mapCell[k]+off) > 0) && (*(mapCell[k]+off) < NCELL+1)) {
                    *(locXCT[k]+off) = totalCell;
                    totalCell++;
                } else {
                    *(locXCT[k]+off) = -1;
                }
            }
    //TRACE("\nTotal Cells: %d", totalCell);
}
// Geometric information of heart model
// Geometric information of heart model: per-(i,j) column margins
// (kmin/kmax), epicardial surface vertex lists, the ventricular start
// slice kVtr, His-bundle / bundle-branch cell lists, and the two
// endocardial surfaces (found by 6-connected flood fill of the empty
// space inside each ventricle, marking visited voxels with the
// sentinel type 30 and restoring them to 0 afterwards).
void geoinfc(void) {
    int i0, ii, endoAn, iendo;
    short int i,j,k;
    short int l, m, flag;
    short int endoAx[20000*ND3];
    short int endoAy[20000*ND3];
    short int endoAz[20000*ND3];
    //short int iseqx[12]={-1,-1, 0, 0, 1, 0, 1, 1, 0, 0,-1, 0 };
    //short int iseqy[12]={ 0, 1, 1, 0, 0, 1, 0,-1,-1, 0, 0,-1 };
    //short int iseqz[12]={ 0, 0, 0,-1,-1,-1, 0, 0, 0, 1, 1, 1 };
    /**
     * coor-delta matrix (6-connected neighborhood)
     */
    short int iseqx[6]={-1, 0, 0, 1, 0, 0};
    short int iseqy[6]={ 0, 1, 0, 0,-1, 0};
    short int iseqz[6]={ 0, 0,-1, 0, 0, 1};
    // Margins of each (i,j)
    /**
     * max_min value of k of model at (i,j)
     */
    // get kmin and kmax for each [NI][NJ] frame; k ends as NK*2+1 when a
    // cell was found, NK when the column is empty
    for (i=0;i<NI;i++) {
        for (j=0;j<NJ;j++) {
            k = 0;
            while (k < NK) {
                if (*(mapCell[k]+j*NI+i)>0) { /*<Comment by ALF> have cell, some duplicate point by using this method*/
                    *(kmin+j*NI+i)=k;
                    for (k=NK-1;k>-1;k--) {
                        if (*(mapCell[k]+j*NI+i)>0) {
                            *(kmax+j*NI+i)=k;
                            k = NK*2;
                            break;
                        }
                    }
                }
                k++;
            }
            if (k < NK*2) {
                // empty column: inverted margins so later scans skip it
                *(kmin+j*NI+i)=NK+1;
                *(kmax+j*NI+i)=0;
            }
        }
    }
    //-------------------- modified by ALF at 2008-8-19 begin -------------------->
    //add: get epicardial triangle's vertex position, also some duplicate point in epicXYZ
    //Nepic = NI*NJ*2; //by sf
    epicX.reserve(Nepic);
    epicY.reserve(Nepic);
    epicZ.reserve(Nepic);
    // top surface (just above kmin) then bottom surface (just below kmax);
    // empty columns contribute out-of-volume z values (NK, -1 side effects
    // of the inverted margins above) — presumably filtered downstream
    for (i=0; i<NI; ++i) {
        for (j=0; j<NJ; ++j) {
            epicX.push_back(i);
            epicY.push_back(j);
            epicZ.push_back(*(kmin+j*NI+i)-1);
        }
    }
    for (i=0; i<NI; ++i) {
        for (j=0; j<NJ; ++j) {
            epicX.push_back(i);
            epicY.push_back(j);
            epicZ.push_back(*(kmax+j*NI+i)+1);
        }
    }
    // use the int counter: Nepic = 2*NI*NJ can exceed short range
    for (ii=0; ii<Nepic; ++ii) {
        epicX_old[ii]=epicX[ii];
        epicY_old[ii]=epicY[ii];
        epicZ_old[ii]=epicZ[ii];
    }
    //-------------------- modified by ALF at 2008-8-19 end --------------------<
    // get kTop: minimum of kmin and
    //     kBtm: maximum of kmax
    kTop=NK+1;
    kBtm=0;
    for (i=0;i<NI;i++) {
        for (j=0;j<NJ;j++) {
            if (*(kmin+j*NI+i)<kTop) kTop=*(kmin+j*NI+i);
            if (*(kmax+j*NI+i)>kBtm) kBtm=*(kmax+j*NI+i);
        }
    }
    // get kVtr: first slice containing a ventricular cell type (5..14),
    // so heart can be divided into two parts
    for (k=kTop;k<=kBtm;k++) {
        for (i=0;i<NI;i++) {
            for (j=0;j<NJ;j++) {
                if ((*(mapCell[k]+j*NI+i)>4)&&(*(mapCell[k]+j*NI+i)<15)) {
                    kVtr=k; // ventricular position
                    i = NI;
                    j = NJ;
                    k = kBtm;
                }
            }
        }
    }
    nHB=0;
    nBB=0;
    // get Bundle branches (type 5) & his bundle (type 4) positions
    for (k=kTop;k<=kBtm;k++) {
        for (i=0;i<NI;i++) {
            for (j=0;j<NJ;j++) {
                //if (*(mapCell[k]+j*NI+i)==4) {
                // change to BB
                if (*(mapCell[k]+j*NI+i)==5) {
                    iBB[0][nBB]=i;
                    iBB[1][nBB]=j;
                    iBB[2][nBB]=k;
                    nBB++;
                } else if (*(mapCell[k]+j*NI+i)==4) {
                    iHB[0][nHB]=i;
                    iHB[1][nHB]=j;
                    iHB[2][nHB]=k;
                    nHB++;
                }
            }
        }
    }
    // get endocardial positions: m==0 seeds the first cavity, m==1 the
    // second; cavity voxels adjacent to myocardium are recorded as the
    // endocardial surface (endoB*/endoC*).
    for (m=0;m<2;m++) {
        for (ii=0;ii<20000*ND3;ii++) {
            endoAx[ii]=0;
            endoAy[ii]=0;
            endoAz[ii]=0;
        }
        if (m==0) {
            endoAx[0]=24*ND;
            endoAy[0]=30*ND;
            endoAz[0]=40*ND;
            for (ii=0;ii<NENDO*ND3;ii++) {
                endoBx[ii]=0;
                endoBy[ii]=0;
                endoBz[ii]=0;
            }
        } else if (m==1) {
            endoAx[0]=26*ND;
            endoAy[0]=13*ND;
            endoAz[0]=36*ND;
            for (ii=0;ii<NENDO*ND3;ii++) {
                endoCx[ii]=0;
                endoCy[ii]=0;
                endoCz[ii]=0;
            }
        }
        // TRACE("\nFirst %d",*(mapCell[endoAz[0]]+endoAy[0]*NI+endoAx[0]));
        *(mapCell[endoAz[0]]+endoAy[0]*NI+endoAx[0])=30;
        iendo=0;
        endoAn=1;
        i0=0;
        // breadth-first flood fill over empty (type 0) voxels.
        // NOTE(review): no guard against endoAn exceeding the 20000*ND3
        // queue capacity — assumed sufficient for these anatomies.
        while (i0<endoAn) {
            flag=0;
            for (l=0;l<6;l++) {
                i=endoAx[i0]+iseqx[l];
                if((i<0)||(i>=NI)) continue;  // fixed: was i>NI (read past row end)
                j=endoAy[i0]+iseqy[l];
                if((j<0)||(j>=NJ)) continue;  // fixed: was j>NJ
                k=endoAz[i0]+iseqz[l];
                if((k<kTop)||(k>kBtm)) continue;
                if (*(mapCell[k]+j*NI+i)==0) { /*<Comment by ALF> find the normal direction */
                    *(mapCell[k]+j*NI+i)=30; /*<Comment by ALF> 30 is only a value to make sure no confuse with valid type */
                    endoAx[endoAn]=i;
                    endoAy[endoAn]=j;
                    endoAz[endoAn]=k;
                    endoAn++;
                }
                // first myocardial neighbor makes this cavity voxel an
                // endocardial surface point (record each voxel once)
                if ((flag==0) && *(mapCell[k]+j*NI+i)>0 && *(mapCell[k]+j*NI+i)<16) {
                    if (m==0) {
                        endoBx[iendo]=endoAx[i0];
                        endoBy[iendo]=endoAy[i0];
                        endoBz[iendo]=endoAz[i0];
                    } else if (m==1) {
                        endoCx[iendo]=endoAx[i0];
                        endoCy[iendo]=endoAy[i0];
                        endoCz[iendo]=endoAz[i0];
                    }
                    iendo++;
                    flag=1;
                }
            }
            i0++;
        }
        if (m==0) {
            NendoB=iendo;
            //TRACE("\nEndo B %d",NendoB);
        } else if (m==1) {
            NendoC=iendo;
            //TRACE("\nEndo C %d",NendoC);
        }
    }
    // restore flood-fill sentinels back to empty
    for (k=kTop;k<=kBtm;k++) {
        for (i=0;i<NI;i++) {
            for (j=0;j<NJ;j++) {
                if (*(mapCell[k]+j*NI+i)==30) {
                    *(mapCell[k]+j*NI+i)=0;
                }
            }
        }
    }
}
// ---- set parameter of the anisotropy ------
//
// velocicy(l)=0.5 m/s ==> dist*3/9msec
// velocity(t)/velocity(l)=1/3
// velocity(t)/velocity(l)=0.42?
// resistance(t)/(l)=9
// according to Clerc
// see Robert D.E., Circ. Res. 44:701-712,1979
// Input: dist
// Output: vl2[10],vt2[10],rrat1
//
// Set anisotropy parameters: squared longitudinal (vl2) and transverse
// (vt2) conduction velocities for inter-cell distances 1..10, plus the
// resistance-ratio term rrat1 = rrat - 1.
// Ratios follow Clerc / Roberts (Circ. Res. 44:701-712, 1979).
void setaniso(void) {
    short int n, ltrat;
    float vl, vt, vrat, rrat;
    float fct;
    ltrat = 2;        // longitudinal : transverse velocity ratio
    //ltrat=1;
    fct = 1.1;
    vrat = 1.0/ltrat;
    rrat = 1.0/9;     // transverse / longitudinal resistance ratio
    //vrat=1.0;
    //rrat=1.0;
    rrat1 = rrat - 1.;
    for (n = 0; n < 10; n++) {
        vl = fct*(n+1)*HRTscale/ND;  // physical distance for (n+1) cells
        vt = vl*vrat;
        vl2[n] = vl*vl;
        vt2[n] = vt*vt;
        //TRACE("\ni vl2 vt2 %2d %f %f,", n, vl2[n],vt2[n]);
    }
}
//
// --- calculate out-products of 'cell-neighber vectors' ----
//
//
// Precompute the table of normalized cross products (prx, pry, prz) of
// every pair of the 12 neighbor directions. Entries with i>5 or j>5 are
// derived by sign symmetry from earlier entries (direction l+6 is the
// negation of direction l), so the loop order matters: the base 6x6
// quadrant must be filled before the mirrored quadrants read it.
void neibdir (void) {
    void ijktoxyz(short int [3], float [3]);
    short int i, j;
    short int istrt[3],iterm[3],iterm1[3];
    float strt[3],term[3],dir[3];
    float term1[3],dir1[3],r;
    // 12 neighbor offsets; entry l+6 is the negation of entry l
    short int iseqx[12]={-1,-1, 0, 0, 1, 0, 1, 1, 0, 0,-1, 0 };
    short int iseqy[12]={ 0, 1, 1, 0, 0, 1, 0,-1,-1, 0, 0,-1 };
    short int iseqz[12]={ 0, 0, 0,-1,-1,-1, 0, 0, 0, 1, 1, 1 };
    istrt[0]=0;
    istrt[1]=0;
    istrt[2]=0;
    ijktoxyz(istrt,strt);  // physical position of the origin cell
    for (i=0;i<12;i++)
        for (j=0;j<12;j++) {
            if (i==j) {
                // cross product of a vector with itself is zero
                prx[i][j]=0.;
                pry[i][j]=0.;
                prz[i][j]=0.;
                continue;
            }
            if (i>5 && j>5) {
                // (-a) x (-b) = a x b: copy the base-quadrant entry
                prx[i][j]=prx[i-6][j-6];
                pry[i][j]=pry[i-6][j-6];
                prz[i][j]=prz[i-6][j-6];
                continue;
            }
            if (j>5) {
                // a x (-b) = -(a x b)
                prx[i][j]=-prx[i][j-6];
                pry[i][j]=-pry[i][j-6];
                prz[i][j]=-prz[i][j-6];
                continue;
            }
            if (i>5) {
                // (-a) x b = -(a x b)
                prx[i][j]=-prx[i-6][j];
                pry[i][j]=-pry[i-6][j];
                prz[i][j]=-prz[i-6][j];
                continue;
            }
            // base quadrant: compute both unit direction vectors in
            // physical coordinates, then their normalized cross product
            iterm[0]=iseqx[i];
            iterm[1]=iseqy[i];
            iterm[2]=iseqz[i];
            ijktoxyz(iterm,term);
            //linedir(strt,term,dir);
            dir[0]=term[0]-strt[0];
            dir[1]=term[1]-strt[1];
            dir[2]=term[2]-strt[2];
            r=sqrt(dir[0]*dir[0]+dir[1]*dir[1]+dir[2]*dir[2]);
            dir[0]=dir[0]/r;
            dir[1]=dir[1]/r;
            dir[2]=dir[2]/r;
            iterm1[0]=iseqx[j];
            iterm1[1]=iseqy[j];
            iterm1[2]=iseqz[j];
            ijktoxyz(iterm1,term1);
            //linedir(strt,term1,dir1);
            dir1[0]=term1[0]-strt[0];
            dir1[1]=term1[1]-strt[1];
            dir1[2]=term1[2]-strt[2];
            r=sqrt(dir1[0]*dir1[0]+dir1[1]*dir1[1]+dir1[2]*dir1[2]);
            dir1[0]=dir1[0]/r;
            dir1[1]=dir1[1]/r;
            dir1[2]=dir1[2]/r;
            //TRACE("\nlidir1 %f %f %f ",dir1[0],dir1[1],dir1[2]);
            // outprod(dir,dir1,out);
            prx[i][j]=dir[1]*dir1[2]-dir[2]*dir1[1];
            pry[i][j]=dir[2]*dir1[0]-dir[0]*dir1[2];
            prz[i][j]=dir[0]*dir1[1]-dir[1]*dir1[0];
            // normalize; assumes no two distinct base directions are
            // parallel (otherwise r would be zero here)
            r=sqrt(prx[i][j]*prx[i][j]+pry[i][j]*pry[i][j]+prz[i][j]*prz[i][j]);
            prx[i][j]=prx[i][j]/r;
            pry[i][j]=pry[i][j]/r;
            prz[i][j]=prz[i][j]/r;
        }
    /*
    for (i=0;i<6;i++)
    for (j=0;j<6;j++) {
    TRACE("\nneibdir %d %d %f %f %f ",i,j,prx[i][j],pry[i][j],prz[i][j]);
    }
    */
}
//
// ---- fibplane direction angle ------
// all plane directions are in j=22 (assumed to be parallel
// to the septal plane
// for all directions, lines atart from (1,22,90) to
// (Note: Selectable)plane(1): (50,22,90)
// (assumed perpendicular to heart axis)
//
// Build planedir[ ][0..maxlay]: one reference in-plane direction per
// myocardial layer, rotated in equal angular steps (total rotation
// pi/4 over maxlay layers) within the assumed septal plane (j fixed).
// Each direction is found by walking grid points away from a fixed
// origin until the angle from the layer-0 direction reaches n*delt.
void fibplane (void) {
    float getAngle(float [], float []);
    void ijktoxyz(short int [], float []);
    //void linedir(float [], float [], float []);
    short int i, j, k, n;
    short int iorg[3]={1,19,90};    // common origin of all direction rays
    short int iterm0[3]={1,19,1};   // endpoint defining the layer-0 direction
    short int iterm[3];
    float org[3];
    float term0[3],term[3];
    float dir0[3],dir[3];
    float r;
    float ang=1.;
    float arch=1.;
    float pai=3.1415926;
    float delt;
    // ---- angle per layer, max rotation angle=pi/2
    //TRACE("\nmaxlayer= %d ",maxlay);
    arch=pai/180.;   // degrees -> radians
    if (maxlay<=0) return;
    //delt=(2./3.)*pai/maxlay;
    delt=(1./4.)*pai/maxlay;
    //TRACE("\ndelt/arch= %f ",delt/arch);
    // ----- all in septal plane ----->
    ijktoxyz(iorg,org);
    ijktoxyz(iterm0,term0);
    //linedir(org,term0,dir0);
    dir0[0]=term0[0]-org[0];
    dir0[1]=term0[1]-org[1];
    dir0[2]=term0[2]-org[2];
    r=sqrt(dir0[0]*dir0[0]+dir0[1]*dir0[1]+dir0[2]*dir0[2]);
    planedir[0][0]=dir0[0]/r;
    planedir[1][0]=dir0[1]/r;
    planedir[2][0]=dir0[2]/r;
    // --- search next planedir ---->
    // walk i (then k once i hits NI) until the accumulated angle from
    // dir0 reaches this layer's target n*delt.
    // NOTE(review): the walk has no explicit upper bound on k — it is
    // presumably guaranteed to converge before leaving the grid; confirm.
    i=iterm0[0];
    j=iterm0[1];
    k=iterm0[2];
    // TRACE("\nplanedir 0 %f %f %f",planedir[0][0],planedir[1][0],planedir[2][0]);
    for (n=1; n<=maxlay; n++) {
        do {
            if (i<NI) {
                i=i+1;
            } else {
                k=k+1;
            }
            iterm[0]=i;
            iterm[1]=j;
            iterm[2]=k;
            ijktoxyz(iterm,term);
            //linedir(org,term,dir);
            dir[0]=term[0]-org[0];
            dir[1]=term[1]-org[1];
            dir[2]=term[2]-org[2];
            r=sqrt(dir[0]*dir[0]+dir[1]*dir[1]+dir[2]*dir[2]);
            dir[0]=dir[0]/r;
            dir[1]=dir[1]/r;
            dir[2]=dir[2]/r;
            ang=getAngle(dir0,dir);  // returned in degrees
            //TRACE("\n %f %f %f %f %f %f %f %d %d %d %f %f",dir0[0],dir0[1],dir0[2],
            //	dir[0],dir[1],dir[2],ang,i,j,k,ang*arch,n*delt);
        } while (ang*arch < n*delt);
        planedir[0][n]=dir[0];
        planedir[1][n]=dir[1];
        planedir[2][n]=dir[2];
        //TRACE("\nplanedir %2d %f %f %f",n,planedir[0][n],planedir[1][n],planedir[2][n]);
    }
    // for test ---->
    /*
    for (n=0; n< maxlay; n++) {
    for (m=0; m<3; m++) {
    dir0[m]=planedir[m][n];
    dir[m]=planedir[m][n+1];
    }
    ang=getAngle(dir0,dir);
    TRACE("\nn,ang %d,%f",n,ang);
    }
    */
    // <---- test end
}
//
//******** fiber direction for each (i,j,k) *************
//
//
//******** fiber direction for each (i,j,k) *************
//
// For every valid myocardial voxel with a resolvable layer, the fiber
// direction is the (normalized) cross product of that layer's plane
// direction (from fibplane) and the local layer-surface normal (from
// normdir). Voxels with a degenerate normal or cross product keep
// their fibdir entry unchanged (initialized elsewhere).
void fibdirct(void) {
    float getAngle(float [], float []);
    short int normdir(short int, short int, short int, float []);
    short int i, j, k, nneib, iLayer;
    char iCell;
    int locfib;
    //	float test[3],test1[3];
    float tmpx,tmpy,tmpz;
    //	float ang;
    float pdirx,pdiry,pdirz,r;
    //float dirx,diry,dirz;
    float nordir[3];
    //dirx=0.;
    //diry=0.;
    //dirz=0.;
    for(k=0;k<NK;k++) {
        for(j=0;j<NJ;j++) {
            for(i=0;i<NI;i++) {
                locfib=*(locXCT[k]+j*NI+i);
                iCell=*(mapCell[k]+j*NI+i);
                // only cell type 7 carries a per-voxel layer number
                // (stored in mapACT); all other types use layer 0
                if (iCell!=7) iLayer=0;
                else iLayer=*(mapACT[k]+j*NI+i)+1;
                if (locfib==-1) continue;  // not a model cell
                //if (*(MapLyr+locfib)<=0) continue;
                //if (*(MapLyr+locfib) >= 30) continue;
                if (iLayer<=0 || iLayer>=30) continue;
                nneib=normdir(i,j,k,nordir);
                r=sqrt(nordir[0]*nordir[0]+nordir[1]*nordir[1]+nordir[2]*nordir[2]);
                //TRACE("\nnordir %2d %2d %2d %f %f %f %d",
                //	 i+1,j+1,k+1,nordir[0],nordir[1],nordir[2],nneib);
                if (r<0.0000001) continue;  // degenerate normal: skip voxel
                pdirx=planedir[0][iLayer-1];
                pdiry=planedir[1][iLayer-1];
                pdirz=planedir[2][iLayer-1];
                //TRACE("\npdir %f %f %f %d",pdirx,pdiry,pdirz,iLayer);
                // --- fiberdir = planedir X normldir
                tmpx=pdiry*nordir[2]-pdirz*nordir[1];
                tmpy=pdirz*nordir[0]-pdirx*nordir[2];
                tmpz=pdirx*nordir[1]-pdiry*nordir[0];
                r=sqrt(tmpx*tmpx+tmpy*tmpy+tmpz*tmpz);
                if (r<0.0000001) continue;  // plane dir parallel to normal
                *(fibdir[0]+locfib)=tmpx/r;
                *(fibdir[1]+locfib)=tmpy/r;
                *(fibdir[2]+locfib)=tmpz/r;
                //TRACE("\nfibdir %2d %2d %2d %f %f %f %d",i+1,j+1,k+1, *(fibdir[0]+locfib),
                //	 *(fibdir[1]+locfib),*(fibdir[2]+locfib),locfib);
            }
        }
    }
    // ---- for test------>
    /*
    TRACE("\nj=22");
    i=21;
    for(k=58;k<62;k++) {
    for(j=31;j<33;j++) {
    for(n=0;n<3;n++) {
    locfib=*(locXCT[k]+j*NI+i);
    test[n]=*(fibdir[n]+locfib);
    test1[n]=planedir[n][1];
    }
    ang=getAngle(test,test1);
    TRACE("\n %d %d %d %f %f %f %f %d",
    i,j,k,ang,test[0],test[1],test[2],locfib);
    }
    }
    */
}
//
// calculate normal direction of fibplane at cell i
//
//
// calculate normal direction of fibplane at cell i
//
// Estimates the layer-surface normal at voxel (icl,jcl,kcl) by
// collecting its 12-neighborhood neighbors that lie in the SAME layer,
// then averaging the precomputed cross products (prx/pry/prz from
// neibdir) of consecutive collected face directions, closing the ring
// with the (last, first) pair. Returns the number of same-layer
// neighbors found; nordir is zeroed (and stays zero) when fewer than
// two neighbors exist.
short int normdir(short int icl,short int jcl,short int kcl,
                  float nordir[3]){
    char iCell,jCell;
    short int i,iLayer,jLayer;
    short int iface[12];
    int locnor, jloc;
    short int jx,jy,jz,l;
    float r,dirx,diry,dirz;
    short int nneib;
    // 12 neighbor offsets, same ordering as the prx/pry/prz tables
    short int iseqx[12]={-1,-1, 0, 0, 1, 0, 1, 1, 0, 0,-1, 0};
    short int iseqy[12]={ 0, 1, 1, 0, 0, 1, 0,-1,-1, 0, 0,-1};
    short int iseqz[12]={ 0, 0, 0,-1,-1,-1, 0, 0, 0, 1, 1, 1};
    nneib=0;
    r=0.;
    nordir[0]=0.;
    nordir[1]=0.;
    nordir[2]=0.;
    dirx=0.;
    diry=0.;
    dirz=0.;
    for (i=0;i<3;i++) {
        nordir[i]=0.;
    }
    for (i=0;i<12;i++) {
        iface[i]=0;
    }
    locnor=*(locXCT[kcl]+jcl*NI+icl);
    iCell=*(mapCell[kcl]+jcl*NI+icl);
    // layer number: only cell type 7 has one (stored in mapACT)
    if (iCell!=7) iLayer=0;
    else iLayer=*(mapACT[kcl]+jcl*NI+icl)+1;
    // collect neighbor directions whose voxel is in the same layer
    for (l=0;l<12;l++) {
        jx=icl+iseqx[l];
        if(jx<0 || jx>=NI) continue;
        jy=jcl+iseqy[l];
        if(jy<0 || jy>=NJ) continue;
        jz=kcl+iseqz[l];
        if(jz<0 || jz>=NK) continue;
        jloc=*(locXCT[jz]+jy*NI+jx);
        if(jloc==-1) continue;
        jCell=*(mapCell[jz]+jy*NI+jx);
        if (jCell!=7) jLayer=0;
        else jLayer=*(mapACT[jz]+jy*NI+jx)+1;
        //TRACE("\n%2d %2d %2d %d %d %d %d", jx+1,jy+1,jz+1,jCell,jLayer,iLayer,nneib);
        if(jLayer<1) continue;
        if(iLayer!=jLayer) continue;
        iface[nneib]=l;
        nneib=nneib+1;
    }
    // --- neglect fiber edge --->
    if(nneib<=1) return nneib;
    // average cross products of consecutive face-direction pairs
    for(l=0;l<nneib-1;l++) {
        dirx=dirx+prx[iface[l]][iface[l+1]];
        diry=diry+pry[iface[l]][iface[l+1]];
        dirz=dirz+prz[iface[l]][iface[l+1]];
    }
    // --- two neighbering points only --->
    // --- in opposite --->
    // close the ring: last paired with first
    dirx=dirx+prx[iface[nneib-1]][iface[0]];
    diry=diry+pry[iface[nneib-1]][iface[0]];
    dirz=dirz+prz[iface[nneib-1]][iface[0]];
    dirx=dirx/(1.*nneib);
    diry=diry/(1.*nneib);
    dirz=dirz/(1.*nneib);
    r=sqrt(dirx*dirx+diry*diry+dirz*dirz);
    if (r<0.00001) {
        // cancelling contributions: leave the (near-zero) average
        // unnormalized; caller treats a tiny norm as "skip this voxel"
        ;//TRACE("\nicl,jcl,kcl,nneib,iface %d %d %d %d %d %d",
        //	icl,jcl,kcl,nneib,iface[0],iface[1]);
    } else {
        dirx=dirx/r;
        diry=diry/r;
        dirz=dirz/r;
    }
    nordir[0]=dirx;
    nordir[1]=diry;
    nordir[2]=dirz;
    return nneib;
}
//
// ---- angle of two vectors ---
//
// Return the angle in degrees (0..180) between two 3-vectors.
// Fixes vs. original: the cosine is clamped to [-1,1] so float rounding on
// (anti)parallel vectors cannot push acos() out of its domain and yield NaN,
// and a zero-length input (0/0 -> NaN before) now reports 0.
float getAngle (float vct1[3], float vct2[3]) {
	short int n;
	float pi=3.1415926;
	float ang1=0.;
	float sumv=0.;    // dot product vct1 . vct2
	float sumv1=0.;   // |vct1|^2
	float sumv2=0.;   // |vct2|^2
	float cosv;
	for (n=0;n<3;n++) {
		sumv1=sumv1+vct1[n]*vct1[n];
		sumv2=sumv2+vct2[n]*vct2[n];
		sumv=sumv+vct1[n]*vct2[n];
	}
	// either vector has zero length: angle is undefined, report 0
	if (sumv1*sumv2 <= 0.f) return 0.f;
	cosv = sumv/sqrt(sumv1*sumv2);
	// clamp against rounding drift outside acos()'s domain
	if (cosv > 1.f) cosv = 1.f;
	if (cosv < -1.f) cosv = -1.f;
	ang1 = acos(cosv) * 180.f / pi;
	return ang1;
}
//
// ---- transform from I,J,K to X,Y,Z -------
//
// Map grid indices (i,j,k) to physical coordinates (x,y,z):
//   xyz = heart origin (HRTx0,HRTy0,HRTz0) + tmswf * ijk.
void ijktoxyz(short int ijk[3], float xyz[3]) {
    const float origin[3] = {HRTx0, HRTy0, HRTz0};
    for (int n = 0; n < 3; n++) {
        xyz[n] = origin[n] + ijk[0]*tmswf[n][0]
                           + ijk[1]*tmswf[n][1]
                           + ijk[2]*tmswf[n][2];
    }
}
//
// ----transform coordinate system to I,J,K to establish
// local coordinate system (shift and rotated) -------
//
// step1: shift old system to i,j,k
// the old coordinate system
// step2: rotate x,y,z system to fiber coordinate
// system so that Z axis has direction of
// fibdir(i,j,k) and X axis has direction
// fiber direction and Y axis=Z(x)X
// step3: solve equation
// x=l1*X+l2*Y+l3*Z
// y=m1*X+m2*Y+m3*Z
// z=n1*X+n2*Y+n3*Z
//		where l,m,n are the direction cosines of the axes
// l=cos(alpha), m=cos(beta) and n=cos(theta)
//
// Establish the local fiber coordinate frame at cell (i,j,k):
//   Z axis = fiber direction at the cell (fibdir),
//   X axis = plane direction of the cell's wall layer (planedir),
//   Y axis = Z x X, normalised.
// The axes are left in the globals xaxis/yaxis/zaxis.  Returns the
// determinant |X Y Z| used by the caller to transform offsets into this
// frame; returns 0 when the frame cannot be built (d==0 marks failure).
float local(short int i, short int j, short int k) {
	// ++++ d is the mark of whether the transform was successful +++
	float getAngle(float [], float []);
	char iCell;
	short int n,iLayer;
	int locloc;
	float r,d;
	locloc=*(locXCT[k]+NI*j+i);
	iCell=*(mapCell[k]+NI*j+i);
	if (iCell!=7) iLayer=0;
	else iLayer=*(mapACT[k]+j*NI+i)+1;
	// FIX: guard the layer index -- for iLayer<1 (any non-type-7 cell)
	// planedir[n][iLayer-1] would read out of bounds.  Report failure
	// instead, consistent with the other d=0 returns below.
	if (iLayer<1) {
		d=0.;
		return d;
	}
	for (n=0; n<3; n++) {
		zaxis[n]=*(fibdir[n]+locloc);
		xaxis[n]=planedir[n][iLayer-1];
	}
	// no fiber direction stored at this cell: frame undefined
	if (zaxis[0]<0.0000001 && zaxis[0]>-0.0000001 &&
		zaxis[1]<0.0000001 && zaxis[1]>-0.0000001 &&
		zaxis[2]<0.0000001 && zaxis[2]>-0.0000001 ) {
		d=0.;
		return d;
	}
	// Y = Z x X
	yaxis[0]=zaxis[1]*xaxis[2]-zaxis[2]*xaxis[1];
	yaxis[1]=zaxis[2]*xaxis[0]-zaxis[0]*xaxis[2];
	yaxis[2]=zaxis[0]*xaxis[1]-zaxis[1]*xaxis[0];
	r=sqrt(yaxis[0]*yaxis[0]+
		yaxis[1]*yaxis[1]+yaxis[2]*yaxis[2]);
	if (r < 0.0000001) {
		// Z parallel to X: cross product degenerate
		d=0.;
		return d;
	}
	yaxis[0]=yaxis[0]/r;
	yaxis[1]=yaxis[1]/r;
	yaxis[2]=yaxis[2]/r;
	// determinant of the axis matrix [xaxis yaxis zaxis]
	d=xaxis[0]*yaxis[1]*zaxis[2]+xaxis[1]*yaxis[2]*zaxis[0]
	 +xaxis[2]*yaxis[0]*zaxis[1]-xaxis[2]*yaxis[1]*zaxis[0]
	 -xaxis[0]*yaxis[2]*zaxis[1]-xaxis[1]*yaxis[0]*zaxis[2];
	return d;
}
//
// ******* calculate anisotropic coefficient for i,j,k ********
//
// Apply the anisotropic conductivity tensor at voxel (i,j,k) to v in place:
//   v <- (I + rrat1 * f f^T) v,  where f is the fiber direction there.
void anfct(short int i, short int j, short int k, float v[3]) {
    int loc = *(locXCT[k] + NI*j + i);
    float fib[3];
    float out[3];
    short int row;

    for (row = 0; row < 3; row++)
        fib[row] = *(fibdir[row] + loc);

    for (row = 0; row < 3; row++) {
        // row of F = identity row + rrat1 * fib[row] * fib
        float m0 = ((row == 0) ? 1.f : 0.f) + rrat1*fib[row]*fib[0];
        float m1 = ((row == 1) ? 1.f : 0.f) + rrat1*fib[row]*fib[1];
        float m2 = ((row == 2) ? 1.f : 0.f) + rrat1*fib[row]*fib[2];
        out[row] = m0*v[0] + m1*v[1] + m2*v[2];
    }
    for (row = 0; row < 3; row++)
        v[row] = out[row];
}
// Read the matrix data of the body (a344.data)
// Read the body transfer-matrix data from "tour.mtx": the NL x NL matrix aw
// (row by row), the vector bw, and the scalar alp.  On open failure prints a
// message and sets the global error flag flag_flop.
void rdmtx(void) {
	short int i;
	HFILE hFp;
	// (historic code that derived the file name from 'filepath')
	//short int index;
	//index=filepath.FindOneOf(".");
	//filepath.SetAt(index+1,'m');
	//filepath.SetAt(index+2,'t');
	//filepath.SetAt(index+3,'x');
	//hFp = _lopen(filepath,OF_READ);
	hFp=_lopen(dataPath+"tour.mtx ",OF_READ);
	if (hFp==HFILE_ERROR)
	{
		fprintf(stdout,"can not create the file--mtx\n");
		fflush(stdout);
		flag_flop=1;   // signal failure to the caller
		return;
	}
	// each row of aw is NL 4-byte values
	// NOTE(review): hard-coded 4 assumes sizeof(float)==4 -- confirm aw/bw/alp types
	for (i=0;i<NL;i++)
		_lread(hFp,aw[i],NL*4);
	_lread(hFp,bw,NL*4);
	_lread(hFp,&alp,4);
	_lclose(hFp);
}
// Make stiml data for inverse excitation
// Build the stimulus site list (mag[], count mNub) for inverse excitation.
// ivolpkj sets the global conduction depth idist; sites are collected on the
// surface of the ventricular volume (mapAPD==7) as seen from the four side
// views, the bottom layer, and a derived septum line, but only for slices
// k <= kVtr+idist.  mapAPD is rewritten: 7 = interior tissue, 6 = surface,
// 0 = outside.
// FIX vs. original: the septum look-ahead read mapAPD one row past the end
// (index (NJ)*NI+i at j==NJ-1); the look-ahead is now bounds-guarded.
void stminvx(short int ivolpkj) {
	short int ks, nspt, mk, i, j, k;
	//idist=20*ND;
	idist=ivolpkj;
	ks=kVtr+idist;   // deepest slice from which stimulus sites are taken
	// initialise mapAPD: 7 for ventricular tissue (cell type > 4), else 0
	for (k=kVtr; k<=kBtm; k++) {
		for (i=0; i<NI; i++) {
			for (j=0; j<NJ; j++) {
				if (*(mapCell[k]+j*NI+i)>4)
					*(mapAPD[k]+j*NI+i)=7;
				else
					*(mapAPD[k]+j*NI+i)=0;
			}
		}
	}
	mNub=0;
	for (k=kVtr; k<NK; k++) {
		// back view: first tissue voxel along +i per (j,k) column
		for (j=0;j<NJ;j++) {
			for (i=0;i<NI;i++) {
				if (*(mapAPD[k]+j*NI+i)==6) {
					break;   // already marked as surface by an earlier view
				}
				if (*(mapAPD[k]+j*NI+i)==7) {
					*(mapAPD[k]+j*NI+i)=6;
					if (k<=ks) {
						*(mag[0]+mNub)=i;
						*(mag[1]+mNub)=j;
						*(mag[2]+mNub)=k;
						*(mag[3]+mNub)=0;
						mNub++;
					}
					break;
				}
			}
		}
		// front view: first tissue voxel along -i
		for (j=0; j<NJ; j++) {
			for (i=NI-1; i>-1; i--) {
				if (*(mapAPD[k]+j*NI+i)==6) {
					break;
				}
				if (*(mapAPD[k]+j*NI+i)==7 ) {
					*(mapAPD[k]+j*NI+i)=6;
					if (k<=ks) {
						*(mag[0]+mNub)=i;
						*(mag[1]+mNub)=j;
						*(mag[2]+mNub)=k;
						*(mag[3]+mNub)=0;
						mNub++;
					}
					break;
				}
			}
		}
		// right view: first tissue voxel along +j
		for (i=0;i<NI;i++) {
			for (j=0;j<NJ;j++) {
				if (*(mapAPD[k]+j*NI+i)==6) {
					break;
				} else if (*(mapAPD[k]+j*NI+i)==7) {
					*(mapAPD[k]+j*NI+i)=6;
					if (k<=ks) {
						*(mag[0]+mNub)=i;
						*(mag[1]+mNub)=j;
						*(mag[2]+mNub)=k;
						*(mag[3]+mNub)=0;
						mNub++;
					}
					break;
				}
			}
		}
		// left view: first tissue voxel along -j
		for (i=0;i<NI;i++) {
			for (j=NJ-1;j>-1;j--) {
				if (*(mapAPD[k]+j*NI+i)==6) {
					break;
				}
				if (*(mapAPD[k]+j*NI+i)==7 ) {
					*(mapAPD[k]+j*NI+i)=6;
					if (k<=ks) {
						*(mag[0]+mNub)=i;
						*(mag[1]+mNub)=j;
						*(mag[2]+mNub)=k;
						*(mag[3]+mNub)=0;
						mNub++;
					}
					break;
				}
			}
		}
	}
	// the lowest layer: every remaining interior voxel becomes surface
	for (i=0;i<NI;i++) {
		for (j=0;j<NJ;j++) {
			if (*(mapAPD[kBtm]+j*NI+i)==7) {
				*(mapAPD[kBtm]+j*NI+i)=6;
				if (kBtm<=ks) {
					*(mag[0]+mNub)=i;
					*(mag[1]+mNub)=j;
					*(mag[2]+mNub)=kBtm;
					*(mag[3]+mNub)=0;
					mNub++;
				}
			}
		}
	}
	// septum setting: along each +j scan line, mark the first re-entry into
	// tissue (0 -> 7) that follows an earlier exit (7 -> 0)
	nspt=0;
	for (k=kVtr;k<=kBtm;k++) {
		for (i=0;i<NI;i++) {
			mk=0;   // set once a 7 -> 0 transition has been seen
			for (j=1;j<NJ;j++) {
				if ((*(mapAPD[k]+(j-1)*NI+i)==0)&&(*(mapAPD[k]+j*NI+i)==7)&&(mk==1)) {
					nspt=nspt+1;
					*(mapAPD[k]+j*NI+i)=6;
					break;
				}
				// FIX: guard j+1 so the look-ahead never reads past row NJ-1
				if ((j+1<NJ)&&(*(mapAPD[k]+j*NI+i)==7)&&(*(mapAPD[k]+(j+1)*NI+i)==0))
					mk=1;
			}
		}
	}
}
// APD distribution
// Compute the activation-step map (mapACT) by wavefront propagation from the
// stimulus sites previously collected in mag[] (mNub entries, see stminvx).
// Per outer iteration ic the front advances one step: mapAPD encodes state
// (6 = surface, 7 = interior; +10*ND marks freshly excited, +20*ND settled),
// and mapACT receives the step index at which each voxel fires.  An inner
// walk additionally propagates along surface (value-6) cells for up to idist
// steps per front cell, modelling the fast conducting layer.  On completion
// maxlay = total step count + 1.
// FIX vs. original: on malloc failure the already-allocated iACTv buffers
// are now released before returning (they previously leaked).
void XCTinvcm(void) {
	short int * iACTv[3];
	short int jACTv[3][NI*ND],kACTv[3][NI*ND];
	short int idir[12];
	// 12 neighbour offsets: in-plane ring plus the two adjacent slices
	short int iseqx[12]={-1,-1, 0, 0, 1, 0, 1, 1, 0, 0,-1, 0 };
	short int iseqy[12]={ 0, 1, 1, 0, 0, 1, 0,-1,-1, 0, 0,-1 };
	short int iseqz[12]={ 0, 0, 0,-1,-1,-1, 0, 0, 0, 1, 1, 1 };
	short int ix,iy,iz,jx,jy,jz,l;
	short int jdist,jx0,jy0,jz0,mappu,mappu0;
	long i,j,k,nACTv,mACTv,ncont;
	long nblck,nStep,nbrch;
	// unsigned char mappu,mappu0;
	//idist=20*ND;
	//------ initialize mapACT ---------
	for(i=0;i<3;i++) {
		iACTv[i]=(short int *) malloc(50000*ND3*2);
		if(iACTv[i]==NULL) {
			MessageBox(NULL,"Out of memory !",NULL,MB_OK);
			flag_flop=1;
			// FIX: release the buffers already allocated in earlier passes
			for(j=0;j<i;j++) free(iACTv[j]);
			return;
		}
	}
	for(k=0;k<NK;k++) {
		for(j=0;j<NJ;j++) {
			for(i=0;i<NI;i++) {
				*(mapACT[k]+j*NI+i)=0;
			}
		}
	}
	for(i=0;i<3;i++) {
		for(j=0;j<50000*ND3;j++) {
			*(iACTv[i]+j)=0;
		}
	}
	nblck=0;   // total number of excited voxels
	ic=0;      // current step index (global)
	nACTv=0;   // number of voxels in the current front
	// mapAPD[]: a map containing value = 6 (boundary) and value = 7 (ventricular)
	while (1) {
		// seed the front with stimulus sites scheduled for step ic
		for(i=0;i<mNub;i++) {
			if (*(mag[3]+i)!=ic) continue;
			jx=*(mag[0]+i);
			jy=*(mag[1]+i);
			jz=*(mag[2]+i);
			mappu=*(mapAPD[jz]+jy*NI+jx);
			*(iACTv[0]+nACTv)=jx;
			*(iACTv[1]+nACTv)=jy;
			*(iACTv[2]+nACTv)=jz;
			*(mapACT[jz]+jy*NI+jx)=ic;
			*(mapAPD[jz]+jy*NI+jx)=mappu+20*ND;   // mark as settled
			nACTv++;
		}
		ic=ic+1;
		nACTv=0;
		// advance the front: every boundary/interior voxel touching a
		// settled (+20*ND) neighbour fires at step ic
		for(k=kVtr;k<=kBtm;k++) {
			for(i=0;i<NI;i++) {
				for(j=0;j<NJ;j++) {
					mappu=*(mapAPD[k]+j*NI+i);
					if((mappu<6)||(mappu>7)) continue; // exclude 0 and others, if any
					ncont=0;
					for(l=0;l<12;l++) {
						idir[l]=0;
						ix=i+iseqx[l];
						if((ix<0)||(ix>(NI-1))) continue;
						iy=j+iseqy[l];
						if((iy<0)||(iy>(NJ-1))) continue;
						iz=k+iseqz[l];
						if((iz<kVtr)||(iz>kBtm)) continue;
						mappu0=*(mapAPD[iz]+iy*NI+ix);
						if((mappu0<20*ND+6)||(mappu0>20*ND+7)) continue;
						ncont=ncont+1;
					}
					if(ncont==0) continue;
					if((mappu==6)||(mappu==7)) *(mapAPD[k]+j*NI+i)+=10*ND;  // freshly excited
					*(iACTv[0]+nACTv)=i;
					*(iACTv[1]+nACTv)=j;
					*(iACTv[2]+nACTv)=k;
					*(mapACT[k]+j*NI+i)=ic;
					nACTv++;
				}
			}
		}
		// conductive system: from each freshly excited surface voxel
		// (value 10*ND+6), walk outward along value-6 surface voxels up to
		// idist steps and let them fire in the same step ic
		mACTv=nACTv;
		for (i=0;i<nACTv;i++) {
			jx=*(iACTv[0]+i);
			jy=*(iACTv[1]+i);
			jz=*(iACTv[2]+i);
			mappu=*(mapAPD[jz]+jy*NI+jx);
			if (mappu != 10*ND+6)
				continue;
			jACTv[0][0]=jx;
			jACTv[1][0]=jy;
			jACTv[2][0]=jz;
			nStep=0;
			nbrch=1;
			jdist=1;
			// NOTE(review): jACTv/kACTv hold at most NI*ND entries per level;
			// confirm a surface front level cannot exceed that bound
			while (1) {
				for (j=0;j<nbrch;j++) {
					jx0=jACTv[0][j];
					jy0=jACTv[1][j];
					jz0=jACTv[2][j];
					for (l=0;l<12;l++) {
						jx=jx0+iseqx[l];
						if ((jx<=-1)||(jx>NI-1)) continue; // <0
						jy=jy0+iseqy[l];
						if ((jy<=-1)||(jy>NJ-1)) continue; // <0
						jz=jz0+iseqz[l];
						if ((jz<kVtr)||(jz>kBtm)) continue;
						mappu=*(mapAPD[jz]+jy*NI+jx);
						if (mappu!=6) continue;           // only quiescent surface voxels
						kACTv[0][nStep]=jx;
						kACTv[1][nStep]=jy;
						kACTv[2][nStep]=jz;
						nStep++;
						*(iACTv[0]+mACTv)=jx;
						*(iACTv[1]+mACTv)=jy;
						*(iACTv[2]+mACTv)=jz;
						*(mapACT[jz]+jy*NI+jx)=ic;
						*(mapAPD[jz]+jy*NI+jx)=mappu+10*ND;
						mACTv++;
					}
				}
				if (nStep==0) break;
				jdist=jdist+1;
				for (k=0;k<nStep;k++) {
					jACTv[0][k]=kACTv[0][k];
					jACTv[1][k]=kACTv[1][k];
					jACTv[2][k]=kACTv[2][k];
				}
				if (jdist>=idist) break;   // fast-conduction reach exhausted
				nbrch=nStep;
				nStep=0;
			}
		}
		nACTv=mACTv;
		// promote freshly excited voxels (+10*ND) to settled (+20*ND) for
		// the next step
		for (i=0;i<NI;i++)
			for (j=0;j<NJ;j++)
				for (k=kVtr;k<=kBtm;k++) {
					mappu=*(mapAPD[k]+j*NI+i);
					if ((mappu>30*ND+7)||(mappu<10*ND+6)) continue;
					*(mapAPD[k]+j*NI+i)=mappu+10*ND;
				}
		// stop once propagation has started and nothing fired this step
		if((nblck!=0)&&(nACTv==0)) break;
		nblck=nblck+nACTv;
	}
	maxlay=ic+1;
	// ic steps were needed to compute excitation; total excited units = nblck
	for (i=0;i<3;i++) {
		free(iACTv[i]);
	}
}
// mapACT <-- deference of Phase 2 from defined value (ms)
// Convert the activation-step map into mapAPD (per-cell-type scaled times,
// plus a deterministic pseudo-random APD spread) and write the activation
// file "tour.act": kmin, kmax, the step count ic, then one 2-byte value per
// mapped voxel in (i,j,kmin..kmax) order.
// FIX vs. original: the innermost k-loop lacked braces, so f.Write() ran
// only once per (i,j) column (writing just the last k's value) although its
// indentation -- and the kmin/kmax header implying one value per voxel --
// show it was meant to run per voxel.  Braces added.
void savACT(void) {
	char iCell;
	short int i,j,k,m;
	int idev,init,md;
	// mapAPD <- activation step scaled by the cell type's factor iparm[..][9];
	// unexcited voxels (mapACT < 1) and type-15 cells are copied unscaled
	for (k=0;k<NK;k++)
		for (i=0;i<NI;i++)
			for (j=0;j<NJ;j++) {
				if (*(mapACT[k]+j*NI+i)<1) {
					*(mapAPD[k]+j*NI+i)=*(mapACT[k]+j*NI+i);
					continue;
				}
				iCell=*(mapCell[k]+j*NI+i);
				if (iCell==15) {
					*(mapAPD[k]+j*NI+i)=*(mapACT[k]+j*NI+i);
					continue;
				}
				*(mapAPD[k]+j*NI+i)=*(mapACT[k]+j*NI+i)*(*(iparm+(iCell-1)*NPARM+9));
			}
	// Random distribution of the APD: a simple deterministic LCG
	// (init = init*65+1 mod 256) spreads +-idev/2 around zero per voxel
	for (iCell=1;iCell<=NCELL;iCell++) {
		if (*(iparm+(iCell-1)*NPARM+11)<=0) continue;
		idev=*(iparm+(iCell-1)*NPARM+11)*(*(iparm+(iCell-1)*NPARM+2));
		init=idev;
		for (k=0;k<NK;k++) {
			for (i=0;i<NI;i++) {
				for (j=0;j<NJ;j++) {
					if (*(mapCell[k]+j*NI+i)!=iCell)
						continue;
					init=init*65+1;
					md=init%256;
					*(mapAPD[k]+j*NI+i) = (short int)(idev*(md-128)/12800);
					init=md;
				}
			}
		}
	}
	// Save file of ACT
	CFile f;
	CFileException e;
	if (!f.Open( dataPath+"tour.act ", CFile::modeCreate | CFile::modeWrite, &e )) {
#ifdef _DEBUG
		afxDump << "File could not be opened " << e.m_cause << "\n";
#endif
	}
	f.Write(kmin,2*NI*NJ);
	f.Write(kmax,2*NI*NJ);
	f.Write(&ic,2);
	for (i=0;i<NI;i++) {
		for (j=0;j<NJ;j++) {
			if (*(kmin+j*NI+i)==NK+1)
				continue;   // empty column: nothing mapped at this (i,j)
			// FIX: braces so one value is written per voxel of the column
			for (k=*(kmin+j*NI+i);k<=*(kmax+j*NI+i);k++) {
				m=*(mapAPD[k]+j*NI+i)/6/ND;
				f.Write(&m,2); // hui modify from 1 to 2
			}
		}
	}
	f.Close();
}
// mapACT <-- deference of Phase 2 from defined value (ms)
/*
void savACT(int myid) {
char iCell;
short int i,j,k,m;
int idev,init,md;
HFILE hFp;
for (k=0;k<NK;k++)
for (i=0;i<NI;i++)
for (j=0;j<NJ;j++) {
if (*(mapACT[k]+j*NI+i)<1) {
*(mapAPD[k]+j*NI+i)=*(mapACT[k]+j*NI+i);
continue;
}
iCell=*(mapCell[k]+j*NI+i);
if (iCell==15) {
*(mapAPD[k]+j*NI+i)=*(mapACT[k]+j*NI+i);
continue;
}
//*(mapACT[k]+j*NI+i)*= *(iparm+(iCell-1)*NPARM+9);
*(mapAPD[k]+j*NI+i)=*(mapACT[k]+j*NI+i)*(*(iparm+(iCell-1)*NPARM+9));
}
// Random distribution of the APD
for (iCell=1;iCell<=NCELL;iCell++) {
if (*(iparm+(iCell-1)*NPARM+11)<=0) continue;
idev=*(iparm+(iCell-1)*NPARM+11)*(*(iparm+(iCell-1)*NPARM+2));
init=idev;
for (k=0;k<NK;k++) {
for (i=0;i<NI;i++) {
for (j=0;j<NJ;j++) {
if (*(mapCell[k]+j*NI+i)!=iCell)
continue;
init=init*65+1;
md=init%256;
*(mapAPD[k]+j*NI+i) = (short int)(idev*(md-128)/12800);
init=md;
}
}
}
}
// Save file of ACT
CFile f;
CFileException e;
//short int index;
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'a');
//filepath.SetAt(index+2,'c');
//filepath.SetAt(index+3,'t');
//hFp=_lcreat(dataPath+"tour.act ",0);
//if (hFp==HFILE_ERROR)
//{
// fprintf(stdout,"can not create the file--act\n");
// fflush(stdout);
// return;
//}
if (myid==0){
if (!f.Open( dataPath+"tour.act ", CFile::modeCreate | CFile::modeWrite, &e )) {
#ifdef _DEBUG
afxDump << "File could not be opened " << e.m_cause << "\n";
#endif
}
// if (!f.Open( filepath, CFile::modeCreate | CFile::modeWrite, &e )) {
//#ifdef _DEBUG
// afxDump << "File could not be opened " << e.m_cause << "\n";
//#endif
// }
f.Write(kmin,2*NI*NJ);
f.Write(kmax,2*NI*NJ);
f.Write(&ic,2);
};
for (i=0;i<NI;i++) {
for (j=0;j<NJ;j++) {
if (*(kmin+j*NI+i)==NK+1)
continue;
for (k=*(kmin+j*NI+i);k<=*(kmax+j*NI+i);k++)
m=*(mapAPD[k]+j*NI+i)/6/ND;
if (myid==0) f.Write(&m,2); // hui modify from 1 to 2
}
}
if (myid==0) f.Close();
}
*/
// **************** sub excitation ********************
void XCTcalm(int myid) {
// FILE *fp;
void wtXCTm(short int,short int,short int,short int);
void bbDLYm(short int,short int,short int);
void rdXCTm(short int,short int,short int,short int);
short int itmp, tmp;
short int iStm,ires,irp,irel,ist,kBB;
float phsft,mxDLY,mACCl,icross,delt;
char mCell,iCell,kCell;
short int *iACTv[4];
short int *iACTvOld[4];
short int *jACTv[4];
short int *kACTv[4];
short int *iXCT[NK];
short int *iXCTapd[NK];
short int *iXCTOld[NK];
short int iseqx[12]={-1,-1, 0, 0, 1, 0, 1, 1, 0, 0,-1, 0};
short int iseqy[12]={ 0, 1, 1, 0, 0, 1, 0,-1,-1, 0, 0,-1};
short int iseqz[12]={ 0, 0, 0,-1,-1,-1, 0, 0, 0, 1, 1, 1};
short int ix,iy,iz,jx,jy,jz,iv,l;
short int jdist,jx0,jy0,jz0,is,ICL,ivel;
short int iSTOP, iS1S2, dS1S2Old, iCell5Ex;
long i,j,k,nACTv,mACTv,nACTvOld;
long nblck,nStep,nbrch;
// >>>>>>> aniso >>>>>>
float xani,yani,zani,dani,elp;
float dxani,dyani,dzani;
short int itms1=0;
// ---- for vtr aniso use
// storing the ellipsoid propagation times ---
//--------- maximum excitation time Step: maxXctStep -------------
for(i=0;i<4;i++) {
iACTv[i] = (short int *) malloc(50000*ND3*2);
iACTvOld[i] = (short int *) malloc(50000*ND3*2);
jACTv[i] = (short int *) malloc(50000*ND3*2);
kACTv[i] = (short int *) malloc(50000*ND3*2);
if((iACTv[i]==NULL)||(iACTvOld[i]==NULL)||
(jACTv[i]==NULL)||(kACTv[i]==NULL)) {
MessageBox(NULL,"Out of memory !",NULL,MB_OK);
return;
}
}
for(i=0;i<NK;i++) {
iXCT[i] = (short int *) malloc(NI*NJ*2);
iXCTapd[i] = (short int *) malloc(NI*NJ*2);
iXCTOld[i] = (short int *) malloc(NI*NJ*2);
if((iXCT[i]==NULL)||(iXCTOld[i]==NULL)) {
MessageBox(NULL,"Out of memory !",NULL,MB_OK);
return;
}
}
for(i=0;i<4;i++) {
for(j=0;j<50000*ND3;j++) {
*(iACTv[i]+j)=0;
*(iACTvOld[i]+j)=0;
*(jACTv[i]+j)=0;
*(kACTv[i]+j)=0;
}
}
// --- file mapXCT is initialized with INFTIME ----
for(i=0;i<NCYCL;i++) {
for(j=0;j<50000*ND3;j++) {
*(mapXCTm[i]+j)=INFTIME;
}
}
for(k=0;k<NK;k++) {
for(j=0;j<NJ;j++) {
for(i=0;i<NI;i++) {
*(iXCT[k]+j*NI+i)=INFTIME;
*(iXCTapd[k]+j*NI+i)=0;
*(iXCTOld[k]+j*NI+i)=INFTIME;
}
}
}
mxcycle=0;
short int tested[NCELL];
for(i=0;i<NCELL;i++)
tested[i]=0;
for(i=0;i<nttl;i++) {
jx=ipttl[0][i]; /*<Comment by ALF> pos of ith cell*/
jy=ipttl[1][i];
jz=ipttl[2][i];
iCell=*(mapCell[jz]+jy*NI+jx); /*<Comment by ALF> cell type index */
if(tested[iCell-1]==0)
{*(iparm+(iCell-1)*NPARM+18)+=ipttl[3][i];tested[iCell-1]=1;
if (iCell!=1) {*(iparm+(1-1)*NPARM+18)+=ipttl[3][i];//maxXctStep+=ipttl[3][i];
}
}
//TRACE("\nNTTL (%3d %3d %3d) %2d",jx,jy,jz,iCell);
// set pacemake time of no. 5 cells
if (iCell==5) {
ipstm[0][i]=100*ND/(ipttl[3][i]+1);
if((ipstm[0][i]*ipttl[3][i])<100*ND) ipstm[0][i]+=1;
//ipstm[0][i]=100/(ipttl[3][i]+1);
//if((ipstm[0][i]*ipttl[3][i])<100) ipstm[0][i]+=1;
//TRACE("\nCell 5, (%d %d %d) %d %d",jx,jy,jz, ipttl[3][i],ipstm[0][i]);
continue;
}
// iparm(n,18) = BCL basic cycle length (ms) of pacing
// iparm(n,20) = inc increament of BCL(ms/cycle)
ipstm[0][i]=*(iparm+(iCell-1)*NPARM+17);
ipstm[1][i]=*(iparm+(iCell-1)*NPARM+19);
ipstm[2][i]=0;
}
nblck=0;
ic=0;
nACTv=0;
iS1S2=0;
iCell5Ex=0;
// ------ stimulus: pacemaker spontanous firing -------
while (1) {
// In this loop, ipttl[3][i] is mainly used to
// decide ipstm[0][i] and itself
jx=0;
jy=0;
jz=0;
iStm=0;
excited=0;
for (i=0;i<nttl;i++) {
jx=ipttl[0][i];
jy=ipttl[1][i];
jz=ipttl[2][i];
iStm=ipttl[3][i];
iCell=*(mapCell[jz]+jy*NI+jx);
//TRACE("\nStimulus (%3d %3d %3d)%2d %d %d",jx,jy,jz,iCell,iStm, mxcycle);
//TRACE("\nbreak1 mxcycle=%d NCYCL=%d ic=%d iCell=%d, iStm=%d, mS2BN=%d,ipstm=%d",mxcycle, NCYCL,ic, iCell, iStm,*(iparm+(iCell-1)*NPARM+18),ipstm[0][i]);
if (iCell==5) continue; // ignore BB
if (iStm != ic) continue;
// ic: i-th time Step
// nACTv: number of exitation cells at ic time but cellType != 5 (BB)
// --- end ---
//TRACE("\nbreak1 mxcycle=%d NCYCL=%d ic=%d iCell=%d, iStm=%d",mxcycle, NCYCL,ic, iCell, iStm);
nACTv=nACTv+1;
*(iACTv[0]+nACTv)=jx;
*(iACTv[1]+nACTv)=jy;
*(iACTv[2]+nACTv)=jz;
*(iACTv[3]+nACTv)=*(iparm+(iCell-1)*NPARM+31); /*<Comment by ALF> iparm store each cell's parameters*/
// iparm(n,32): conduction speed
wtXCTm(ic,jx,jy,jz);
//if (jx==101 && jy==77 && jz==6) TRACE("\nA mxcycle=%d at ic=%d, iCell=%d",mxcycle,ic,iCell);
//if (iCell <3) TRACE("\nA %d %d %d %d %d %d",iCell,jx,jy,jz,ic,nACTv);
// write to file
// mxcycle: maximum cycle
if(mxcycle>=NCYCL) {
break;
}
// --- store current time to iXCT and last time to iXCTOld -->
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx); // init is INFTIME
*(iXCT[jz]+jy*NI+jx)=ic;
excited=1;
// Update ipttl[3][i]
// iparm(n,18) = BCL: basic cycle length (ms) of pacing
// Normally, only SN has this parameter > 0
/*if(*(iparm+(iCell-1)*NPARM+17)>0) {
if ((iS1S2==1) && (mS2BN>1)) {
itmp=ipttl[3][i]+mS2CL;
mS2BN--;
} else {
itmp=ipttl[3][i]+ipstm[0][i];
}
dS1S2Old=ipstm[2][i];
ipstm[0][i] = ipstm[0][i] + ipstm[1][i];
iCell5Ex=0;
// ipstm[1][i] is the step
// iparm(n,19) = pBN: beat number
// judge by ipttl[3][i]
if(itmp>*(iparm+(iCell-1)*NPARM+18)) continue;
if ((mS2ST/3 > ipttl[3][i]) &&(mS2ST/3 < itmp)) {
ipttl[3][i]=(short int)(mS2ST/3);
iS1S2=1;
} else {
ipttl[3][i]=itmp;
}
ipstm[2][i]=itmp-ipttl[3][i];
//TRACE("\nTime=%d, %d, %d, %d, %d %d",ic,itmp,ipttl[3][i],dS1S2Old, ipstm[0][i],ipstm[1][i]);
continue;
}*/
if(*(iparm+(iCell-1)*NPARM+17)>0) {
if (iCell==1) {
itmp=ipttl[3][i]+ipstm[0][i];
dS1S2Old=ipstm[2][i];
ipstm[0][i] = ipstm[0][i] + ipstm[1][i];
iCell5Ex=0;
if(itmp>*(iparm+(iCell-1)*NPARM+18)) continue;
ipttl[3][i]=itmp; continue;}
else
{
itmp=ipttl[3][i]+ipstm[0][i];
dS1S2Old=ipstm[2][i];
ipstm[0][i] = ipstm[0][i] + ipstm[1][i];
iCell5Ex=0;
if(itmp>*(iparm+(iCell-1)*NPARM+18)-ipstm[0][i]+3) continue;
ipttl[3][i]=itmp;
}
continue;
}
// iparm(n,24) = ICL: intrinsic cycle length(ms)
ipttl[3][i] = ipttl[3][i] + *(iparm+(iCell-1)*NPARM+23);
}
// ---- display the excitation number ----
// go to next Step
nblck = nblck + nACTv;
//TRACE("\nmxcycle =%d Step=%3d, number=%ld nblck=%ld ",mxcycle,ic,nACTv, nblck);
ic = ic + 1;
//TRACE("\nbreak2 ic=%d maxXctStep=%d ",ic, maxXctStep);
if (ic>=maxXctStep) break;
if (nACTv == 0) continue;
/**
* very important
*/
// --------- propagation (2000)------------>
nACTvOld=0;
// nACTv: at moment t, the number of excited cells
for (i=1;i<=nACTv;i++) {
excited=1;
ix=*(iACTv[0]+i);
iy=*(iACTv[1]+i);
iz=*(iACTv[2]+i);
iv=*(iACTv[3]+i);
iCell=*(mapCell[iz]+iy*NI+ix);
//if (ix == 64 && iy == 50 && iz == 64) TRACE("\nB AVN %d",iCell);
//----------- low conduction speed part ----------->
// iparm(n,32): conduction speed
if (*(iparm+(iCell-1)*NPARM+31)<=0) continue;
if (iCell==5) iCell5Ex=1;
//if (iCell==8) TRACE("\nCell=8 %d, %d, %d, ic=%d, %d %d",ix,iy,iz,ic,iv,mBCL);
// 100 = Conduction Speed of ATR?
if (iv<100) {
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=ix;
*(iACTvOld[1]+nACTvOld)=iy;
*(iACTvOld[2]+nACTvOld)=iz;
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31)+*(mapSpeed[iz]+iy*NI+ix); //added by zhu
if (iCell==5) {
/*ibbDLY=0;
bbDLYm(ix,iy,iz);
if (ibbDLY>0)
*(iACTvOld[3]+nACTvOld)=iv+ibbDLY;
TRACE("\nBB, %d",*(iACTvOld[3]+nACTvOld));
*/
ibbDLY=0;
// Add for BB interval by hui wang
ibbSTEP=0;
bbDLYm(ix,iy,iz);
// End of add for BB interval by hui wang, modified by zhu
if (ibbDLY>0) {ibbSTEP+=nbbSTEP;ibbDLY=100*ND/(ibbSTEP+1);}
if(ibbDLY>0 && (ibbDLY*ibbSTEP)<100*ND) ibbDLY+=1;
if (ibbDLY>0)
*(iACTvOld[3]+nACTvOld)=iv+ibbDLY;
else
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31);
continue;
}
/*if (iCell==3 || iCell==6) {
if (*(iXCTOld[iz]+iy*NI+ix)==INFTIME)
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31);
else {
irel = *(iXCT[iz]+iy*NI+ix)-*(iXCTOld[iz]+iy*NI+ix)-(*(iparm+NPARM*(iCell-1)+4)+*(mapAPD[iz]+iy*NI+ix))/3;
//irel = *(iXCT[iz]+iy*NI+ix)-*(iXCTOld[iz]+iy*NI+ix)-(*(iparm+NPARM*(iCell-1)+4))/3;
irel = 3*irel;
if (irel<*(iparm+NPARM*(iCell-1)+5)) {
tmp=100+*(iparm+NPARM*(iCell-1)+32)
-irel*(*(iparm+NPARM*(iCell-1)+32))/(*(iparm+NPARM*(iCell-1)+5));
if (tmp!=0) {
ivel = 100*(*(iparm+NPARM*(iCell-1)+31))/tmp;
} else {
ivel=*(iparm+NPARM*(iCell-1)+31);
}
} else {
// <--- time of RRP stored in iparm(6) ---
ivel=*(iparm+NPARM*(iCell-1)+31);
}
*(iACTvOld[3]+nACTvOld)=iv+ivel;}
}*/
/*else if (iCell==3) {
if (iCell5Ex==0) {
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31)- dS1S2Old/20;
TRACE("\nCell=3 E dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
} else {
if (mBCL<600&&dS1S2Old<140/3) {
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31)- (dS1S2Old+67)/33;
TRACE("\nCell=3 A dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
} else if (mBCL<600&&dS1S2Old>=140/3) {
*(iACTvOld[3]+nACTvOld)=iv;
TRACE("\nCell=3 B dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
} else if (mBCL>=600&&dS1S2Old<=210/3) {
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31);
TRACE("\nCell=3 C dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
} else {
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31)- dS1S2Old/12;
TRACE("\nCell=3 D dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
}
}
}*/
continue;
}
// ------- neighbourhood search (2100) -------->
// go to iv > 100 situation and set ires = the part of iv < 100
ires=iv-100*(int)(iv/100);
for (l=0;l<12;l++) {
jx=ix+iseqx[l];
if ((jx<=-1)||(jx>(NI-1))) continue; // >=0 <NI
jy=iy+iseqy[l];
if ((jy<=-1)||(jy>(NJ-1))) continue; // >=0 <NJ
jz=iz+iseqz[l];
if ((jz<=-1)||(jz>(NK-1))) continue; // >=0 <NK
// >>>>> aniso: within the ellpisoid ? >>>>>>>>>>>>
if (ANISO==1 && iCell==7) {
dani=local(ix,iy,iz);
//TRACE("\nx,y,z,dani, %2d %2d %2d %f",ix,iy,iz,dani);
// -- if can't solve local coordinates, treat as isotropic -->
if (dani > 0.0001) {
//lctran(iseqx[l],iseqy[l],iseqz[l],dani,xani,yani,zani);
xani=iseqx[l]*tmswf[0][0]+iseqy[l]*tmswf[0][1]+iseqz[l]*tmswf[0][2];
yani=iseqx[l]*tmswf[1][0]+iseqy[l]*tmswf[1][1]+iseqz[l]*tmswf[1][2];
zani=iseqx[l]*tmswf[2][0]+iseqy[l]*tmswf[2][1]+iseqz[l]*tmswf[2][2];
dxani=xani*yaxis[1]*zaxis[2]+yani*yaxis[2]*zaxis[0]
+zani*yaxis[0]*zaxis[1]-zani*yaxis[1]*zaxis[0]
-xani*yaxis[2]*zaxis[1]-yani*yaxis[0]*zaxis[2];
dyani=xaxis[0]*yani*zaxis[2]+xaxis[1]*zani*zaxis[0]
+xaxis[2]*xani*zaxis[1]-xaxis[2]*yani*zaxis[0]
-xaxis[0]*zani*zaxis[1]-xaxis[1]*xani*zaxis[2];
dzani=xaxis[0]*yaxis[1]*zani+xaxis[1]*yaxis[2]*xani
+xaxis[2]*yaxis[0]*yani-xaxis[2]*yaxis[1]*xani
-xaxis[0]*yaxis[2]*yani-xaxis[1]*yaxis[0]*zani;
xani=dxani/dani;
yani=dyani/dani;
zani=dzani/dani;
// itms=maps(ix,iy,iz)+1
//TRACE("\nd %f %f %f %f",dxani,xani,yaxis[0],zaxis[0]);
itms1=*(iXCTapd[iz]+iy*NI+ix);
elp=xani*xani/vt2[itms1]+
yani*yani/vt2[itms1]+
zani*zani/vl2[itms1];
// write(0,*) x,y,z,elp
// TRACE("\n %d %f",itms1,elp);
if (elp > 1.0) continue;
}
}
// <<<<<<<<<<<<<<<<<<<< aniso <<<<<<<<<<<<<<<<<<<
mCell=*(mapCell[jz]+jy*NI+jx);
if ((iCell<=7)&&(mCell<=7)&&(((iCell-mCell)>1)||
((iCell-mCell)<-1))) continue;
if ((*(iparm+NPARM*(mCell-1)+33)>0)&&( (mCell>7 && iCell>7 && mCell!=iCell) || (mCell<=7 && iCell>mCell) || (mCell>7 && !(iCell==mCell || iCell==2)))) continue;
//if ((*(iparm+NPARM*(mCell-1)+33)>0)&&( (mCell<=7 && iCell>mCell) || (mCell>7 && !(iCell==mCell || iCell==2)))) continue;
//if ((*(iparm+NPARM*(mCell-1)+33)>0)&&(iCell>mCell)) continue;
//if ((*(iparm+NPARM*(mCell-1)+33)<0)&&(iCell<mCell)) continue;
//if ((*(iparm+NPARM*(mCell-1)+33)<0)&&( (mCell<=7 && iCell<mCell) || (mCell>7 && !(iCell==mCell || iCell==7)))) continue;
if ((*(iparm+NPARM*(mCell-1)+33)<0)&&( (mCell>7 && iCell>7 && mCell!=iCell) || (mCell<=7 && iCell<mCell) || (mCell>7 && !(iCell==mCell || iCell==7)))) continue;
//if (jx == 64 && jy == 50 && jz == 64) TRACE("\nC AVN %d",mCell);
if (mCell<=0) continue; // continue;
if (mCell>=15) continue; // continue;
if (mCell==5) iCell5Ex=1;
//if (iCell==8) TRACE("\nCell=8 %d, %d, %d, ic=%d, %d %d",jx,jy,jz,ic);;
// --- coupling interval ------>
idltt=ic-*(iXCT[jz]+jy*NI+jx);
if (*(iXCT[jz]+jy*NI+jx)==INFTIME) idltt=INFTIME;
// --- change in cycle length ------>
idltc=idltt+*(iXCTOld[jz]+jy*NI+jx)-*(iXCT[jz]+jy*NI+jx);
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME) idltc=0;
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
*(iparm+(1-1)*NPARM+23)>0)
idltc = ic-*(iXCT[jz]+jy*NI+jx)-*(iparm+(1-1)*NPARM+23);
// rdXCT(ic,jx,jy,jz);
// irp = time in phase 2 + mapACT/3 +plateau of potential in phase 3 *idltc/100
// --- absolute refractory period ------>
//irp=(*(iparm+NPARM*(mCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3
// +*(iparm+(mCell-1)*NPARM+10)*idltc/100;
irp=(*(iparm+NPARM*(mCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3;
//if (mCell==6)
// irp=(*(iparm+NPARM*(mCell-1)+4))/3;
irel=idltt-irp;
// ++++++++ in absolute refractory period ? +++++++
if (irel<=0) continue;
//if (*(mapAPD[jz]+jy*NI+jx)>20) idltc = 2*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>20 && idltc<0) idltc = 2*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>30 && idltc<0) idltc = 3*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>30 && idltc>0) idltc = 2*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>40 && idltc>0) idltc = 2*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>40 && idltc>0) idltc = 2*idltc;
//if (mCell==3) TRACE("\nCell=3 %d, %d, %d,%d,%d,%d",irp,ic,*(iXCT[jz]+jy*NI+jx),*(mapAPD[jz]+jy*NI+jx),*(iparm+(mCell-1)*NPARM+10),idltc);
*(mapAPD[jz]+jy*NI+jx) += *(iparm+(mCell-1)*NPARM+10)*idltc*3/100; //added by Zhu
//if (mCell==3) TRACE("\nCell=3 %d, %d, %d",*(mapAPD[jz]+jy*NI+jx)/3,idltc,irp);
if (*(iXCT[jz]+jy*NI+jx)==INFTIME && mCell==3) {irel=INFTIME;*(mapAPD[jz]+jy*NI+jx)=0;}
// --- find automaticity in stimul data ----
// iparm(n,24), ICL: intrinsic cycle length (ms)
iSTOP =0;
if (*(iparm+NPARM*(mCell-1)+23)>0) { // !=0 August 10, 1996
// <--- next touch time should be beyound ARP of the cell --
for (is=0;is<nttl;is++) {
if (jx!=ipttl[0][is]) continue;
if (jy!=ipttl[1][is]) continue;
if (jz!=ipttl[2][is]) continue;
// --- iparm(23) used for adjusting intermediate change
// of EP intrinsic cycle length --->
ICL = *(iparm+NPARM*(mCell-1)+23);
ist = ic-*(iXCT[jz]+jy*NI+jx);
// PRT: protection indicator
// --- no protection ---->
if (*(iparm+NPARM*(mCell-1)+24)==0) {
if (ist<=irp) continue; //{iSTOP=1;break;}
//if (iSTOP==1)
ipttl[3][is]=ic+ICL; // ICL/3
/******************/
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=jx;
*(iACTvOld[1]+nACTvOld)=jy;
*(iACTvOld[2]+nACTvOld)=jz;
*(iACTvOld[3]+nACTvOld)=*(iparm+NPARM*(mCell-1)+31)+ires;
wtXCTm(ic,jx,jy,jz);
if (mxcycle>=NCYCL) {iSTOP=1;break;}
//if (ic==*(iXCT[jz]+jy*NI+jx)) continue;
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;//added to by zhu
irel=0;
excited=1;
TRACE("\n %d, %d %d %d %d %d",*(iXCTOld[jz]+jy*NI+jx),*(iXCT[jz]+jy*NI+jx),ic,jx,jy,jz);
/******************/
//iSTOP=1;
continue; //break; // rewrite condition
}
if (idltt==INFTIME) continue;
//ist = ic-*(iXCT[jz]+jy*NI+jx);
// if (ist<=irp) goto loop21; // August 10, 1996
if (ist<=irp) continue; //{iSTOP=1;break;}
phsft =(float)100.*(idltt/ICL);
mxDLY =(float)*(iparm+NPARM*(mCell-1)+25);
mACCl =(float)*(iparm+NPARM*(mCell-1)+26);
if (mxDLY == 0 && mACCl == 0) continue;
icross=(float)*(iparm+NPARM*(mCell-1)+27);
if (icross == 0 || icross == 100) continue;
if (phsft<=icross)
delt=phsft*mxDLY/icross;
else
delt=mACCl-(phsft-icross)*mACCl/(100-icross);
// -- store touch time --->
// -- modify next stimulating time --->
ipttl[3][is]=ipttl[3][is]+(int)(ICL*delt/100);
//TRACE("\ntime=%4d,ixt=%4d,idltt=%4d,icl=%4d,phsft=%4d,intermediate=%4d",
// ic, *(iXCT[jz]+jy*NI+jx),idltt,ICL, (int)phsft, ipttl[3][is]);
// change value after each touch time
// avoiding from successive modification by surrounding cells
if (ic==*(iXCT[jz]+jy*NI+jx)) continue;
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
irel=0;
excited=1;
//iSTOP=1;
continue; //break; // rewrite condition
}
}
if (iSTOP==1) continue;
if (irel==0) continue;
// +++++ special processing for BB +++++
if (mCell==5) {
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
// Add for BB interval by hui wang modified by Zhu
// variable ibbSTEP, nbbSTEP are added to store steps by first BB
// ibbSTEP is a function in bbDLYm(i,j,k)
nbbSTEP=0;
ibbDLY=0;
ibbSTEP=0;
bbDLYm(jx,jy,jz);
nbbSTEP=ibbSTEP;
// end of add for BB interval by hui wang
//ic+=10; // add by hw, BB interval
//TRACE("\n nHB = %d, ic= %d",nHB,ic);
for(kBB=0;kBB<nBB;kBB++) {
jx=iBB[0][kBB];
jy=iBB[1][kBB];
jz=iBB[2][kBB];
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=jx;
*(iACTvOld[1]+nACTvOld)=jy;
*(iACTvOld[2]+nACTvOld)=jz;
//*(iACTvOld[3]+nACTvOld)=100;
ibbDLY=0;
// Add for BB interval by hui wang,modified by zhu
ibbSTEP=0;
bbDLYm(jx,jy,jz);
ibbSTEP+=nbbSTEP;
ibbDLY=100*ND/(ibbSTEP+1);
if((ibbDLY*ibbSTEP)<100*ND) ibbDLY+=1;
// End of add for BB interval by hui wang
*(iACTvOld[3]+nACTvOld)=ibbDLY;
wtXCTm(ic,jx,jy,jz);
//if (jx==101 && jy==77 && jz==6) TRACE("\nB mxcycle=%d at ic=%d, iCell=%d",mxcycle,ic,mCell);
//if (mCell >2 && mCell <6) TRACE("\nB %d %d %d %d %d %d",mCell,jx,jy,jz,ic,nACTvOld);
if (mxcycle>=NCYCL) break;
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
excited=1;
}
continue;
}
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=jx;
*(iACTvOld[1]+nACTvOld)=jy;
*(iACTvOld[2]+nACTvOld)=jz;
wtXCTm(ic,jx,jy,jz);
//TRACE("\nbreak3 mxcycle=%d NCYCL=%d ",mxcycle, NCYCL);
if (mxcycle>=NCYCL) break;
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
irel = 3*irel;
//if (*(iXCTOld[jz]+jy*NI+jx)!=INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
// irel>*(iparm+NPARM*(mCell-1)+5))
// *(mapAPD[jz]+jy*NI+jx)=0;
// time of RRP stored in iparm(6)
if ((irel)<*(iparm+NPARM*(mCell-1)+5)) {
tmp=100+*(iparm+NPARM*(mCell-1)+32)
-irel*(*(iparm+NPARM*(mCell-1)+32))/(*(iparm+NPARM*(mCell-1)+5));
if (tmp!=0) {
ivel = 100*(*(iparm+NPARM*(mCell-1)+31))/tmp;
} else {
ivel=*(iparm+NPARM*(mCell-1)+31);
}
} else {
// <--- time of RRP stored in iparm(6) ---
ivel=*(iparm+NPARM*(mCell-1)+31);
}
*(mapSpeed[jz]+jy*NI+jx)=ivel-*(iparm+NPARM*(mCell-1)+31);//added by Zhu
// test results
//TRACE("\nmcell=%4d,ic=%4d,idltt=%4d,idltc=%4d,ivel=%4d",mCell,ic,idltt,idltc,ivel);
if (iCell!=mCell) {
if (mCell == 5) {
bbDLYm(jx,jy,jz);
*(iACTvOld[3]+nACTvOld)=ibbDLY;
//TRACE("\n BB2=%d, %d %d (%d %d %d) ic=%d ",*(iACTvOld[3]+nACTvOld),iv,ibbDLY,ix,iy,iz, ic);
continue;
}
*(iACTvOld[3]+nACTvOld)=ivel;
continue;
}
*(iACTvOld[3]+nACTvOld)=ivel+ires;
}
// <------- END of neighbourhood search (2100) -----
// >>>>>>>> anisotropy >>>>>
if (ANISO==1 && iCell == 7) {
// ltrat==2;
if (*(iXCTapd[iz]+iy*NI+ix) < 2) {
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=ix;
*(iACTvOld[1]+nACTvOld)=iy;
*(iACTvOld[2]+nACTvOld)=iz;
//*(iACTvOld[3]+nACTvOld)=ires+
// *(iparm+NPARM*(iCell-1)+31);
*(iACTvOld[3]+nACTvOld)=ires+
*(iparm+NPARM*(iCell-1)+31)+*(mapSpeed[iz]+iy*NI+ix);
*(iXCTapd[iz]+iy*NI+ix)+=1;
} else {
*(iXCTapd[iz]+iy*NI+ix)=0;
}
}
// <<<<<<<<<<<
}
// <------- END of propagation (2000) -----
// +++++++++++ for high speed ++++++++
mACTv=nACTvOld;
// ------- propagation (1000) -------->
for(i=1;i<=nACTvOld;i++) {
idist=(int)(*(iACTvOld[3]+i)/100);
if (idist<2) continue;
*(jACTv[0]+1)=*(iACTvOld[0]+i);
*(jACTv[1]+1)=*(iACTvOld[1]+i);
*(jACTv[2]+1)=*(iACTvOld[2]+i);
ires=*(iACTvOld[3]+i)-idist*100;
nStep=0;
nbrch=1;
jdist=1;
while (1) {
for (j=1;j<=nbrch;j++) {
jx0=*(jACTv[0]+j);
jy0=*(jACTv[1]+j);
jz0=*(jACTv[2]+j);
mCell=*(mapCell[jz0]+jy0*NI+jx0);
if (mCell==5) iCell5Ex=1;
for (l=0;l<12;l++) {
jx=jx0+iseqx[l];
if ((jx<=-1)||(jx>(NI-1))) continue; // <0 or >=NI
jy=jy0+iseqy[l];
if ((jy<=-1)||(jy>(NJ-1))) continue; // <0 or >=NJ
jz=jz0+iseqz[l];
if ((jz<=-1)||(jz>(NK-1))) continue; // <0 or >=NK
kCell = *(mapCell[jz]+jy*NI+jx);
//if (jx == 64 && jy == 50 && jz == 64) TRACE("\nE AVN %d",kCell);
if (kCell != mCell) continue;
//++++++++ in effective refractory period ? +++++++
// >>>>> aniso: within the ellpisoid ? >>>>>>>>>>>>
if (ANISO==1 && mCell==7) {
dani=local(jx0,jy0,jz0);
//TRACE("\nx,y,z,dani, %2d %2d %2d %f",ix,iy,iz,dani);
// -- if can't solve local coordinates, treat as isotropic -->
if (dani > 0.0001) {
//lctran(iseqx[l],iseqy[l],iseqz[l],dani,xani,yani,zani);
xani=iseqx[l]*tmswf[0][0]+iseqy[l]*tmswf[0][1]+iseqz[l]*tmswf[0][2];
yani=iseqx[l]*tmswf[1][0]+iseqy[l]*tmswf[1][1]+iseqz[l]*tmswf[1][2];
zani=iseqx[l]*tmswf[2][0]+iseqy[l]*tmswf[2][1]+iseqz[l]*tmswf[2][2];
dxani=xani*yaxis[1]*zaxis[2]+yani*yaxis[2]*zaxis[0]
+zani*yaxis[0]*zaxis[1]-zani*yaxis[1]*zaxis[0]
-xani*yaxis[2]*zaxis[1]-yani*yaxis[0]*zaxis[2];
dyani=xaxis[0]*yani*zaxis[2]+xaxis[1]*zani*zaxis[0]
+xaxis[2]*xani*zaxis[1]-xaxis[2]*yani*zaxis[0]
-xaxis[0]*zani*zaxis[1]-xaxis[1]*xani*zaxis[2];
dzani=xaxis[0]*yaxis[1]*zani+xaxis[1]*yaxis[2]*xani
+xaxis[2]*yaxis[0]*yani-xaxis[2]*yaxis[1]*xani
-xaxis[0]*yaxis[2]*yani-xaxis[1]*yaxis[0]*zani;
xani=dxani/dani;
yani=dyani/dani;
zani=dzani/dani;
// itms=maps(ix,iy,iz)+1
//TRACE("\nd %f %f %f %f",dxani,xani,yaxis[0],zaxis[0]);
itms1=*(iXCTapd[jz0]+jy0*NI+jx0);
elp=xani*xani/vt2[itms1]+
yani*yani/vt2[itms1]+
zani*zani/vl2[itms1];
// write(0,*) x,y,z,elp
// TRACE("\n %d %f",itms1,elp);
if (elp > 1.0) continue;
}
}
idltt=ic-*(iXCT[jz]+jy*NI+jx);
if (*(iXCT[jz]+jy*NI+jx)==INFTIME) idltt=INFTIME;
// --- change in cycle length ------>
idltc=idltt+*(iXCTOld[jz]+jy*NI+jx)-*(iXCT[jz]+jy*NI+jx);
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME) idltc=0;
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
*(iparm+(1-1)*NPARM+23)>0)
idltc = ic-*(iXCT[jz]+jy*NI+jx)-*(iparm+(1-1)*NPARM+23);
// rdXCT(ic,jx,jy,jz);
// irp = time in phase 2 + mapACT/3 +plateau of potential in phase 3 *idltc/100
// --- absolute refractory period ------>
//irp=(*(iparm+NPARM*(mCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3
// +*(iparm+(mCell-1)*NPARM+10)*idltc/100;
irp=(*(iparm+NPARM*(mCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3;
//if (mCell==6)
// irp=(*(iparm+NPARM*(mCell-1)+4))/3;
irel=idltt-irp;
if (irel<=0) continue; // continue;
if (*(iXCT[jz]+jy*NI+jx)==INFTIME && mCell==3) {irel=INFTIME;*(mapAPD[jz]+jy*NI+jx)=0;}
*(mapAPD[jz]+jy*NI+jx) += *(iparm+(mCell-1)*NPARM+10)*idltc*3/100; //added by Zhu
/*
idltt=ic-*(iXCT[jz]+jy*NI+jx);
if (*(iXCT[jz]+jy*NI+jx)==INFTIME) idltt=INFTIME;
idltc=idltt+*(iXCTOld[jz]+jy*NI+jx)-*(iXCT[jz]+jy*NI+jx);
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME) idltc=0;
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
*(iparm+(1-1)*NPARM+23)>0)
idltc = ic-*(iXCT[jz]+jy*NI+jx)-*(iparm+(1-1)*NPARM+23);
// rdXCT(ic,jx,jy,jz);
// irp=(*(iparm+NPARM*(kCell-1)+4)+*(mapACT[jz]+jy*NI+jx))/3+
// *(iparm+(kCell-1)*NPARM+10)*idltc/100;
//irp=(*(iparm+NPARM*(kCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3+
// *(iparm+(kCell-1)*NPARM+10)*idltc/100;
irp=(*(iparm+NPARM*(kCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3;
*(mapAPD[jz]+jy*NI+jx) += *(iparm+(kCell-1)*NPARM+10)*idltc*3/100; //added by Zhu
irel=idltt-irp;
if (*(iXCT[jz]+jy*NI+jx)==INFTIME) irel=INFTIME;
*/
irel = 3*irel;
//if (*(iXCTOld[jz]+jy*NI+jx)!=INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
// irel>=*(iparm+NPARM*(kCell-1)+5))
// *(mapAPD[jz]+jy*NI+jx)=0;
if ((irel)<*(iparm+NPARM*(kCell-1)+5)) {
tmp=100+*(iparm+NPARM*(mCell-1)+32)
-irel*(*(iparm+NPARM*(mCell-1)+32))/(*(iparm+NPARM*(mCell-1)+5));
if (tmp!=0) {
ivel = 100*(*(iparm+NPARM*(mCell-1)+31))/tmp;
} else {
ivel=*(iparm+NPARM*(mCell-1)+31);
}
} else {
ivel=*(iparm+NPARM*(kCell-1)+31);
}
*(mapSpeed[jz]+jy*NI+jx)=ivel-*(iparm+NPARM*(kCell-1)+31);//added by Zhu
nStep=nStep+1;
*(kACTv[0]+nStep)=jx;
*(kACTv[1]+nStep)=jy;
*(kACTv[2]+nStep)=jz;
// nStep++;
mACTv=mACTv+1;
*(iACTvOld[0]+mACTv)=jx;
*(iACTvOld[1]+mACTv)=jy;
*(iACTvOld[2]+mACTv)=jz;
*(iACTvOld[3]+mACTv)=ivel+ires;
// mACTv++;
// TRACE(" D%d,",mACTv);
wtXCTm(ic,jx,jy,jz);
//if (jx==101 && jy==77 && jz==6) TRACE("\nD mxcycle=%d at ic=%d, iCell=%d",mxcycle,ic,kCell);
//if (kCell >2 && kCell <6) TRACE("\nD %d %d %d %d %d %d",kCell,jx,jy,jz,ic,mACTv);
//TRACE("\nbreak4 mxcycle=%d NCYCL=%d ",mxcycle, NCYCL);
if (mxcycle>=NCYCL) {
//TRACE("\nbreak5 iSTOP=%d mxcycle=%d,NCYCL=%d",iSTOP, mxcycle, NCYCL);
iSTOP =1;
break;
}
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
excited=1;
}
/*// >>>>>>>> anisotropy >>>>>
if (ANISO==1 && mCell == 7) {
// ltrat==3;
if (*(iXCTapd[jz0]+jy0*NI+jx0) < 3) {
mACTv=mACTv+1;
*(iACTvOld[0]+mACTv)=jx0;
*(iACTvOld[1]+mACTv)=jy0;
*(iACTvOld[2]+mACTv)=jz0;
*(iACTvOld[3]+mACTv)=ivel+ires;
//*(iACTvOld[3]+nACTvOld)=ires+
// *(iparm+NPARM*(iCell-1)+31);
*(iACTvOld[3]+nACTvOld)=ires+
*(iparm+NPARM*(mCell-1)+31)+*(mapSpeed[jz0]+jy0*NI+jx0);
*(iXCTapd[jz0]+jy0*NI+jx0)+=1;
} else {
*(iXCTapd[jz0]+jy0*NI+jx0)=0;
}
}
// <<<<<<<<<<<*/
if (iSTOP ==1) break;
}
if (iSTOP ==1) break;
if (nStep==0) break; // continue;
jdist=jdist+1;
if (jdist>=idist) break; // continue;
for(k=1;k<=nStep;k++) {
*(jACTv[0]+k)=*(kACTv[0]+k);
*(jACTv[1]+k)=*(kACTv[1]+k);
*(jACTv[2]+k)=*(kACTv[2]+k);
}
nbrch=nStep;
nStep=0;
}
}
//TRACE("\nbreak5 iSTOP=%d ",iSTOP);
if (iSTOP ==1) break;
// <------- END of propagation (1000) -------------
if (excited == 0) break;
nACTv=mACTv;
// nblck=nblck+nACTv;
for(i=1;i<=nACTv;i++) {
for(j=0;j<4;j++) {
*(iACTv[j]+i)=*(iACTvOld[j]+i);
}
}
} // END of whole while loop
TRACE("\nmxcycle=%d",mxcycle);
mxcycle++; // hui
// add HB info
for (itmp=0; itmp<50*ND; itmp++) {
for (tmp=0;tmp<NCYCL;tmp++) {
vHB[tmp][itmp]=0;
}
}
for (itmp=0; itmp<nHB; itmp++) {
l=iHB[0][itmp];
j=iHB[1][itmp];
k=iHB[2][itmp];
if (itmp==0) i=*(locXCT[k]+j*NJ+l); // Consider only the point near AV Node
for (tmp=0;tmp<mxcycle;tmp++) {
vHB[tmp][itmp]=*(mapXCTm[tmp]+i);
}
}
// Save
CFile f;
CFileException e;
//short int index;
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'x');
//filepath.SetAt(index+2,'c');
//filepath.SetAt(index+3,'t');
if (myid==0){
if (!f.Open( dataPath+"tour.xct ", CFile::modeCreate | CFile::modeWrite, &e )) {
#ifdef _DEBUG
afxDump << "File could not be opened " << e.m_cause << "\n";
#endif
}
//f.Write(&mxcycle,2);
f.Write(&miBN,2);
f.Write(&ic,2);
f.Write(&totalCell,4);
for(j=0;j<mxcycle;j++) {
for(i=0;i<totalCell;i++) f.Write(mapXCTm[j]+i,2);
}
f.Close();
};
/*
FILE * iow;
iow=fopen("fpMapXCTm.txt","wt");
if (iow == NULL) {
fprintf(stderr, "Open .txt for write failed! \n");
return;
}
long temploc;
temploc=*(locXCT[45]+22*NJ+33);
fprintf(iow,"33 22 45 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[40]+30*NJ+32);
fprintf(iow,"32 30 40 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[48]+20*NJ+30);
fprintf(iow,"30 20 48 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[56]+8*NJ+26);
fprintf(iow,"26 8 56 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[62]+10*NJ+21);
fprintf(iow,"21 10 62 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[62]+30*NJ+13);
fprintf(iow,"13 30 62 %3d\n",*(mapXCTm[0]+temploc));
for(l=0;l<mxcycle;l++) {
fprintf(iow,"l=%d\n",l);
for(k=0;k<NK;k++) {
for(j=0;j<NJ;j++) {
for(i=NI-1;i>-1;i--) {
temploc = *(locXCT[k]+j*NJ+i);
if (temploc < 0) fprintf(iow," ");
else fprintf(iow,"%3d ",*(mapXCTm[l]+temploc));
}
fprintf(iow,"j=%d\n",j);
}
fprintf(iow,"k=%d\n",k);
}
}
fclose(iow);
*/
for(i=0;i<4;i++) {
free(iACTv[i]);
free(iACTvOld[i]);
free(jACTv[i]);
free(kACTv[i]);
}
for(i=0;i<NK;i++) {
free(iXCT[i]);
free(iXCTapd[i]);
free(iXCTOld[i]);
}
}
// ---- BB conduction ----
// Look up the Bachmann-bundle touch record for grid point (i00, j00, k00)
// in the global ipttl table and derive the BB conduction delay from its
// stored step count.
//
// Side effects (globals): on a match, sets ibbSTEP to the recorded step
// count and ibbDLY to the integer per-step delay, rounded up so that
// ibbDLY * steps covers the full 100*ND interval.  If no table entry
// matches, both globals are left untouched.  If several entries match,
// the last one wins (the scan does not stop at the first hit, matching
// the original behavior).
void bbDLYm(short int i00,short int j00,short int k00) {
	short int n;
	for(n=0;n<nttl;n++) {
		if (i00==ipttl[0][n] && j00==ipttl[1][n] && k00==ipttl[2][n]) {
			// Step count recorded for this touch point ("Add for BB
			// interval by hui wang" in the original).
			ibbSTEP=ipttl[3][n];
			// Integer per-step delay; bump by one when truncation would
			// leave ibbDLY * steps short of the 100*ND interval.
			ibbDLY=100*ND/(ipttl[3][n]+1);
			if (ibbDLY*ipttl[3][n]<100*ND) ibbDLY+=1;
		}
	}
}
// ********** find the time since last excitation*********
// Find the time since the last excitation of grid point (i00, j00, k00)
// relative to the current clock icc, scanning that point's excitation
// history in mapXCTm from the newest cycle slot downward.
//
// Outputs are written to globals:
//   idltt - interval between icc and the most recent excitation at or
//           before icc; left at INFTIME when none is found or the point
//           has no map entry.
//   idltc - change in cycle length around that excitation, computed from
//           the neighboring history slots; left at 0 when the boundary
//           check below rules it out or the next slot is unset (INFTIME).
//
// NOTE(review): this routine indexes locXCT with j00*NI+i00, but other
// call sites in this file use j*NJ+l (see the nHB loop and the commented
// debug dump) — confirm which stride is correct for locXCT.
void rdXCTm(short int icc,short int i00,short int j00,short int k00) {
	short int ncyc,n1cyc;
	idltt=INFTIME; /*<Comment by ALF> period between 2 continuous excitation*/
	idltc=0; /*<Comment by ALF> delta of 2 periods */
	short int n;
	long locxct;
	// Flattened location index for this grid point; negative means the
	// point is not part of the stored excitation map.
	locxct=*(locXCT[k00]+j00*NI+i00);
	if(locxct<0) return;
	// Scan history newest-first for the latest excitation time <= icc.
	// If no slot qualifies, n underflows to -1 and the guard below exits.
	for(n=NCYCL-1;n>=0;n--) {
		ncyc=*(mapXCTm[n]+locxct);
		if (icc>=ncyc) {
			idltt=icc-ncyc;
			break;
		}
	}
	// idltc needs both neighbors mapXCTm[n-1] and mapXCTm[n+1], so n must
	// be strictly inside (0, NCYCL-1).
	if ((n<=0)||(n>=NCYCL-1)) return; /*<Comment by ALF> To prevent a over-cross array operation*/
	n1cyc=*(mapXCTm[n+1]+locxct);
	if (n1cyc==INFTIME) return;
	// Difference of successive cycle lengths:
	// (n1cyc - ncyc) - (ncyc - mapXCTm[n-1]).
	idltc=n1cyc-ncyc-ncyc+*(mapXCTm[n-1]+locxct);
	return;
}
// ******* sub write XCT **********
void wtXCTm(short int icc,short int i00,short int j00,short int k00)
{
short int n;
long locxct=*(locXCT[k00]+j00*NI+i00);
if(locxct<0) return;
for(n=0;n<NCYCL;n++) {
if (*(mapXCTm[n]+locxct)!=INFTIME) continue;
*(mapXCTm[n]+locxct)=icc;
if (mxcycle<n) mxcycle=n;
break;
}
return;
}
// Body surface potential calculation
void BSPcalm(void) {
void BSPitmm(short int, short int **, float *, float *, float *, float *, float **, float **,short int, float *, float *);
//
vector<float> epicPOT[TSTEP];
short int nVCG,BSPm,mTime,iTime,i,j;
short int nsnrt;
float *VCGs[3],*VCGs_reduce[3];//*VCGs_reduce[3] by sf 090622
float eff;
float *endoHnnA;
//
float *endoPOT[TSTEP];//*endoPOT_reduce[TSTEP];//*endoPOT_reduce[TSTEP] by sf 090622
HFILE hFp;
short int index;
int nn,n0,n1,n2,ni;
float pi=3.1415926;
short int *tnd[3];
// cpu CPU
//int cpunum = omp_get_num_threads();
int cpunum = omp_get_num_procs();
//int cpunum = 4;
// CPU
omp_set_num_threads(cpunum);
for(i=0;i<TSTEP;i++) {
endoPOT[i]=(float *) malloc(2*NENDO*ND3*4);
//endoPOT_reduce[i]=(float *) malloc(2*NENDO*ND3*4);//by sf 090622
if(endoPOT[i]==NULL){
cout<<"Out of memory !\n";
exit(1);
}
}
for(i=0;i<TSTEP;i++) {
for(ni=0;ni<2*NENDO*ND3;ni++) {
*(endoPOT[i]+ni)=(float)0;
//*(endoPOT_reduce[i]+ni)=(float)0;
}
}
endoHnnA=(float *) malloc(2*NENDO*ND3*4);
if((endoHnnA==NULL)) {
cout<<"Out of memory !\n";
exit(1);
}
for(ni=0;ni<2*NENDO*ND3;ni++) *(endoHnnA+ni)=(float)0;
// malloc epicardial potential array
//ASSERT(Nepic != 0);
if(Nepic == 0){
fprintf(stdout, "Nepic == 0");
exit(1);
}
for (i=0; i<TSTEP; ++i) {
epicPOT[i].resize(Nepic,0);
}
for(i=0;i<3;i++) {
VCGs[i]=(float *)malloc(TSTEP*4);
VCGs_reduce[i]=(float *)malloc(TSTEP*4);//by sf 090622
if (VCGs[i]==NULL) {
cout<<"Out of memory !\n";
exit(1);
flag_flop=1;
return;
}
}
for (i=0;i<3;i++){
VCG[i]=(float)0;
}
for (i=0;i<3;i++) {
for (j=0;j<TSTEP;j++) {
*(VCGs[i]+j)=(float)0;
*(VCGs_reduce[i]+j)=(float)0;//by sf 090622
}
}
// matrix data of endo-body surface -
for(i=0;i<3;i++) {
tnd[i] = (short int *) malloc((NL-2)*2*2);
if(tnd[i]==NULL) {
cout<<"Out of memory !\n";
exit(1);
}
}
// tnd
hFp=_lopen(dataPath+"tour.tnd ",OF_READ);
if (hFp==HFILE_ERROR) {
fprintf(stdout,"Can not open tnd file ! !\n");
fflush(stdout);
flag_flop=1;
return;
}
for(i=0;i<(NL-2)*2;i++) {
_lread(hFp,tnd[0]+i,2);
_lread(hFp,tnd[1]+i,2);
_lread(hFp,tnd[2]+i,2);
}
_lclose(hFp);
float ax,ay,az,x0,y0,z0,x1,y1,z1,x2,y2,z2,a01,a12,a20,s,h;
float x3,y3,z3,u3,x4,y4,z4,u4,x5,y5,z5,u5;
short int ISGN=1;
float *hnn;
hnn=(float *) malloc((NL-2)*2*(NL-2)*2*4);
if (hnn==NULL) {
cout<<"Out of memory !\n";
exit(1);
}
for(ni=0;ni<(NL-2)*2*(NL-2)*2;ni++) {
*(hnn+ni)=(float)0;
}
ni=0;
for(nn=0;nn<(NL-2)*2;nn++) {
// ---- measurement location -------
n0=*(tnd[0]+nn)-1; /*<Comment by ALF> triangle node array */
n1=*(tnd[1]+nn)-1;
n2=*(tnd[2]+nn)-1;
ax=(*(r[0]+n0)+*(r[0]+n1)+*(r[0]+n2))/3; /*<Comment by ALF> distance from center of triangle to view point */
ay=(*(r[1]+n0)+*(r[1]+n1)+*(r[1]+n2))/3;
az=(*(r[2]+n0)+*(r[2]+n1)+*(r[2]+n2))/3;
for(i=0;i<(NL-2)*2;i++) {
if (i==nn) {
*(hnn+ni)=0.5;
ni++;
continue;
}
/*<Comment by ALF> xn,yn,zn is the co-ordinate by set the center of triangle as the the origin*/
n0=*(tnd[0]+i)-1;
x0=*(r[0]+n0)-ax;
y0=*(r[1]+n0)-ay;
z0=*(r[2]+n0)-az;
n1=*(tnd[1]+i)-1;
x1=*(r[0]+n1)-ax;
y1=*(r[1]+n1)-ay;
z1=*(r[2]+n1)-az;
n2=*(tnd[2]+i)-1;
x2=*(r[0]+n2)-ax;
y2=*(r[1]+n2)-ay;
z2=*(r[2]+n2)-az;
a01=acos((x0*x1+y0*y1+z0*z1)/sqrt(x0*x0+y0*y0+z0*z0)/sqrt(x1*x1+y1*y1+z1*z1));
a12=acos((x1*x2+y1*y2+z1*z2)/sqrt(x1*x1+y1*y1+z1*z1)/sqrt(x2*x2+y2*y2+z2*z2));
a20=acos((x2*x0+y2*y0+z2*z0)/sqrt(x2*x2+y2*y2+z2*z2)/sqrt(x0*x0+y0*y0+z0*z0));
s=(a01+a12+a20)/2;
h=tan(s/2)*tan((s-a01)/2)*tan((s-a12)/2)*tan((s-a20)/2);
if (h<0) h=-h;
s=sqrt(h);
h=atan(s)/pi;
*(hnn+ni)=-h;
ni++;
}
}
float *endoHnnB,*endoHnnC;
endoHnnB=(float *) malloc(NENDO*ND3*(NL-2)*2*4);
endoHnnC=(float *) malloc(NENDO*ND3*(NL-2)*2*4);
if ((endoHnnB==NULL)||(endoHnnC==NULL)) {
cout<<"Out of memory !\n";
exit(1);
}
for(ni=0;ni<NENDO*ND3*(NL-2)*2;ni++) {
*(endoHnnB+ni)=(float)0;
*(endoHnnC+ni)=(float)0;
}
for(nn=0;nn<NendoB;nn++) {
// measurement location
ax=HRTx0+endoBx[nn]*tmswf[0][0]+endoBy[nn]*tmswf[0][1]+endoBz[nn]*tmswf[0][2];
ay=HRTy0+endoBx[nn]*tmswf[1][0]+endoBy[nn]*tmswf[1][1]+endoBz[nn]*tmswf[1][2];
az=HRTz0+endoBx[nn]*tmswf[2][0]+endoBy[nn]*tmswf[2][1]+endoBz[nn]*tmswf[2][2];
for(i=0;i<(NL-2)*2;i++) {
n0=*(tnd[0]+i)-1;
x0=*(r[0]+n0)-ax;
y0=*(r[1]+n0)-ay;
z0=*(r[2]+n0)-az;
n1=*(tnd[1]+i)-1;
x1=*(r[0]+n1)-ax;
y1=*(r[1]+n1)-ay;
z1=*(r[2]+n1)-az;
n2=*(tnd[2]+i)-1;
x2=*(r[0]+n2)-ax;
y2=*(r[1]+n2)-ay;
z2=*(r[2]+n2)-az;
x3=(z2*x0-z0*x2)*(x0*y1-x1*y0)-(x2*y0-x0*y2)*(z0*x1-z1*x0);
y3=(x2*y0-x0*y2)*(y0*z1-y1*z0)-(y2*z0-y0*z2)*(x0*y1-x1*y0);
z3=(y2*z0-y0*z2)*(z0*x1-z1*x0)-(z2*x0-z0*x2)*(y0*z1-y1*z0);
u3=(y2*z0-y0*z2)*(y0*z1-y1*z0)+(z2*x0-z0*x2)*(z0*x1-z1*x0)+(x2*y0-x0*y2)*(x0*y1-x1*y0);
a01=atan(-sqrt(x3*x3+y3*y3+z3*z3)/u3);
x4=(z0*x1-z1*x0)*(x1*y2-x2*y1)-(x0*y1-x1*y0)*(z1*x2-z2*x1);
y4=(x0*y1-x1*y0)*(y1*z2-y2*z1)-(y0*z1-y1*z0)*(x1*y2-x2*y1);
z4=(y0*z1-y1*z0)*(z1*x2-z2*x1)-(z0*x1-z1*x0)*(y1*z2-y2*z1);
u4=(y0*z1-y1*z0)*(y1*z2-y2*z1)+(z0*x1-z1*x0)*(z1*x2-z2*x1)+(x0*y1-x1*y0)*(x1*y2-x2*y1);
a12=atan(-sqrt(x4*x4+y4*y4+z4*z4)/u4);
x5=(z1*x2-z2*x1)*(x2*y0-x0*y2)-(x1*y2-x2*y1)*(z2*x0-z0*x2);
y5=(x1*y2-x2*y1)*(y2*z0-y0*z2)-(y1*z2-y2*z1)*(x2*y0-x0*y2);
z5=(y1*z2-y2*z1)*(z2*x0-z0*x2)-(z1*x2-z2*x1)*(y2*z0-y0*z2);
u5=(y1*z2-y2*z1)*(y2*z0-y0*z2)+(z1*x2-z2*x1)*(z2*x0-z0*x2)+(x1*y2-x2*y1)*(x2*y0-x0*y2);
a20=atan(-sqrt(x5*x5+y5*y5+z5*z5)/u5);
s=(a01+a12+a20-pi)*ISGN; // ISGN=1 only; since ISGN=-1 is impossible in our case
h=tan(s/2)*tan((s-a01)/2)*tan((s-a12)/2)*tan((s-a20)/2);
if (h<0) h=-h;
s=sqrt(h);
h=atan(s)/pi;
*(endoHnnB+nn)=h;
}
}
for(nn=0;nn<NendoC;nn++) {
// measurement location
ax=HRTx0+endoCx[nn]*tmswf[0][0]+endoCy[nn]*tmswf[0][1]+endoCz[nn]*tmswf[0][2];
ay=HRTy0+endoCx[nn]*tmswf[1][0]+endoCy[nn]*tmswf[1][1]+endoCz[nn]*tmswf[1][2];
az=HRTz0+endoCx[nn]*tmswf[2][0]+endoCy[nn]*tmswf[2][1]+endoCz[nn]*tmswf[2][2];
for(i=0;i<(NL-2)*2;i++) {
n0=*(tnd[0]+i)-1;
x0=*(r[0]+n0)-ax;
y0=*(r[1]+n0)-ay;
z0=*(r[2]+n0)-az;
n1=*(tnd[1]+i)-1;
x1=*(r[0]+n1)-ax;
y1=*(r[1]+n1)-ay;
z1=*(r[2]+n1)-az;
n2=*(tnd[2]+i)-1;
x2=*(r[0]+n2)-ax;
y2=*(r[1]+n2)-ay;
z2=*(r[2]+n2)-az;
x3=(z2*x0-z0*x2)*(x0*y1-x1*y0)-(x2*y0-x0*y2)*(z0*x1-z1*x0);
y3=(x2*y0-x0*y2)*(y0*z1-y1*z0)-(y2*z0-y0*z2)*(x0*y1-x1*y0);
z3=(y2*z0-y0*z2)*(z0*x1-z1*x0)-(z2*x0-z0*x2)*(y0*z1-y1*z0);
u3=(y2*z0-y0*z2)*(y0*z1-y1*z0)+(z2*x0-z0*x2)*(z0*x1-z1*x0)+(x2*y0-x0*y2)*(x0*y1-x1*y0);
a01=atan(-sqrt(x3*x3+y3*y3+z3*z3)/u3);
x4=(z0*x1-z1*x0)*(x1*y2-x2*y1)-(x0*y1-x1*y0)*(z1*x2-z2*x1);
y4=(x0*y1-x1*y0)*(y1*z2-y2*z1)-(y0*z1-y1*z0)*(x1*y2-x2*y1);
z4=(y0*z1-y1*z0)*(z1*x2-z2*x1)-(z0*x1-z1*x0)*(y1*z2-y2*z1);
u4=(y0*z1-y1*z0)*(y1*z2-y2*z1)+(z0*x1-z1*x0)*(z1*x2-z2*x1)+(x0*y1-x1*y0)*(x1*y2-x2*y1);
a12=atan(-sqrt(x4*x4+y4*y4+z4*z4)/u4);
x5=(z1*x2-z2*x1)*(x2*y0-x0*y2)-(x1*y2-x2*y1)*(z2*x0-z0*x2);
y5=(x1*y2-x2*y1)*(y2*z0-y0*z2)-(y1*z2-y2*z1)*(x2*y0-x0*y2);
z5=(y1*z2-y2*z1)*(z2*x0-z0*x2)-(z1*x2-z2*x1)*(y2*z0-y0*z2);
u5=(y1*z2-y2*z1)*(y2*z0-y0*z2)+(z1*x2-z2*x1)*(z2*x0-z0*x2)+(x1*y2-x2*y1)*(x2*y0-x0*y2);
a20=atan(-sqrt(x5*x5+y5*y5+z5*z5)/u5);
s=(a01+a12+a20-pi)*ISGN; // ISGN=1 only; since ISGN=-1 is impossible in our case
h=tan(s/2)*tan((s-a01)/2)*tan((s-a12)/2)*tan((s-a20)/2);
if (h<0) h=-h;
s=sqrt(h);
h=atan(s)/pi;
*(endoHnnC+nn)=h;
}
}
//TRACE("\nNendoB=%d NendoC=%d",NendoB,NendoC);
printf("\nNendoB=%d NendoC=%d\n",NendoB,NendoC);
bsptime[0]=clock();
printf("before BSPitmm-begin=%f,\n",(bsptime[0]-starttime)/CLK_TCK);
cout<<"Num of CPU: "<<cpunum<<endl;
//-------------------- modified by ALF at 2008-8-19 begin -------------------->
//add: store the solid angle of epicardial triangle
vector<float> epicHnn;
epicHnn.resize(Nepic*ND3*(NL-2)*2, 0);
for(nn=0;nn<Nepic;nn++) {
// ---- measurement location -------
ax=HRTx0+epicX[nn]*tmswf[0][0]+epicY[nn]*tmswf[0][1]+epicZ[nn]*tmswf[0][2];
ay=HRTy0+epicX[nn]*tmswf[1][0]+epicY[nn]*tmswf[1][1]+epicZ[nn]*tmswf[1][2];
az=HRTz0+epicX[nn]*tmswf[2][0]+epicY[nn]*tmswf[2][1]+epicZ[nn]*tmswf[2][2];
for(i=0;i<(NL-2)*2;i++) {
n0=*(tnd[0]+i)-1;
x0=*(r[0]+n0)-ax;
y0=*(r[1]+n0)-ay;
z0=*(r[2]+n0)-az;
n1=*(tnd[1]+i)-1;
x1=*(r[0]+n1)-ax;
y1=*(r[1]+n1)-ay;
z1=*(r[2]+n1)-az;
n2=*(tnd[2]+i)-1;
x2=*(r[0]+n2)-ax;
y2=*(r[1]+n2)-ay;
z2=*(r[2]+n2)-az;
x3=(z2*x0-z0*x2)*(x0*y1-x1*y0)-(x2*y0-x0*y2)*(z0*x1-z1*x0);
y3=(x2*y0-x0*y2)*(y0*z1-y1*z0)-(y2*z0-y0*z2)*(x0*y1-x1*y0);
z3=(y2*z0-y0*z2)*(z0*x1-z1*x0)-(z2*x0-z0*x2)*(y0*z1-y1*z0);
u3=(y2*z0-y0*z2)*(y0*z1-y1*z0)+(z2*x0-z0*x2)*(z0*x1-z1*x0)+(x2*y0-x0*y2)*(x0*y1-x1*y0);
a01=atan(-sqrt(x3*x3+y3*y3+z3*z3)/u3);
x4=(z0*x1-z1*x0)*(x1*y2-x2*y1)-(x0*y1-x1*y0)*(z1*x2-z2*x1);
y4=(x0*y1-x1*y0)*(y1*z2-y2*z1)-(y0*z1-y1*z0)*(x1*y2-x2*y1);
z4=(y0*z1-y1*z0)*(z1*x2-z2*x1)-(z0*x1-z1*x0)*(y1*z2-y2*z1);
u4=(y0*z1-y1*z0)*(y1*z2-y2*z1)+(z0*x1-z1*x0)*(z1*x2-z2*x1)+(x0*y1-x1*y0)*(x1*y2-x2*y1);
a12=atan(-sqrt(x4*x4+y4*y4+z4*z4)/u4);
x5=(z1*x2-z2*x1)*(x2*y0-x0*y2)-(x1*y2-x2*y1)*(z2*x0-z0*x2);
y5=(x1*y2-x2*y1)*(y2*z0-y0*z2)-(y1*z2-y2*z1)*(x2*y0-x0*y2);
z5=(y1*z2-y2*z1)*(z2*x0-z0*x2)-(z1*x2-z2*x1)*(y2*z0-y0*z2);
u5=(y1*z2-y2*z1)*(y2*z0-y0*z2)+(z1*x2-z2*x1)*(z2*x0-z0*x2)+(x1*y2-x2*y1)*(x2*y0-x0*y2);
a20=atan(-sqrt(x5*x5+y5*y5+z5*z5)/u5);
s=(a01+a12+a20-pi)*ISGN; // ISGN=1 only; since ISGN=-1 is impossible in our case
h=tan(s/2)*tan((s-a01)/2)*tan((s-a12)/2)*tan((s-a20)/2);
if (h<0) h=-h;
s=sqrt(h);
h=atan(s)/pi;
epicHnn[nn] = h;
}
}
//---- body surface potential distribution [time(msec)]
nsnrt=mBCL;
nTimeStep=0;
nVCG=0;//nVCG_old=0;//by sf 090401
itbuf=0;
bufGRD=(float)0;
for(short int n=0;n<2;n++) {
for(short int m=0;m<3;m++) {
bufVCG[n][m]=(float)0;
}
}
mTime=maxXctStep*3;
iTime=3*ND;
//printf("\nmTime1=%d",(mTime/3+2));
gatheralldpl = (float**)malloc((mTime/3+1)*sizeof(float)); //for 1--mTime/3
gatherallijk = (int**)malloc((mTime/3+1)*sizeof(int));
countallijk = (int*)malloc((mTime/3+1)*sizeof(int));
countallijk_reduce = (int*)malloc((mTime/3+1)*sizeof(int));
for(i=0;i<=mTime;i=i+3)
{
*(countallijk+i/3)=0; //by sf 090621
*(countallijk_reduce+i/3)=0;
}
// isumloopsiTime
// iloops[3]0: [0,0]dipole,dipole
// 1: dipole
// 2:dipoleiTime
// itask[2]0:MPI
// 1:iTime,0
for(i=0;i<2;i=i+1)
{
itask[i] = (int *)malloc((mTime/3+1)*sizeof(int));
for(j=0;j<=mTime;j=j+3)
{
*(itask[i]+j/3)=777;
}
}
for(i=0;i<3;i=i+1)
{
iloops[i] = (int *)malloc((mTime/3+1)*sizeof(int));
for(j=0;j<=mTime;j=j+3)
{
*(iloops[i]+j/3)=-8;
}
}
for(j=0;j<=mTime;j=j+3)
{
*(iloops[2]+j/3)=j;
}
//printf("before BSPitmm-begin=%f,\n",(bsptime[0]-starttime)/CLK_TCK);
iTimebegin=1,iTimeend=mTime/3;
int loopnum=0,loop=0;
bsptime[0] =clock();
printf("mTime:%d iTimebegin:%d iTimeend:%d\n", mTime, iTimebegin, iTimeend);
#pragma omp parallel for // OpenMP--begin //by sf 090621iTimedipole
for(i=iTime;i<=mTime;i=i+3)
{
int tid=omp_get_thread_num(),tnum=omp_get_num_threads();
corenum=tnum;
*(iloops[0]+i/3)=BSPitmmcount(i);
*(iloops[1]+i/3)=*(iloops[0]+i/3);//dipole
//isumdipoles=isumdipoles+*(iloops[0]+i/3);//myid=0dipole,why?god save me;:,
*(itask[0]+i/3)=i;
if (*(iloops[0]+i/3)>0)
{
//printf("malloc j=%d,myid=%d,*(countallijk+i/3)=%d,*(countallijk_reduce+j/3)=%d\n",j,myid,*(countallijk+j/3),*(countallijk_reduce+j/3));
gatherallijk[i/3] = (int*)malloc(*(iloops[0]+i/3)*3*sizeof(int));
gatheralldpl[i/3] = (float*)malloc(*(iloops[0]+i/3)*3*sizeof(float));
for(j=0;j<*(iloops[0]+i/3);j=j+1)
{
*(gatherallijk[i/3]+j)=0;
*(gatheralldpl[i/3]+j)=float(0.0);
};
}
//printf("%d iloops\n",*(iloops[0]+i/3));//printf("iTime=%d,loopcount=%d\n",loop,*(iloops+loop/3));
}
/*
for(i=1;i<=iTimeend;i++)
{
//printf("i=%d,iloops[0]=%d,itask[0]=%d,itask[1]=%d,myid=%d\n",i,*(iloops[0]+i),*(itask[0]+i),*(itask[1]+i),myid);
isumdipoles=isumdipoles+*(iloops[0]+i);
}
*/
//ofstream out("E:\\out.txt");
//iTimebegin=1,iTimeend=mTime/3;
//dipole *(iloops[1]+0)
for(loop=1;loop<=iTimeend;loop++)
{
loopnum=loop;
for(i=loop+1;i<=iTimeend;i++){
if (*(iloops[1]+loopnum)<*(iloops[1]+i)){
loopnum=i;
}
}
*(iloops[1]+0)=*(iloops[1]+loopnum);
*(iloops[2]+0)=*(iloops[2]+loopnum);
*(iloops[1]+loopnum)=*(iloops[1]+loop);
*(iloops[2]+loopnum)=*(iloops[2]+loop);
*(iloops[1]+loop)=*(iloops[1]+0);
*(iloops[2]+loop)=*(iloops[2]+0);
//cout<<"dipole:"<<*(iloops[1]+loop)<<", dipoleiTime"<<*(iloops[2]+loop)<<endl;
//cout<<"dipole:"<<*(iloops[1]+loop)<<", dipoleiTime"<<*(iloops[2]+loop)<<endl;
//out<<""<<*(iloops[1]+loop)<<","<<endl;
};
//out.close();
//091212--100211 sumdipole=dipole,dipolep=dipole,dipole0=MPI0 dipole,
//iteration0=MPI0 iteration,turn=0,mTimeby0=[1,mTimeby0]--;(mTimeby0,iTimeEND]--0,count=1,head=
int sumdipole=0,dipolep=0,dipole0=0,iteration0=0,turn=0,mTimeby0=0,count=1,head=1;
// GPUCPUdiploe
int dipole_gpu = 0, dipole_cpu = 0;
int tail,tailbegin,tailend,exi,ldipole,count1; //[1,gpuend) [gpuend,cpuend](cpuend,tail)[tail,*(itask[1]+0)]
//dipole
for(loop=1;loop<=iTimeend;loop++)
{
sumdipole=sumdipole+*(iloops[0]+loop);
};
//dipole
dipolep=sumdipole/cpunum;
cout<<"dipolep = "<<dipolep<<" sumdiploep = "<<sumdipole<<endl;
// GPUCPU
int gpu_op_num, cpu_op_num;
if(cpunum % 2 == 0){
cpu_op_num = gpu_op_num = cpunum / 2;
}else{
// GPU
gpu_op_num = cpunum / 2 + 1;
cpu_op_num = cpunum / 2;
}
// isumloopsiTime
// iloops[3]0: [0,0]dipole,dipole
// 1: dipole
// 2:dipoleiTime
// itask[2]0:MPI
// 1:iTime,0
/*
dipole0=0;
for(loop=iTimeend;loop>=1;loop--)//0<=dipoleiteration
{
dipole0=dipole0+*(iloops[1]+loop);
if (dipole0 > dipolep)
{
break;
};
*(itask[0]+loop)=0;
};
*/
//mTimeby0=loop;//mTimeby0 [1,mTimeby0]
//mTimeby0=iTimeend;
int msumdipole=0,mdipolep=0,tdipole=0,predictend=0,gpuend=1,cpuend=iTimeend;
//msumdipole=MPIdipole,mdipolep=dipole,
//gpuspeed=gpu,tdipole,predictend=(t0);
//[1,gpuend),(cpuend,iTimeend];[gpuend,cpuend],:cpuend-gpuend+1;
cout<<"iTimeend:"<<iTimeend<<endl;
turn=1;
//for(loop=1;loop<=mTimeby0;loop++)
for(loop=1;loop<=iTimeend;loop++)
{
*(itask[0]+loop)=turn;
turn++;
if (turn>cpunum-1) turn=1;
};
// isumloopsiTime
// iloops[3]0: [0,0]dipole,dipole
// 1: dipole
// 2:dipoleiTime
// itask[2]0:MPI
// 1:iTime,0
//int msumdipole=0,mdipolep=0,tdipole=0,predictend=0,gpuend=1,cpuend=iTimeend;
//msumdipole=MPIdipole,mdipolep=dipole,
//gpuspeed=gpu,tdipole,predictend=(t0);
//[1,gpuend),(cpuend,iTimeend];[gpuend,cpuend],:cpuend-gpuend+1;
head=1;
for(loop=1;loop<=iTimeend;loop++)
{
//printf("i=%d,iloops[0]=%d,itask[0]=%d,itask[1]=%d,myid=%d\n",loop,*(iloops[0]+loop),*(itask[0]+loop),*(itask[1]+loop),myid);
//if (myid==*(itask[0]+loop))
{
*(itask[1]+head)=*(iloops[2]+loop);
head++;
msumdipole=msumdipole+*(iloops[0]+*(itask[1]+loop)/3);
};
};
//*(itask[1])iTime,*(itask[1]+0),
*(itask[1]+0)=head-1;
cout<<"*(itask[1]+0):"<<*(itask[1]+0)<<endl;
// iloops[3]0: [0,0]dipole,dipole
// 1: dipole
// 2:dipoleiTime
// itask[2]0:MPI
// 1:iTime,0
// mdipolep OPdiploe
mdipolep=msumdipole*gpuspeed/(gpuspeed+corenum-1);
for(loop=1;loop<=*(itask[1]+0);loop++)
{
// tdipole
tdipole=tdipole+*(iloops[0]+*(itask[1]+loop)/3);
if (tdipole>=mdipolep)
break;
};
// predictendP
predictend=loop-1;
//printf("gpuspeed=%d,corenum=%d,loop=%d,predictend=%d,tdipole=%d,mdipolep=%d\n",gpuspeed,corenum,loop,predictend,tdipole,mdipolep);
int gpuLoop, cpuLoop;
gpuLoop = 1;
cpuLoop = iTimeend;
#pragma omp parallel shared(gpuLoop, cpuLoop) //private(iTime) //+ //by sf 090828 OpenMP--begin
{
int tid=omp_get_thread_num();
int tnum=omp_get_num_threads(),myloop,myiTime;
int s=2*tid-1,k=2*(tnum-1);
double tbegin=clock(),tend=0.0;
int dipolesum=0,iterationsum=0;
//printf("11(itask0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,count=%d,iTimebegin=%d,iTimeend=%d\n",*(itask[1]+0),myid,tid,useGPU,GPUnum,count,iTimebegin,iTimeend);
if (useGPU==1 && tid==0 && GPUnum>0)
{
//printf("112(itask0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,count=%d,iTimebegin=%d,iTimeend=%d\n",*(itask[1]+0),myid,tid,useGPU,GPUnum,count,iTimebegin,iTimeend);
gpu_transdata(epicX_old,epicY_old,epicZ_old,tnd,r,rn,endoBx,endoBy,endoBz,endoCx,endoCy,endoCz,tmswf);
//printf("113(itask0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,count=%d,iTimebegin=%d,iTimeend=%d\n",*(itask[1]+0),myid,tid,useGPU,GPUnum,count,iTimebegin,iTimeend);
}
threadnum=tnum;
//printf("13(itask0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,count=%d,iTimebegin=%d,iTimeend=%d\n",*(itask[1]+0),myid,tid,useGPU,GPUnum,count,iTimebegin,iTimeend);
if (tid==0)
{
//myloop=1;//myloop,,,3
//while (myloop < gpuend)
while(gpuLoop)
{
//
//dipolesum=dipolesum+*(iloops[0]+*(itask[1]+myloop)/3);
dipolesum=dipolesum+*(iloops[0]+*(itask[1]+gpuLoop)/3);
iterationsum++;
//printf("gsNO:=%d,itask[1]+0=%d,myid=%d,tid=%d,useGPU=%d,Gnum=%d,iTime=%d,dipole=%d\n",myloop,*(itask[1]+0),myid,tid,useGPU,GPUnum,*(itask[1]+myloop),*(iloops[0]+*(itask[1]+myloop)/3));
//BSPitmm(*(itask[1]+myloop), tnd, hnn, endoHnnA, endoHnnB, endoHnnC,endoPOT,VCGs,nsnrt, &epicHnn[0], &epicPOT[*(itask[1]+myloop)/3-1][0]);
BSPitmm(*(itask[1]+gpuLoop), tnd, hnn, endoHnnA, endoHnnB, endoHnnC,endoPOT,VCGs,nsnrt, &epicHnn[0], &epicPOT[*(itask[1]+gpuLoop)/3-1][0]);
//myloop++;
gpuLoop++;
if(gpuLoop >= predictend){
break;
}
};
}
else
{
//myloop=(tail-1)-(tid-1);
//while (myloop > cpuend)
while(cpuLoop)
{
dipolesum=dipolesum+*(iloops[0]+*(itask[1]+cpuLoop)/3);
iterationsum++;
//printf("csNO:=%d,itask[1]+0=%d,myid=%d,tid=%d,useGPU=%d,Gnum=%d,iTime=%d,dipole=%d\n",myloop,*(itask[1]+0),myid,tid,useGPU,GPUnum,*(itask[1]+myloop),*(iloops[0]+*(itask[1]+myloop)/3));
//BSPitmm(*(itask[1]+myloop), tnd, hnn, endoHnnA, endoHnnB, endoHnnC,endoPOT,VCGs,nsnrt, &epicHnn[0], &epicPOT[*(itask[1]+myloop)/3-1][0]);
BSPitmm(*(itask[1]+cpuLoop), tnd, hnn, endoHnnA, endoHnnB, endoHnnC,endoPOT,VCGs,nsnrt, &epicHnn[0], &epicPOT[*(itask[1]+cpuLoop)/3-1][0]);
//s=k-s;myloop=myloop-s;//myloop=myloop-(tnum-1);
cpuLoop--;
if(cpuLoop <= predictend){
break;
}
};
};
}
/*
//for(i=1;i<=iTimeend;i++)
//{
// printf("i=%d,iloops[0]=%d,itask[0]=%d,itask[1]=%d,myid=%d\n",i,*(iloops[0]+i),*(itask[0]+i),*(itask[1]+i),myid);
//}
int gwindow=1,cwindow=3;
tail=*(itask[1]+0);
gpuend=predictend-gwindow;//36
cpuend=predictend+(corenum-1)*cwindow;
tdipole=0;
// Pdiploe
ldipole=*(iloops[0]+*(itask[1]+predictend)/3)*1.2;
for(loop=*(itask[1]+0);loop>=1;loop--)
{
//printf("e loop=%d,tail=%d,*(itask[1]+loop)=%d,*(itask[1]+tail)=%d,myid=%d,tdipole=%d,ldipole=%d\n",loop,tail,*(itask[1]+loop),*(itask[1]+tail),myid,tdipole,ldipole);
//
if ( *(iloops[0]+*(itask[1]+loop)/3) > 30 )
{
tdipole=tdipole+*(iloops[0]+*(itask[1]+loop)/3);
if (tdipole > ldipole) break;
exi=*(itask[1]+loop);
*(itask[1]+loop)=*(itask[1]+tail);
*(itask[1]+tail)=exi;
//printf("exi loop=%d,tail=%d,*(itask[1]+loop)=%d,*(itask[1]+tail)=%d,myid=%d,tdipole=%d,ldipole=%d\n",loop,tail,*(itask[1]+loop),*(itask[1]+tail),myid,tdipole,ldipole);
tail--;
}
};
if (gpuend<=0) gpuend=1;
if (cpuend>*(itask[1]+0)) cpuend=*(itask[1]+0);
if (tail <= cpuend) tail=cpuend+1;
printf("dipolep=%d,sumdipole=%d,MPI-its=%d,msumdipole=%d,myid=%d,pre-end=%d,gpuend=%d,cpuend=%d\n",dipolep,sumdipole,*(itask[1]+0),msumdipole,myid,predictend,gpuend,cpuend);
if (threadnum>0) omp_set_num_threads(threadnum);
count=gpuend;iTimebegin=gpuend;iTimeend=cpuend;
count1=tail;tailbegin=tail;tailend=*(itask[1]+0);
printf("tail=%d,tailbegin=%d,tailend=%d,ldipole=%d,count1=%d,myid=%d\n",tail,tailbegin,tailend,ldipole,count1,myid);
//for(i=1;i<=mTime/3;i++)
//{
// printf("ii=%d,iloops[0]=%d,itask[0]=%d,itask[1]=%d,myid=%d\n",i,*(iloops[0]+i),*(itask[0]+i),*(itask[1]+i),myid);
//}
double MPItimebegin=clock(),MPItimeend=0.0;
#pragma omp parallel //private(iTime) //+ //by sf 090828 OpenMP--begin
{
int tid=omp_get_thread_num(),tnum=omp_get_num_threads(),myloop,myiTime;
int s=2*tid-1,k=2*(tnum-1);
double tbegin=clock(),tend=0.0;
int dipolesum=0,iterationsum=0;
//printf("11(itask0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,count=%d,iTimebegin=%d,iTimeend=%d\n",*(itask[1]+0),myid,tid,useGPU,GPUnum,count,iTimebegin,iTimeend);
if (useGPU==1 && tid==0 && GPUnum>0)
{
//printf("112(itask0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,count=%d,iTimebegin=%d,iTimeend=%d\n",*(itask[1]+0),myid,tid,useGPU,GPUnum,count,iTimebegin,iTimeend);
gpu_transdata(epicX_old,epicY_old,epicZ_old,tnd,r,rn,endoBx,endoBy,endoBz,endoCx,endoCy,endoCz,tmswf);
//printf("113(itask0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,count=%d,iTimebegin=%d,iTimeend=%d\n",*(itask[1]+0),myid,tid,useGPU,GPUnum,count,iTimebegin,iTimeend);
}
threadnum=tnum;
//printf("13(itask0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,count=%d,iTimebegin=%d,iTimeend=%d\n",*(itask[1]+0),myid,tid,useGPU,GPUnum,count,iTimebegin,iTimeend);
if (tid==0)
{
myloop=1;//myloop,,,3
while (myloop < gpuend)
{
//
dipolesum=dipolesum+*(iloops[0]+*(itask[1]+myloop)/3);
iterationsum++;
//printf("gsNO:=%d,itask[1]+0=%d,myid=%d,tid=%d,useGPU=%d,Gnum=%d,iTime=%d,dipole=%d\n",myloop,*(itask[1]+0),myid,tid,useGPU,GPUnum,*(itask[1]+myloop),*(iloops[0]+*(itask[1]+myloop)/3));
BSPitmm(*(itask[1]+myloop), tnd, hnn, endoHnnA, endoHnnB, endoHnnC,endoPOT,VCGs,nsnrt, &epicHnn[0], &epicPOT[*(itask[1]+myloop)/3-1][0]);
myloop++;
};
}
else
{
myloop=(tail-1)-(tid-1);
while (myloop > cpuend)
{
dipolesum=dipolesum+*(iloops[0]+*(itask[1]+myloop)/3);iterationsum++;
//printf("csNO:=%d,itask[1]+0=%d,myid=%d,tid=%d,useGPU=%d,Gnum=%d,iTime=%d,dipole=%d\n",myloop,*(itask[1]+0),myid,tid,useGPU,GPUnum,*(itask[1]+myloop),*(iloops[0]+*(itask[1]+myloop)/3));
BSPitmm(*(itask[1]+myloop), tnd, hnn, endoHnnA, endoHnnB, endoHnnC,endoPOT,VCGs,nsnrt, &epicHnn[0], &epicPOT[*(itask[1]+myloop)/3-1][0]);
s=k-s;myloop=myloop-s;//myloop=myloop-(tnum-1);
};
};
fprintf(stdout,"static over,tid=%d,myid=%d,dipolesum=%d,iterationsum=%d\n", tid,myid,dipolesum,iterationsum);
#pragma omp critical
{
myiTime=count;count++;
//printf("3-1myiTime=%d,*(itask[1]+0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d\n",myiTime,*(itask[1]+0),myid,tid,useGPU,GPUnum);
if (useGPU==1 && tid==0)// && GPUnum>0)
{
myloop=iTimebegin;iTimebegin++;
//printf("3-3myiTime=%d,*(itask[1]+0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,iTime=%d\n",myiTime,*(itask[1]+0),myid,tid,useGPU,GPUnum,*(itask[1]+myloop));
}
else
{
myloop=iTimeend;iTimeend--;
//printf("3-4myiTime=%d,*(itask[1]+0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,iTime=%d\n",myiTime,*(itask[1]+0),myid,tid,useGPU,GPUnum,*(itask[1]+myloop));
};
//printf("3-2myiTime=%d,*(itask[1]+0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,iTime=%d\n",myiTime,*(itask[1]+0),myid,tid,useGPU,GPUnum,*(itask[1]+myloop));
}
while (myiTime <= cpuend)
{
dipolesum=dipolesum+*(iloops[0]+*(itask[1]+myloop)/3);iterationsum++;
//printf("D:=%d,itask[1]+0=%d,myid=%d,tid=%d,useGPU=%d,Gnum=%d,iTime=%d,dipole=%d\n",myiTime,*(itask[1]+0),myid,tid,useGPU,GPUnum,*(itask[1]+myloop),*(iloops[0]+*(itask[1]+myloop)/3));
BSPitmm(*(itask[1]+myloop), tnd, hnn, endoHnnA, endoHnnB, endoHnnC,endoPOT,VCGs,nsnrt, &epicHnn[0], &epicPOT[*(itask[1]+myloop)/3-1][0]);
#pragma omp critical
{
myiTime=count;count++;
if (useGPU==1 && tid==0 )//&& GPUnum>0)
{
myloop=iTimebegin;iTimebegin++;
}
else
{
myloop=iTimeend;iTimeend--;
};
}
};
//tail
//
//
//
#pragma omp critical
{
myiTime=count1;count1++;
if (useGPU==1 && tid==0)// && GPUnum>0)
{
myloop=tailbegin;tailbegin++;
}
else
{
myloop=tailend;tailend--;
};
}
while (myiTime <= *(itask[1]+0))
{
dipolesum=dipolesum+*(iloops[0]+*(itask[1]+myloop)/3);iterationsum++;
//printf("T:=%d,itask[1]+0=%d,myid=%d,tid=%d,useGPU=%d,Gnum=%d,iTime=%d,dipole=%d\n",myiTime,*(itask[1]+0),myid,tid,useGPU,GPUnum,*(itask[1]+myloop),*(iloops[0]+*(itask[1]+myloop)/3));
BSPitmm(*(itask[1]+myloop), tnd, hnn, endoHnnA, endoHnnB, endoHnnC,endoPOT,VCGs,nsnrt, &epicHnn[0], &epicPOT[*(itask[1]+myloop)/3-1][0]);
#pragma omp critical
{
myiTime=count1;count1++;
if (useGPU==1 && tid==0)// && GPUnum>0)
{
myloop=tailbegin;tailbegin++;
}
else
{
myloop=tailend;tailend--;
};
}
};
//
tend=clock();
fprintf(stdout,"!threadtime = %f,tid=%d,myid=%d,dipolesum=%d,iterationsum=%d\n", (tend-tbegin)/CLK_TCK,tid,myid,dipolesum,iterationsum);
#pragma omp barrier
};//by sf 090828 OpenMP--end
*/
//MPItimeend=clock();
// fprintf(stdout,"!!!MPItime = %f,myid=%d,*(itask[1]+0)=%d\n", (MPItimeend-MPItimebegin)/CLK_TCK,myid,*(itask[1]+0));
// iTime=(numprocs-myid)*3;
//#pragma omp parallel //private(iTime) //jingtai //by sf 090403 OpenMP--begin
//{ //by sf 090403 OpenMP--begin
// int tid=omp_get_thread_num(),tnum=omp_get_num_threads(),myiTime;
// if (useGPU==1 && tid==0 && GPUnum>0)
// {
// gpu_transdata(epicX_old,epicY_old,epicZ_old,tnd,r,rn,endoBx,endoBy,endoBz,endoCx,endoCy,endoCz,tmswf);
// }
// threadnum=tnum;
// #pragma omp critical
// {
// myiTime=iTime;iTime=iTime+numprocs*3;
// }
// while (myiTime <= mTime) {
// BSPitmm(myiTime, tnd, hnn, endoHnnA, endoHnnB, endoHnnC,endoPOT,VCGs,nsnrt, &epicHnn[0], &epicPOT[myiTime/3-1][0]); //jintai iTime
// #pragma omp critical
// {
// //*(iStep+myiTime/3)=myiTime/ND;//*(iStep+nTimeStep)=iTime/ND;
// myiTime=iTime;iTime=iTime+numprocs*3;
// }
// }
//#pragma omp barrier
// };//by sf 090403 OpenMP--end
bsptime[1] =clock();
nTimeStep=mTime/3;nVCG=nTimeStep;//printf("*iTime=%d,nVCG=%d,nTimeStep=%d,iTime_old=%d\n",iTime,nVCG,nTimeStep,iTime_old);//by sf 090402-6
for(int iTime=3*ND;iTime<=mTime;iTime=iTime+3) *(iStep+iTime/3)=iTime/ND; //by sf 090621
//if (myid==1)
//{i=27;printf("27beg-1bcasti=%d,myid=%d,ijk=%d,%d,%d\n",i,myid,*(gatherallijk[i/3]),*(gatherallijk[i/3]+1),*(gatherallijk[i/3]+2));
//};
//for(i=0;i<=mTime;i=i+3)
//{
//printf("****iTime=%d,loopcount=%d\n",i,*(countallijk+i/3));
//};
/*
if (numprocs>1) //1
{
for(i=0;i<=mTime;i=i+3)
{
*(countallijk+i/3)=*(iloops[0]+i/3)*3;
};
float VCGssend[3],POTsend[NL];
for(i=1;i<=mTime/3;i=i+1)
{
if (*(itask[0]+i)>0)
if (myid==0)
{
//printf("++endoPOT[%d]=%f,*(itask[0]+i/3)=%d,*(itask[0]+i)=%d,*(iloops[2]+i)=%d,myid=%d\n",*(iloops[2]+i)/3,*(endoPOT[*(iloops[2]+i)/3]+3),*(itask[0]+i),*(itask[0]+i),*(iloops[2]+i),myid);
j=*(iloops[2]+i)/3;
int sendID=*(itask[0]+i);
//*(iloops[2]+i)/3iTime/3,endoPOT0,iTime/3-1,VCGs0
//POT
//printf("j=%d,*(iloops[1]+j)*3=%d,myid=%d\n",j,*(iloops[1]+i)*3,myid);
*(VCGs[0]+j)=VCGssend[0];*(VCGs[1]+j)=VCGssend[1];*(VCGs[2]+j)=VCGssend[2];
for(int n=0;n<nPos;n++) {
*(POT[n]+j)=POTsend[n];
}
//printf("--endoPOT[%d]=%f,*(itask[0]+i/3)=%d,*(itask[0]+i)=%d,*(iloops[2]+i)=%d,myid=%d\n",*(iloops[2]+i)/3,*(endoPOT[*(iloops[2]+i)/3]+3),*(itask[0]+i),*(itask[0]+i),*(iloops[2]+i),myid);
}
else
{
if (myid==*(itask[0]+i))
{
j=*(iloops[2]+i)/3;
//printf("j=%d,*(iloops[1]+j)*3=%d,myid=%d\n",j,*(iloops[1]+i)*3,myid);
VCGssend[0]=*(VCGs[0]+j);VCGssend[1]=*(VCGs[1]+j);VCGssend[2]=*(VCGs[2]+j);
for(int n=0;n<nPos;n++) {
POTsend[n]=*(POT[n]+j);
}
}
//printf("##endoPOT[%d]=%f,*(itask[0]+i)=%d,*(itask[0]+i)=%d,*(iloops[2]+i)=%d,myid=%d\n",*(iloops[2]+i)/3,*(endoPOT[*(iloops[2]+i)/3]+3),*(itask[0]+i),*(itask[0]+i),*(iloops[2]+i),myid);
};
};
};
*/
bsptime[2] =clock();
//if (myid==0)
{
CFile f2;
CFileException e2;
if (!f2.Open( dataPath+"tour.dpn ", CFile::modeCreate | CFile::modeWrite, &e2 )) {
#ifdef _DEBUG
afxDump << "File could not be opened " << e2.m_cause << "\n";
#endif
}
int idpl;//printf("mTime=%d\n",mTime);
for(iTime=3;iTime<=mTime;iTime=iTime+3)
{
idpl=*(countallijk+(iTime/3))/3;
f2.Write(&iTime,2);//f2.Write(&iTime0,2);
f2.Write(&idpl,2);//f2.Write(&idpl,2);
//fprintf(fptime,"%d\n",iTime);
//fprintf(fptime,"%d\n",idpl);
};
f2.Close();
//fprintf(stdout,"f2.Close();,myid=%d\n",myid);fflush(stdout);
//}//single-tour.dpn-end
// fclose(fptime);
//printf("f2-over\n");
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'d');
//filepath.SetAt(index+2,'p');
//filepath.SetAt(index+3,'l');
//#pragma omp single
// {
CFile f3;
CFileException e3;
if (!f3.Open(dataPath+"tour.dpl ", CFile::modeCreate | CFile::modeWrite, &e3 )) {
#ifdef _DEBUG
afxDump << "File could not be opened " << e3.m_cause << "\n";
#endif
}
//fptime=fopen(dataPath+"dpl.txt","w") ;
short int ii,jj,kk;
for(iTime=3;iTime<=mTime;iTime=iTime+3)
{
//fprintf(fptime,"%d\n",iTime);
f3.Write(&iTime,2);//f.Write(&iTime0,2);//from line 4070
for(j=0;j<*(countallijk+iTime/3);j=j+3)
{
ii=*(gatherallijk[iTime/3]+j);f3.Write(&ii,2);//f.Write(gatherallijk[iTime/3]+j,sizeof(int));
jj=*(gatherallijk[iTime/3]+j+1);f3.Write(&jj,2);//f.Write(gatherallijk[iTime/3]+j+1,sizeof(int));
kk=*(gatherallijk[iTime/3]+j+2);f3.Write(&kk,2);//f.Write(gatherallijk[iTime/3]+j+2,sizeof(int));
//fprintf(fptime,"%d\n",*(gatherallijk[iTime/3]+j));fprintf(fptime,"%d\n",*(gatherallijk[iTime/3]+j+1));fprintf(fptime,"%d\n",*(gatherallijk[iTime/3]+j+2));
//f.Write(gatherijk+j+1,2);
//f.Write(gatherijk+j+2,2);
f3.Write(gatheralldpl[iTime/3]+j,4*3);//f.Write(gatherdpl+j,4*3);
//fprintf(fptime,"%f\n",*(gatheralldpl[iTime/3]+j));fprintf(fptime,"%f\n",*(gatheralldpl[iTime/3]+j+1));fprintf(fptime,"%f\n",*(gatheralldpl[iTime/3]+j+2));
//f.Write(gatherdpl+j+1,4);
//f.Write(gatherdpl+j+2,4);
};
};
f3.Close();
//printf("f-over\n"); fclose(fptime);
//}//single-tour.dpl-end
//fprintf(stdout,"f3.Close();,myid=%d\n",myid);fflush(stdout);
//by sf 090408 write dpl --end
//#pragma omp single //single tour.ecp begin
//{
// Save endocardial potential data
CFile f4;
CFileException e4;
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'e');
//filepath.SetAt(index+2,'c');
//filepath.SetAt(index+3,'p');
if( !f4.Open( dataPath+"tour.ecp ", CFile::modeCreate | CFile::modeWrite, &e4 ) )
{
#ifdef _DEBUG
afxDump << "File could not be opened " << e4.m_cause << "\n";
#endif
}
//FILE *fptime;//by sf
//
//fptime=fopen(dataPath+"ecp-gpu.txt","w") ;//by sf
//fprintf(fptime,"%d\n",nTimeStep);//by sf
f4.Write(&nTimeStep,2);
for(i=1;i<=nTimeStep;i++) {f4.Write(iStep+i,2);
//fprintf(fptime,"%d\n",*(iStep+i));
}
f4.Write(&NendoB, 2);
f4.Write(&NendoC, 2);
//fprintf(fptime,"%d\n",NendoB);
//fprintf(fptime,"%d\n",NendoC);
for(i=0;i<NendoB;i++) {
f4.Write(&endoBx[i], 2);
f4.Write(&endoBy[i], 2);
f4.Write(&endoBz[i], 2);
//fprintf(fptime,"%d\n",endoBx[i]);
//fprintf(fptime,"%d\n",endoBy[i]);
//fprintf(fptime,"%d\n",endoBz[i]);
}
for(i=0;i<NendoC;i++) {
f4.Write(&endoCx[i], 2);
f4.Write(&endoCy[i], 2);
f4.Write(&endoCz[i], 2);
//fprintf(fptime,"%d\n",endoCx[i]);
//fprintf(fptime,"%d\n",endoCy[i]);
//fprintf(fptime,"%d\n",endoCz[i]);
}
// fclose(fptime);
//fptime=fopen(dataPath+"ecp-endoPOT-gpu.txt","w") ;
//TRACE("\nTotal Time Step: %d, Total Endocardial Points: %d+%d",nTimeStep,NendoB,NendoC);
for(i=0;i<nTimeStep;i++) {
for(j=0;j<(NendoB+NendoC);j++) {
f4.Write(endoPOT[i]+j,4);
//fprintf(fptime,"%f\n",*(endoPOT[i]+j));
}
}
f4.Close();
//fclose(fptime);
//} //single tour.ecp end
//fprintf(stdout,"f4.Close();,myid=%d\n",myid);fflush(stdout);
// Save VCG data
// char* pFileName = "f:/VCG/VCG.6";
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'v');
//filepath.SetAt(index+2,'c');
//filepath.SetAt(index+3,'g');
//#pragma omp single //single tour.vcg begin
//{
CFile f5;
CFileException e5;
if( !f5.Open( dataPath+"tour.vcg ", CFile::modeCreate | CFile::modeWrite, &e5 ) )
{
#ifdef _DEBUG
afxDump << "File could not be opened " << e5.m_cause << "\n";
#endif
}
f5.Write(&nVCG,2);
for(j=1;j<=nVCG;j++) {
f5.Write(iStep+j,2);
for(i=0;i<3;i++)
f5.Write(VCGs[i]+j,4);
}
f5.Close();
//} //single tour.vcg end
// ----- save potential data ------
// ++++ eff is obtained to make max. value of ECG =2.0mv ++++
eff=(float)26.5730;
// pFileName = "f:/BSP/BSP.6";
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'b');
//filepath.SetAt(index+2,'s');
//filepath.SetAt(index+3,'p');
//FILE *fptime;//sf
//fptime=fopen(dataPath+"bsp-gpu.txt","w") ;//sf
//#pragma omp single //single tour.bsp begin
//{
CFile f6;
CFileException e6;
if( !f6.Open( dataPath+"tour.bsp ", CFile::modeCreate | CFile::modeWrite, &e6 ) )
{
#ifdef _DEBUG
afxDump << "File could not be opened " << e6.m_cause << "\n";
#endif
}
f6.Write(&nTimeStep,2);
//fprintf(fptime,"%d\n",nTimeStep);//sf
for(i=1;i<=nTimeStep;i++) {f6.Write(iStep+i,2);
//fprintf(fptime,"%d\n",*(iStep+i));//sf
}
for(i=1;i<=nTimeStep;i++) {
int n = 0;
for(n=0;n<NL;n++) {
BSPm=(short int)(eff*(*(POT[n]+i)));
f6.Write(&BSPm,2);
//fprintf(fptime,"%d\n",BSPm);//sf
}
}
f6.Close();
//fprintf(stdout,"f6.Close();nTimeStep=%d,myid=%d\n",nTimeStep,myid);fflush(stdout);
//} //single tour.bsp end
//fclose(fptime);//sf
//-------------------- modified by ALF at 2008-8-19 begin -------------------->
//add: save epicardial potential as well as position
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'e');
//filepath.SetAt(index+2,'p');
//filepath.SetAt(index+3,'c');
//#pragma omp single //single tour.ecp begin
//{
CFile f7;
CFileException e7;
if( !f7.Open( dataPath+"tour.epc ", CFile::modeCreate | CFile::modeWrite, &e7 ) )
{
#ifdef _DEBUG
afxDump << "File could not be opened " << e7.m_cause << "\n";
#endif
}
//fprintf(stdout,"f7.Write(&nTimeStep,sizeof(nTimeStep));Nepic=%d,nTimeStep=%d,myid=%d\n",Nepic,nTimeStep,myid);fflush(stdout);
f7.Write(&nTimeStep,sizeof(nTimeStep));
for(i=1;i<=nTimeStep;i++)
f7.Write(iStep+i,sizeof(short int));
//fprintf(stdout,"for(i=1;i<=nTimeStep;i++) ;Nepic=%d,nTimeStep=%d,myid=%d\n",Nepic,nTimeStep,myid);fflush(stdout);
f7.Write(&Nepic, sizeof(Nepic));
for(i=0;i<Nepic;i++) {
f7.Write(&epicX[i], sizeof(short int));
f7.Write(&epicY[i], sizeof(short int));
f7.Write(&epicZ[i], sizeof(short int));
}
//fprintf(stdout,"for(i=0;i<Nepic;i++);Nepic=%d,nTimeStep=%d,myid=%d\n",Nepic,nTimeStep,myid);fflush(stdout);
//FILE *fptime;
//fptime=fopen(dataPath+"Nepic1.txt","a") ;
//fprintf(fptime,"**********useGPU=%d****nTimeStep=%d**Nepic=%d*\n",useGPU,nTimeStep,Nepic);
//TRACE("\nTotal Time Step: %d, Total Endocardial Points: %d+%d",nTimeStep,NendoB,NendoC);
for(i=0;i<nTimeStep;i++) {
for(j=0;j<Nepic;j++) {
f7.Write(&epicPOT[i][j],sizeof(float));
//fprintf(fptime,"%f\n",epicPOT[i][j]);
}
}
f7.Close(); //fclose(fptime);
//fprintf(stdout,"f7.Close();,myid=%d\n",myid);fflush(stdout);
//-------------------- modified by ALF at 2008-8-19 end --------------------<
//printf("free1-,mtime=%d\n",mTime);
//} //single tour.epc end
//#pragma omp barrier
//};//by sf 090403 OpenMP--end
}; // if (myid==0){
//fprintf(stdout,"***comunicate--------111--,myid=%d\n",myid);fflush(stdout);
//for(iTime=3;iTime<=mTime;iTime=iTime+3)
//{
// //free(gatheralldpl[iTime/3]);free(gatherallijk[iTime/3]);
//};
// fprintf(stdout,"***comunicate-1111-ok,myid=%d\n",myid);fflush(stdout);
//free(countallijk);//free(countallijk_reduce); //free(iTimetid);
//free(schedulelist);//free(iTimeloops);
//fprintf(stdout,"***comunicate-2222-ok,myid=%d\n",myid);fflush(stdout);
bsptime[3] =clock();
//printf("BSPitmm,begin-end=%f,writefile=%f,useGPU=%d,threadnum=%d\n",(bsptime[1]-bsptime[0])/CLK_TCK,(bsptime[2]-bsptime[1])/CLK_TCK,useGPU,threadnum);
/*if(myid==0)
{
FILE *fptime;
fptime=fopen(dataPath+"gputime.txt","a") ;
fprintf(fptime,"!!!MPItime = %f,myid=%d,*(itask[1]+0)=%d\n", (MPItimeend-MPItimebegin)/CLK_TCK,myid,*(itask[1]+0));
fprintf(stdout,"BSPmitttime=%f,communicate=%f,writefile=%f\n",(bsptime[1]-bsptime[0])/CLK_TCK,(bsptime[2]-bsptime[1])/CLK_TCK,(bsptime[3]-bsptime[2])/CLK_TCK);
fprintf(fptime,"BSPmitttime=%f,communicate=%f,writefile=%f\n",(bsptime[1]-bsptime[0])/CLK_TCK,(bsptime[2]-bsptime[1])/CLK_TCK,(bsptime[3]-bsptime[2])/CLK_TCK);
fclose(fptime);
};*/
for(i=0;i<3;i++) {
free(VCGs[i]);
free(VCGs_reduce[i]);//by sf 090622
free(tnd[i]);
}
for(i=0;i<TSTEP;i++) {
free(endoPOT[i]);
//free(endoPOT_reduce[i]);//by sf 090622
}
//fprintf(stdout,"***comunicate-3333-ok,myid=%d\n",myid);fflush(stdout);
free(hnn);
free(endoHnnA);
free(endoHnnB);
free(endoHnnC);
//fprintf(stdout,"***comunicate-0000-ok,myid=%d\n",myid);fflush(stdout);
//return;
}
//-------------------- modified by ALF at 2008-8-19 begin -------------------->
// Modified: BSPitmm signature extended with epicardial transfer/output buffers
// (epicHnn, epicPOT) — compare the superseded prototype commented out below.
void BSPitmm(short int iTime0, short int **tnd,float *hnn, float *endoHnnA, float *endoHnnB, float *endoHnnC,float **endoPOT,float **VCGs,short int nsnrt, float *epicHnn, float *epicPOT) {
//void BSPitmm(short int iTime0, short int **tnd,float *hnn, float *endoHnnA, float *endoHnnB, float *endoHnnC, float *epicHnn, float *epicPOT) {
ASSERT(epicHnn != NULL);
//-------------------- modified by ALF at 2008-8-19 end --------------------<
float aptcalm(short int,short int,short int,short int,short int);
void anfct(short int i, short int j, short int k, float v[3]);
char iCell;
const short int OK_SAV=1;
short int iseqx[12]={ -1, 0, 0, 1, 1, 0, 1, 0, 0,-1,-1, 0 };
short int iseqy[12]={ 0,-1, 0,-1, 0, 1, 0, 1, 0, 1, 0,-1 };
short int iseqz[12]={ 0, 0,-1, 0,-1,-1, 0, 0, 1, 0, 1, 1 };
short int nskip=2;
short int i,j,k,l,ix,iy,iz,icell,l6,jx,jy,jz,jcell;
int nsum,n;
int intvl;
int idpl;
float asd,add,rtmax,gsum,compm,compp,compo,ax,ay,az;
float r1,r3,r5,dr,ds,rv3,bx,by,bz,ECGs;
float der[NL],ders[NL];
double grad[6];
float dpl[3];
float posi, posj, posk;
float r2,GRD;
float tmpdpl;
// endocardial
int n0,n1,n2,ni;
float *surfPOTi,*u1;
short int nhb, eTime;
//long temploc;
int tid=omp_get_thread_num();
int myid, numprocs;
int namelen;
// fprintf(stdout,"BSPitmm !! tid= %d myid= %d numprocs= %d is processor_name= %s,iTime0= %d\n",tid, myid, numprocs, processor_name,iTime0);
//fflush(stdout);
short int countijk=0;//by sf-090329***countijkijk,gatherijk[20000],
//float gatherdpl[60000];//by sf-090321***countijkijk
float *endoHnnA_old,*POTi_old,VCG_old[3];//by sf-090402-4
endoHnnA_old=(float *) malloc(2*NENDO*ND3*4);//by sf-090402-4
POTi_old=(float *) malloc(NL*4);//by sf-090403-1
float *epicPOTold;
epicPOTold=(float *) malloc(Nepic*4);
for(i=0;i<Nepic;i++) *(epicPOTold+i)=(float)0;
//double bsptimes1[3]={0.0,0.0,0.0};bsptimes1[0] = clock();
surfPOTi=(float *) malloc((NL-2)*2*4);
u1=(float *) malloc((NL-2)*2*4);
if ((surfPOTi==NULL)||(u1==NULL)) {
MessageBox(NULL,"Out of memory !",NULL,MB_OK);
return;// 0;
}
for(ni=0;ni<(NL-2)*2;ni++) {
*(surfPOTi+ni)=(float)0;
*(u1+ni)=(float)0;
}
for(ni=0;ni<(NendoB+NendoC);ni++) {
*(endoHnnA_old+ni)=(float)0;
}
// ------- initialization ---------
for(i=0;i<NL;i++) ders[i]=(float)0;
// Save dipole data
CFile f;
CFileException e;
short int index;
while (1) {
idpl = 0;
asd=(float)0;
add=(float)0;
nsum=0;
rtmax=(float)0;
for(n=0;n<nPos;n++) {
*(POTi_old+n)=(float)0;
der[n]=(float)0;
}
for(n=0;n<3;n++) VCG_old[n]=(float)0;//VCG[n]=(float)0;
//tid=omp_get_thread_num();
//if (useGPU==1 && tid==0) gpu_BSPitmm_Malloc(POTi_old,der,endoHnnA_old,surfPOTi);//Comment by SWF (2009-2-7-15)(For:)//by sf-090402-4
//f.Write(&iTime0,2);//by sf 090329
//add fibre conduction contribution, iCell
if (useGPU==1 && tid==0 && GPUnum>0)
{
gpu_BSPitmm_HostToDevice(POTi_old,der,endoHnnA_old,surfPOTi);
};
for (nhb=0; nhb<nHB; nhb++) {
i=iHB[0][nhb];
j=iHB[1][nhb];
k=iHB[2][nhb];
for (ni=0;ni<mxcycle;ni++) {
eTime=vHB[ni][nhb];
if (eTime==(short int)(iTime0/3)) {
compo=(aptcalm(i,j,k,4,iTime0)+90)/nskip/nskip;
dpl[0]=compo/10;
dpl[1]=compo/10;
dpl[2]=compo;
if (OK_SAV==1) {
//by sf-090329
//f.Write(&i,2);
//f.Write(&j,2);
//f.Write(&k,2);
//for (n=0;n<3;n++) {
// f.Write(&dpl[n],4);
//}
*(gatherallijk[iTime0/3]+countijk)=i;*(gatherallijk[iTime0/3]+countijk+1)=j;*(gatherallijk[iTime0/3]+countijk+2)=k;
*(gatheralldpl[iTime0/3]+countijk)=dpl[0];*(gatheralldpl[iTime0/3]+countijk+1)=dpl[1];*(gatheralldpl[iTime0/3]+countijk+2)=dpl[2];
countijk=countijk+3;
idpl++;
}
posi=HRTx0+i*tmswf[0][0]+j*tmswf[0][1]+k*tmswf[0][2];
posj=HRTy0+i*tmswf[1][0]+j*tmswf[1][1]+k*tmswf[1][2];
posk=HRTz0+i*tmswf[2][0]+j*tmswf[2][1]+k*tmswf[2][2];
// potential distribution generated by
// a single dipole in infinite medium
if (useGPU==1 && tid==0 && GPUnum>0)
{
//gpu_BSPitmm_HostToDevice(POTi,der,endoHnnA,surfPOTi);
gpu_dpl_all(0,posi,posj,posk,nPos,dpl,POTi_old,der,HRTx0,HRTy0,HRTz0,NendoB,NendoC,endoHnnA_old,endoBx,endoBy,endoBz,tmswf,epicPOTold);
//gpu_dpl_nPos(posi,posj,posk,nPos,dpl,POTi_old,der);
//gpu_dpl_Nendo(posi,posj,posk,HRTx0,HRTy0,HRTz0,NendoB,0,dpl,endoHnnA_old,endoBx,endoBy,endoBz,tmswf);
//gpu_dpl_Nendo(posi,posj,posk,HRTx0,HRTy0,HRTz0,NendoC,NendoB,dpl,endoHnnA_old,endoCx,endoCy,endoCz,tmswf);
//gpu_dpl_nPos_2(posi,posj,posk,dpl);
//gpu_BSPitmm_DeviceToHost(POTi,der,endoHnnA,surfPOTi);
}
else
{
///* //sf
for(n=0;n<nPos;n++) {
ax=*(r[0]+n)-posi;
ay=*(r[1]+n)-posj;
az=*(r[2]+n)-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
r5=(float)(r2*r3);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
ds=3*dr/r5;
rv3=1/r3;
bx=dpl[0]*rv3-ax*ds;
by=dpl[1]*rv3-ay*ds;
bz=dpl[2]*rv3-az*ds;
*(POTi_old+n)+=dr*rv3;
*(der+n)+=*(rn[0]+n)*bx+*(rn[1]+n)*by+*(rn[2]+n)*bz;
}
//TRACE("\niCell4 %d %d %d %d %f %f",iTime0,i,j,k,compo,*(POTi+94));
for(n=0;n<NendoB;n++) {
// ---- measurement location -------
ax=HRTx0+endoBx[n]*tmswf[0][0]+endoBy[n]*tmswf[0][1]+endoBz[n]*tmswf[0][2]-posi;
ay=HRTy0+endoBx[n]*tmswf[1][0]+endoBy[n]*tmswf[1][1]+endoBz[n]*tmswf[1][2]-posj;
az=HRTz0+endoBx[n]*tmswf[2][0]+endoBy[n]*tmswf[2][1]+endoBz[n]*tmswf[2][2]-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(endoHnnA_old+n)+=dr*rv3;
}
for(n=0;n<NendoC;n++) {
// ---- measurement location -------
ax=HRTx0+endoCx[n]*tmswf[0][0]+endoCy[n]*tmswf[0][1]+endoCz[n]*tmswf[0][2]-posi;
ay=HRTy0+endoCx[n]*tmswf[1][0]+endoCy[n]*tmswf[1][1]+endoCz[n]*tmswf[1][2]-posj;
az=HRTz0+endoCx[n]*tmswf[2][0]+endoCy[n]*tmswf[2][1]+endoCz[n]*tmswf[2][2]-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(endoHnnA_old+n+NendoB)+=dr*rv3;
}
for(n=0;n<(NL-2)*2;n++) {
// ---- measurement location -------
n0=*(tnd[0]+n)-1;
n1=*(tnd[1]+n)-1;
n2=*(tnd[2]+n)-1;
ax=(*(r[0]+n0)+*(r[0]+n1)+*(r[0]+n2))/3-posi;
ay=(*(r[1]+n0)+*(r[1]+n1)+*(r[1]+n2))/3-posj;
az=(*(r[2]+n0)+*(r[2]+n1)+*(r[2]+n2))/3-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(surfPOTi+n)+=dr*rv3;
//Uinf
}
//*/ //sf
}
}
}
}
/*
for (nhb=0; nhb<nttl; nhb++) {
i=ipttl[0][nhb];
j=ipttl[1][nhb];
k=ipttl[2][nhb];
iCell=*(mapCell[k]+j*NJ+i);
if (iCell<=1) continue;
if (iCell>=15) continue;
temploc=*(locXCT[k]+j*NJ+i);
eTime=*(mapXCTm[iTime0/mBCL]+temploc);
if (eTime==(short int)(iTime0/3)) {
dpl[0]=50;
dpl[1]=50;
dpl[2]=50;
posi=HRTx0+i*tmswf[0][0]+j*tmswf[0][1]+k*tmswf[0][2];
posj=HRTy0+i*tmswf[1][0]+j*tmswf[1][1]+k*tmswf[1][2];
posk=HRTz0+i*tmswf[2][0]+j*tmswf[2][1]+k*tmswf[2][2];
// potential distribution generated by
// a single dipole in infinite medium
for(n=0;n<nPos;n++) {
ax=*(r[0]+n)-posi;
ay=*(r[1]+n)-posj;
az=*(r[2]+n)-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
r5=(float)(r2*r3);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
ds=3*dr/r5;
rv3=1/r3;
bx=dpl[0]*rv3-ax*ds;
by=dpl[1]*rv3-ay*ds;
bz=dpl[2]*rv3-az*ds;
*(POTi+n)+=dr*rv3;
*(der+n)+=*(rn[0]+n)*bx+*(rn[1]+n)*by+*(rn[2]+n)*bz;
}
TRACE("\niCellx %d %d %d %d %f %f",iTime0,i,j,k,compo,*(POTi+94));
for(n=0;n<NendoB;n++) {
// ---- measurement location -------
ax=HRTx0+endoBx[n]*tmswf[0][0]+endoBy[n]*tmswf[0][1]+endoBz[n]*tmswf[0][2]-posi;
ay=HRTy0+endoBx[n]*tmswf[1][0]+endoBy[n]*tmswf[1][1]+endoBz[n]*tmswf[1][2]-posj;
az=HRTz0+endoBx[n]*tmswf[2][0]+endoBy[n]*tmswf[2][1]+endoBz[n]*tmswf[2][2]-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(endoHnnA+n)+=dr*rv3;
}
for(n=0;n<NendoC;n++) {
// ---- measurement location -------
ax=HRTx0+endoCx[n]*tmswf[0][0]+endoCy[n]*tmswf[0][1]+endoCz[n]*tmswf[0][2]-posi;
ay=HRTy0+endoCx[n]*tmswf[1][0]+endoCy[n]*tmswf[1][1]+endoCz[n]*tmswf[1][2]-posj;
az=HRTz0+endoCx[n]*tmswf[2][0]+endoCy[n]*tmswf[2][1]+endoCz[n]*tmswf[2][2]-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(endoHnnA+n+NendoB)+=dr*rv3;
}
for(n=0;n<(NL-2)*2;n++) {
// ---- measurement location -------
n0=*(tnd[0]+n)-1;
n1=*(tnd[1]+n)-1;
n2=*(tnd[2]+n)-1;
ax=(*(r[0]+n0)+*(r[0]+n1)+*(r[0]+n2))/3-posi;
ay=(*(r[1]+n0)+*(r[1]+n1)+*(r[1]+n2))/3-posj;
az=(*(r[2]+n0)+*(r[2]+n1)+*(r[2]+n2))/3-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(surfPOTi+n)+=dr*rv3;
//Uinf
}
}
}
*/
//gpu_BSPitmm_HostToDevice(POTi,der,endoHnnA,surfPOTi);
//printf("Time=%d,\n", iTime0);
// up, bottom, left, right, front, behind
for (k=0;k<=NK;k+=nskip) { // i,j // < --> <= August 10,1996
for (j=0;j<NJ;j+=nskip) {
for (i=NI;i>-1;i-=nskip) {
if (k<*(kmin+NI*j+i) || k>*(kmax+NI*j+i)) {
continue;
}
iCell=*(mapCell[k]+NI*j+i);
// +++++++++ special fiber neglected +++++
if (iCell<=1) continue; /*<Comment by ALF> null or SN*/
if (iCell>=15) continue; /*<Comment by ALF> out of define*/
// include fiber conduction
if((iCell>=3)&&(iCell<=6)) continue; /*<Comment by ALF> not AVN HB BB PKJ*/
compo=aptcalm(i,j,k,iCell,iTime0);
// --------- neighberhood search ---------
gsum=(float)0;
for (l=0;l<6;l++) {
compm=(float)0.0;
compp=(float)0.0;
grad[l]=(double)0.0;
ix=i+iseqx[l];
iy=j+iseqy[l];
iz=k+iseqz[l];
if ((ix>=0)&&(ix<NI)&&(iy>=0)&&(iy<NJ)&&(iz>=0)&&(iz<NK)) {
icell=*(mapCell[iz]+iy*NI+ix);
if ((icell>1)&&(icell<15)&&((icell<3)||(icell>6))) {
compm=aptcalm(ix,iy,iz,icell,iTime0);
//if (iTime0 ==3 && compm != -90.) TRACE("\nB %d %d %d %f",ix,iy,iz, compm);
grad[l]+=compm-compo;
}
}
l6=l+6; /*<Comment by ALF> opposite one*/
jx=i+iseqx[l6];
jy=j+iseqy[l6];
jz=k+iseqz[l6];
if ((jx>=0)&&(jx<NI)&&(jy>=0)&&(jy<NJ)&&(jz>=0)&&(jz<NK)) {
jcell=*(mapCell[jz]+jy*NI+jx);
if ((jcell>1)&&(jcell<15)&&((jcell<3)||(jcell>6))) {
compp=aptcalm(jx,jy,jz,jcell,iTime0);
grad[l]+=compo-compp;
}
}
}
for (l=0;l<6;l++)
gsum+=(float)fabs((double)grad[l]);
if (gsum==0) continue;
// close dpl file
// dipole number --> nsum; position-->ipos
for (n=0;n<3;n++) {
dpl[n]=(float)0;
for (short int m=0;m<6;m++)
dpl[n]+=tmswf[n][m]*grad[m];
// -- take conductivity factor into consideration --
dpl[n]=dpl[n]*(*(iparm+NPARM*(iCell-1)+12))/(100);
// f.Write(&dpl[n],4);
// *(dplm[n]+idpl) = dpl[n];
// >>>>> moved to an independent loop below >>>>
// VCG[n]+=dpl[n];
}
// >>>>>>>>>> aniso >>>>>>>>
tmpdpl=dpl[0];
if (ANISO==1 && icell==7) {
anfct(i,j,k,dpl);
}
// if (tmpdpl-dpl[0]>0.0001 || tmpdpl-dpl[0]<-0.0001)
// TRACE("\ndpl %2d %2d %2d %f %f",i+1,j+1,k+1, tmpdpl, dpl[0]);
if (OK_SAV==1) {
//by sf-090329
//f.Write(&i,2);
//f.Write(&j,2);
//f.Write(&k,2);
//for (n=0;n<3;n++) {
// f.Write(&dpl[n],4);
//}
*(gatherallijk[iTime0/3]+countijk)=i;*(gatherallijk[iTime0/3]+countijk+1)=j;*(gatherallijk[iTime0/3]+countijk+2)=k;
*(gatheralldpl[iTime0/3]+countijk)=dpl[0];*(gatheralldpl[iTime0/3]+countijk+1)=dpl[1];*(gatheralldpl[iTime0/3]+countijk+2)=dpl[2];
countijk=countijk+3;
}
for (n=0;n<3;n++) {
VCG_old[n]+=dpl[n];//VCG[n]+=dpl[n];
}
idpl++;
// <<<<<<<<<< aniso <<<<<<<<
posi=HRTx0+i*tmswf[0][0]+j*tmswf[0][1]+k*tmswf[0][2];
posj=HRTy0+i*tmswf[1][0]+j*tmswf[1][1]+k*tmswf[1][2];
posk=HRTz0+i*tmswf[2][0]+j*tmswf[2][1]+k*tmswf[2][2];
// potential distribution generated by
// a single dipole in infinite medium
//------------ 2009-2-4-16 BY SWF---------
// comment:
//printf("nPos*,itime0=%d", iTime0);
if (useGPU==1 && tid==0 && GPUnum>0)
{
// gpu_freetransdata();
//gpu_transdata(tnd,r,rn,endoBx,endoBy,endoBz,endoCx,endoCy,endoCz,tmswf);
//gpu_BSPitmm_HostToDevice(POTi,der,endoHnnA,surfPOTi);
gpu_dpl_all(1,posi,posj,posk,nPos,dpl,POTi_old,der,HRTx0,HRTy0,HRTz0,NendoB,NendoC,endoHnnA_old,endoBx,endoBy,endoBz,tmswf,epicPOTold);
//gpu_dpl_nPos(posi,posj,posk,nPos,dpl,POTi_old,der);
//gpu_dpl_Nendo(posi,posj,posk,HRTx0,HRTy0,HRTz0,NendoB,0,dpl,endoHnnA_old,endoBx,endoBy,endoBz,tmswf);
//gpu_dpl_Nendo(posi,posj,posk,HRTx0,HRTy0,HRTz0,NendoC,NendoB,dpl,endoHnnA_old,endoCx,endoCy,endoCz,tmswf);
//gpu_dpl_nPos_2(posi,posj,posk,dpl);
//gpu_dpl_Nepic(posi,posj,posk,HRTx0,HRTy0,HRTz0,dpl,tmswf,epicPOTold);
//gpu_BSPitmm_DeviceToHost(POTi,der,endoHnnA,surfPOTi);
///* //printf("$");
}
else
{
for(n=0;n<nPos;n++) {
ax=*(r[0]+n)-posi;
ay=*(r[1]+n)-posj;
az=*(r[2]+n)-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
r5=(float)(r2*r3);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
ds=3*dr/r5;
rv3=1/r3;
bx=dpl[0]*rv3-ax*ds;
by=dpl[1]*rv3-ay*ds;
bz=dpl[2]*rv3-az*ds;
*(POTi_old+n)+=dr*rv3;
*(der+n)+=*(rn[0]+n)*bx+*(rn[1]+n)*by+*(rn[2]+n)*bz;
}
// endocadial potential distribution generated by
// a single dipole in infinite medium
for(n=0;n<NendoB;n++) {
// ---- measurement location -------
ax=HRTx0+endoBx[n]*tmswf[0][0]+endoBy[n]*tmswf[0][1]+endoBz[n]*tmswf[0][2]-posi;
ay=HRTy0+endoBx[n]*tmswf[1][0]+endoBy[n]*tmswf[1][1]+endoBz[n]*tmswf[1][2]-posj;
az=HRTz0+endoBx[n]*tmswf[2][0]+endoBy[n]*tmswf[2][1]+endoBz[n]*tmswf[2][2]-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(endoHnnA_old+n)+=dr*rv3;
}
for(n=0;n<NendoC;n++) {
// ---- measurement location -------
ax=HRTx0+endoCx[n]*tmswf[0][0]+endoCy[n]*tmswf[0][1]+endoCz[n]*tmswf[0][2]-posi;
ay=HRTy0+endoCx[n]*tmswf[1][0]+endoCy[n]*tmswf[1][1]+endoCz[n]*tmswf[1][2]-posj;
az=HRTz0+endoCx[n]*tmswf[2][0]+endoCy[n]*tmswf[2][1]+endoCz[n]*tmswf[2][2]-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(endoHnnA_old+n+NendoB)+=dr*rv3;
}
//-------------------- modified by ALF at 2008-8-19 begin -------------------->
//add: epicardial potential distribution generated by
// a single dipole in infinite medium
for (n=0; n<Nepic; ++n) {
ax=HRTx0+epicX[n]*tmswf[0][0]+epicY[n]*tmswf[0][1]+epicZ[n]*tmswf[0][2]-posi;
ay=HRTy0+epicX[n]*tmswf[1][0]+epicY[n]*tmswf[1][1]+epicZ[n]*tmswf[1][2]-posj;
az=HRTz0+epicX[n]*tmswf[2][0]+epicY[n]*tmswf[2][1]+epicZ[n]*tmswf[2][2]-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(epicPOT+n)+=dr*rv3;
}
//-------------------- modified by ALF at 2008-8-19 end --------------------<
for(n=0;n<(NL-2)*2;n++) {
// ---- measurement location -------
n0=*(tnd[0]+n)-1;
n1=*(tnd[1]+n)-1;
n2=*(tnd[2]+n)-1;
ax=(*(r[0]+n0)+*(r[0]+n1)+*(r[0]+n2))/3-posi;
ay=(*(r[1]+n0)+*(r[1]+n1)+*(r[1]+n2))/3-posj;
az=(*(r[2]+n0)+*(r[2]+n1)+*(r[2]+n2))/3-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(surfPOTi+n)+=dr*rv3;
//Uinf
}
};////test sf
//*/
//------------ 2009-2-4-16 BY SWF---------
}
}
}
if (useGPU==1 && tid==0 && GPUnum>0)
{
gpu_BSPitmm_DeviceToHost(epicPOTold,POTi_old,der,endoHnnA_old,surfPOTi);
for(i=0;i<Nepic;i++) *(epicPOT+i)=*(epicPOTold+i);
}
//bsptimes1[1] = clock();
//if (iTime0<1800 ) //sf
// {float dd=0,pp=0;
// for(int ff=0;ff<NendoB;ff++)
// {
// dd+=*(endoHnnA+ff);
// //pp+=*(POTi+ff);
// }
// FILE *fptime;
// fptime=fopen(dataPath+"data.txt","a") ;
// fprintf(fptime,"iTime0=%d,endoHnnA=%f,,\n",iTime0,dd);
// printf("iTime0=%d,endoHnnA=%f,,\n",iTime0,dd);
// fclose(fptime);
// };
//---- next Step -----
GRD=0; // ? April 29, 1996
for(i=0;i<3;i++)
GRD+=(bufVCG[1][i]-VCG_old[i])*(bufVCG[1][i]-VCG_old[i]);//GRD+=(bufVCG[1][i]-VCG[i])*(bufVCG[1][i]-VCG[i]);
intvl=iTime0-itbuf;
GRD=sqrt(GRD)/intvl;
GRD=100*GRD/939.513;
/*
i=0;
for (ni=0;ni<mxcycle;ni++) {
eTime=vHB[ni][0];
if ((eTime-iTime0/3)<4*ND) {
i=1;
}
}
if (i==1) {
nextStep=3*ND;
break;
}
if (GRD>10*ND) {
if((bufGRD<2)&&(intvl>3)) {
iTime0=itbuf+3;
continue;
}
nextStep=3*ND;
//nextStep=ND;
break;
}
if (GRD>5*ND) {
nextStep=6*ND; // 9 --> 6 August 11, 1996
//nextStep=2*ND; // 9 --> 6 August 11, 1996
break;
}
nextStep=12*ND; // 21 --> 12 August 11, 1996
*/
nextStep=3*ND; // 21 --> 12 August 11, 1996
break;
}
itbuf=iTime0;
bufGRD=(float)GRD;
// ---- the same value with the previous two ? --
for (n=0;n<3;n++) {
if ((VCG_old[n]!=bufVCG[0][n])||(VCG_old[n]!=bufVCG[1][n])) {//if ((VCG[n]!=bufVCG[0][n])||(VCG[n]!=bufVCG[1][n])) {
n=-1;
break;
}
}
//if (n != -1) {
// answer='s';
// answer='d';
// //return;
//}
//else
//{
answer='d';
for (n=0;n<3;n++) {
bufVCG[0][n]=bufVCG[1][n];
bufVCG[1][n]=VCG_old[n];//bufVCG[1][n]=VCG[n];
}
// --- boundary condition into Account-------
ECGs=(float)0;
for(j=0;j<nPos;j++) ECGs+=*(POTi_old+j);
ECGs*=alp;
for(j=0;j<nPos;j++) {
*(ders+j)=(float)0;
for(k=0;k<nPos;k++) *(ders+j)+=*(aw[j]+k)*(*(der+k)); // aw : j,k or k,j ?
*(POTi_old+j)+=-*(ders+j)-*(bw+j)*ECGs;
}
// body surface triangle
float sum, tmp, triarea, sumarea;
for (j=0; j<(NL-2)*2;j++) {
sum=0.0;
for(k=0;k<(nPos-2)*2;k++) {
tmp=*(surfPOTi+k);
sum+=*(hnn+j*(nPos-2)*2+k) * tmp;
}
*(u1+j)=sum;
}
triarea=0.0;
sumarea=0.0;
for(n=0;n<(NL-2)*2;n++) {
// ---- measurement location -------
n0=*(tnd[0]+n)-1;
n1=*(tnd[1]+n)-1;
n2=*(tnd[2]+n)-1;
ax=(*(r[0]+n0)-*(r[0]+n1));
ay=(*(r[1]+n0)-*(r[1]+n1));
az=(*(r[2]+n0)-*(r[2]+n1));
bx=(*(r[0]+n0)-*(r[0]+n2));
by=(*(r[1]+n0)-*(r[1]+n2));
bz=(*(r[2]+n0)-*(r[2]+n2));
tmp=(ax*by-bx*ay)*(ax*by-bx*ay)+(ax*bz-bx*az)*(ax*bz-bx*az)+(az*by-bz*ay)*(az*by-bz*ay);
tmp=0.5*sqrt(tmp);
triarea+=*(u1+n)*tmp;
sumarea+=tmp;
}
for (n=0; n<NendoB;n++) {
sum=0.0;
for(k=0;k<(nPos-2)*2;k++) {
tmp=*(u1+k);
sum +=*(endoHnnB+n*(nPos-2)*2+k) * tmp;
}
*(endoHnnA_old+n)+=sum-triarea/sumarea;
}
for (n=0; n<NendoC;n++) {
sum=0.0;
for(k=0;k<(nPos-2)*2;k++) {
tmp=*(u1+k);
sum +=*(endoHnnC+n*(nPos-2)*2+k) * tmp;
}
*(endoHnnA_old+n+NendoB)+=sum-triarea/sumarea;
}
//}//sf-090402-5 if (n != -1) {
//-------------------- modified by ALF at 2008-8-19 begin -------------------->
//add
for (n=0; n<Nepic;n++) {
sum=0.0;
for(k=0;k<(nPos-2)*2;k++) {
tmp=*(u1+k);
sum +=*(epicHnn+n*(nPos-2)*2+k) * tmp;
}
*(epicPOT+n)+=sum-triarea/sumarea;
}
//-------------------- modified by ALF at 2008-8-19 end --------------------<
//by sf 090329
/*
#pragma omp critical
{//critical--begin
if (OK_SAV==1) {
CFile f2;
CFileException e2;
//short int index2;
//index2=filepath.FindOneOf(".");
//filepath.SetAt(index2+1,'d');
//filepath.SetAt(index2+2,'p');
//filepath.SetAt(index2+3,'n');
if (iTime0 > 3) {
if (!f2.Open(dataPath+"tour.dpn ",CFile::modeReadWrite, &e2 )) {
f2.Open(dataPath+"tour.dpn ",CFile::modeCreate|CFile::modeReadWrite, &e2 );
}
f2.SeekToEnd();
} else {
if (!f2.Open( dataPath+"tour.dpn ", CFile::modeCreate | CFile::modeWrite, &e2 )) {
#ifdef _DEBUG
afxDump << "File could not be opened " << e2.m_cause << "\n";
#endif
}
}
f2.Write(&iTime0,2);
f2.Write(&idpl,2);
f2.Close();
}
if (OK_SAV==1) {
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'d');
//filepath.SetAt(index+2,'p');
//filepath.SetAt(index+3,'l');
if (iTime0 > 3) {
if (!f.Open(dataPath+"tour.dpl ",CFile::modeReadWrite, &e )) {
f.Open(dataPath+"tour.dpl ",CFile::modeCreate|CFile::modeReadWrite, &e );
}
f.SeekToEnd();
} else {
if (!f.Open(dataPath+"tour.dpl ", CFile::modeCreate | CFile::modeWrite, &e )) {
#ifdef _DEBUG
afxDump << "File could not be opened " << e.m_cause << "\n";
#endif
}
}
}
f.Write(&iTime0,2);//from line 4070
//f.Write(gatherijk,2*countijk);
//f.Write(gatherdpl,4*countijk);
for(j=0;j<countijk;j=j+3)
{
f.Write(gatherijk+j,2*3);
//f.Write(gatherijk+j+1,2);
//f.Write(gatherijk+j+2,2);
f.Write(gatherdpl+j,4*3);
//f.Write(gatherdpl+j+1,4);
//f.Write(gatherdpl+j+2,4);
}
if (OK_SAV==1) {
f.Close();
}
}//critical--end
*/
//by sf 090408 for dpl[]
// int tmpiTime=iTime0/3;
// gatherallijk[tmpiTime] = (int *) malloc( countijk*sizeof(int) );
// gatheralldpl[tmpiTime] = (float *) malloc( countijk*sizeof(float) );
// *(countallijk+tmpiTime)=countijk;
// for(j=0;j<countijk;j=j+1)
//{
// *(gatherallijk[tmpiTime]+j)=gatherijk[j];
// *(gatheralldpl[tmpiTime]+j)=gatherdpl[j];
// }
// if(iTime0==27)
//{
// printf("27bcasti=%d,myid=%d,ijk=%d,%d,g=%d,%d\n",iTime0,myid,*(gatherallijk[iTime0/3]),*(gatherallijk[iTime0/3]+1),gatherijk[0],gatherijk[1]);
// printf("27bcasti=%d,myid=%d,dpl=%f,%f,%f,%f\n",iTime0,myid,*(gatheralldpl[iTime0/3]),*(gatheralldpl[iTime0/3]+1),gatherdpl[0],gatherdpl[1]);
//};
// by sf 090401 BSPMcal if begin
short int nTimeStep_old=iTime0/3;//nTimeStep=nTimeStep+1;
if ((answer!='s')||(nTimeStep_old<=1)) {
int n = 0;
for(n=0;n<nPos;n++) {
*(POT[n]+nTimeStep_old)=*(POTi_old+n);
}
// add endocardial potential
//printf("iTime=%d,tid=%d,tnum=%d,nTimeStep_old=%d\n",iTime0,omp_get_thread_num(),omp_get_num_threads(),nTimeStep_old);
for(n=0;n<2*NENDO*ND3;n++) {
*(endoPOT[nTimeStep_old-1]+n)=*(endoHnnA_old+n);
}
if(iTime0<=nsnrt) {
//nVCG_old++;//nVCG=nVCG+1;nVCG-->nTimeStep
for(n=0;n<3;n++) {
*(VCGs[n]+nTimeStep_old)=VCG_old[n]/ND; //*(VCGs[n]+nVCG)=VCG[n]/ND;
}
}
}
//bsptimes1[2] = clock();
//printf("%f,%f,bsptimes1[1-0]-bsptimes1[2-1] tid=%d,iTime0=%d\n",(bsptimes1[1]-bsptimes1[0])/CLK_TCK,(bsptimes1[2]-bsptimes1[1])/CLK_TCK,tid,iTime0);
free(endoHnnA_old);free(POTi_old);// by sf 090402-3
free(epicPOTold);
// by sf 090401 if end
free(u1);
free(surfPOTi);
}
// *********** action potential calculation *******
// Action-potential value (template lookup) for the cell at grid point
// (i0,j0,k0) of type iCell0 at fine time slot iTime1 (3 slots per step).
// Returns the resting potential iparm[iCell0-1][6] when the point has never
// been excited or its action potential has already ended; otherwise indexes
// the per-cell-type AP waveform ydata[iCell0-1], with the plateau stretched
// by the APD extension iext.
float aptcalm(short int i0,short int j0,short int k0,short int iCell0,short int iTime1) {
short int istp,irsd,lacl,lacl1,iext;
float ACTval;
// ++++ resting potential +++++
ACTval=(float)(*(iparm+NPARM*(iCell0-1)+6));
istp=(short int)(iTime1/3); // each step has 3 time slots
//rdXCTm(istp,i0,j0,k0); //by sf-090401 ,,idlttidltcOpenMP// get idltt = istp - ncyc, the current step in current cycle
//idlttold-->idltt idltcold-->idltc
short int ncyc,n1cyc;
short int i00,j00,k00,icc,idlttold,idltcold;
i00=i0;j00=j0;k00=k0;icc=istp;
idlttold=INFTIME; /*<Comment by ALF> period between 2 continuous excitation*/
idltcold=0; /*<Comment by ALF> delta of 2 periods */
short int n;
long locxct;
// locXCT maps the 3-D grid point to its row in the per-point excitation-time
// table mapXCTm; a negative value means no record exists for this point.
locxct=*(locXCT[k00]+j00*NI+i00);
if(locxct<0)
{//return;
}
else
{
// Scan the recorded excitation cycles from newest to oldest for the latest
// one at or before the current step icc; idlttold = steps since that firing.
for(n=NCYCL-1;n>=0;n--) {
ncyc=*(mapXCTm[n]+locxct);
if (icc>=ncyc) {
idlttold=icc-ncyc;
break;
}
}
if ((n<=0)||(n>=NCYCL-1))
{//return;
}
else
{
// idltcold: change between the two surrounding cycle lengths
// (next-cycle interval minus previous-cycle interval).
n1cyc=*(mapXCTm[n+1]+locxct);
if (n1cyc==INFTIME)
{
}
else
{
idltcold=n1cyc-ncyc-ncyc+*(mapXCTm[n-1]+locxct);
};
};
};
//rdXCTm(istp,i0,j0,k0);---end --by sf
if (idlttold==INFTIME) { // ACTval=-90 situation
return ACTval;
}
// Convert back to fine time slots: idlttold counts steps, irsd the slot
// remainder within the current step.
irsd=iTime1-istp*3;
idlttold=idlttold*3+irsd;
// iext=*(mapACT[k0]+j0*NI+i0)+idltc * 3 * *(iparm+(iCell0-1)*NPARM+10)/100;
// APD extension: per-point map value plus a restitution term scaled by the
// cycle-length change (iparm[iCell0-1][10] divided by 100, i.e. a percentage).
iext=*(mapAPD[k0]+j0*NI+i0)+idltcold * 3 * *(iparm+(iCell0-1)*NPARM+10)/100;
// lacl: extended time to the end of phase 3 (see note at end of function);
// past that point the cell is back at rest.
lacl=la0123[iCell0-1]+iext;
if(idlttold>lacl) return ACTval;
lacl1=la012[iCell0-1]+iext;
//TRACE("\naptcalm %d %d %d %d %d %d",idltt,lacl1,la012[iCell0-1],iext,lacl,*(mapAPD[k0]+j0*NI+i0));
if(idlttold>lacl1) {
// Repolarisation phase: shift the lookup back by the extension so the
// waveform tail is sampled unstretched.
idlttold-=iext;
ACTval=*(ydata[iCell0-1]+idlttold);
return ACTval;
}
// Plateau: clamp to the end-of-phase-2 sample while within the extension.
if(idlttold>la012[iCell0-1]) idlttold=la012[iCell0-1];
ACTval=*(ydata[iCell0-1]+idlttold);
return ACTval;
// --- atrial REPolarization ignored ----
// if(iCell0==2) {
// ACTval=(float)(*(iparm+1*NPARM+7));
// return ACTval;
// }
// +++ la012, time to the end of phase 2; +++++
// +++ la0123,time to the end of phase 3 +++++
}
// ECG calculation
// Compute the 12-lead ECG from the stored body-surface potentials POT[][]
// and write it (as scaled short ints) to "tour.ecg": first the time-step
// count, then the step indices, then 12 lead values per step.
void ECGcal(void) {
float ECG[12],ECGr,ECGl,ECGf;
short int iECG[12],i,j;
float wilson;
// Scale factor from simulated potential units to the stored integer ECG
// units (applied just before the short-int cast below).
float eff=(float)26.5730/ND;
// Save ecg data
CFile f;
CFileException e;
//short int index;
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'e');
//filepath.SetAt(index+2,'c');
//filepath.SetAt(index+3,'g');
if (!f.Open( dataPath+"tour.ecg ", CFile::modeCreate | CFile::modeWrite, &e )) {
#ifdef _DEBUG
afxDump << "File could not be opened " << e.m_cause << "\n";
#endif
}
//FILE *fptime;//sf
//fptime=fopen(dataPath+"ecg-int-gpu.txt","w") ;//sf
//fprintf(fptime,"%d\n",nTimeStep);//sf
// Header: number of steps followed by each step index (2 bytes each).
f.Write(&nTimeStep,2);
for (j=1;j<=nTimeStep;j++)
{f.Write(iStep+j,2);
//fprintf(fptime,"%d\n",*(iStep+j));//sf
}
//fclose(fptime);//sf
//fptime=fopen(dataPath+"ecg-float-gpu.txt","w") ;//sf
// Compute ECG
for (i=1;i<=nTimeStep;i++) {
//ECGr=*(POT[nv[0]]+i);
//ECGl=*(POT[nv[1]]+i);
//ECGf=*(POT[nv[2]]+i);
// Potentials at the three electrode nodes nv[0..2] — presumably the
// right-arm, left-arm and left-foot limb electrodes; verify nv[] setup.
ECGr=*(POT[nv[0]]+i);
ECGl=*(POT[nv[1]]+i);
ECGf=*(POT[nv[2]]+i);
// Wilson central terminal: mean of the three limb potentials.
wilson=(ECGr+ECGl+ECGf)/3;
// ECG[0..5]: chest leads referenced to the central terminal (hard-coded
// surface node indices — presumably V1..V6; confirm against the node map).
ECG[0]=*(POT[94]+i)-wilson;
ECG[1]=*(POT[96]+i)-wilson;
ECG[2]=*(POT[117]+i)-wilson;
ECG[3]=*(POT[138]+i)-wilson;
ECG[4]=(*(POT[139]+i)/2+*(POT[140]+i)/2)-wilson;
ECG[5]=*(POT[141]+i)-wilson;
// ECG[6..8]: bipolar limb-lead differences (I = L-R, II = F-R, III = F-L).
ECG[6]=ECGl-ECGr;
ECG[7]=ECGf-ECGr;
ECG[8]=ECGf-ECGl;
// ECG[9..11]: augmented leads — the 3/2 factor matches aVR/aVL/aVF.
ECG[9]=(ECGr-wilson)*3/2;
ECG[10]=(ECGl-wilson)*3/2;
ECG[11]=(ECGf-wilson)*3/2;
// Quantise each lead to a short int for the binary output record.
iECG[0]=(short int)(eff*ECG[0]);
iECG[1]=(short int)(eff*ECG[1]);
iECG[2]=(short int)(eff*ECG[2]);
iECG[3]=(short int)(eff*ECG[3]);
iECG[4]=(short int)(eff*ECG[4]);
iECG[5]=(short int)(eff*ECG[5]);
iECG[6]=(short int)(eff*ECG[6]);
iECG[7]=(short int)(eff*ECG[7]);
iECG[8]=(short int)(eff*ECG[8]);
iECG[9]=(short int)(eff*ECG[9]);
iECG[10]=(short int)(eff*ECG[10]);
iECG[11]=(short int)(eff*ECG[11]);
for (j=0;j<12;j++) {
f.Write(&iECG[j],2);
//fprintf(fptime,"%d\n",iECG[j]);//sf
}
//TRACE("\n %3d %5d %f %f %f %f ",i, iECG[6], ECG[6], ECGl,ECGr,ECGf);
}
f.Close();
//fclose(fptime);//sf
}
//-------------------- modified by sf at 2008-4-27 begin -------------------->
//modified
// Count the work items for time slot iTime0: one per His-bundle excitation
// scheduled at this step, plus one per myocardial grid point (sampled every
// nskip cells) whose action potential differs from at least one of its six
// neighbours — i.e. a point that will contribute a dipole.
// NOTE(review): presumably used to size/balance the per-step dipole workload
// across workers — confirm at the call site.  Many locals below are unused
// here; they appear to mirror the full BSPitmm computation this counts for.
int BSPitmmcount(short int iTime0) {
//void BSPitmm(short int iTime0, short int **tnd,float *hnn, float *endoHnnA, float *endoHnnB, float *endoHnnC, float *epicHnn, float *epicPOT) {
//ASSERT(epicHnn != NULL);
//-------------------- modified by ALF at 2008-8-19 end --------------------<
float aptcalm(short int,short int,short int,short int,short int);
void anfct(short int i, short int j, short int k, float v[3]);
int loopcount=0;
char iCell;
const short int OK_SAV=1;
// 6-neighbour offsets: entries l and l+6 are opposite directions on one axis.
short int iseqx[12]={ -1, 0, 0, 1, 1, 0, 1, 0, 0,-1,-1, 0 };
short int iseqy[12]={ 0,-1, 0,-1, 0, 1, 0, 1, 0, 1, 0,-1 };
short int iseqz[12]={ 0, 0,-1, 0,-1,-1, 0, 0, 1, 0, 1, 1 };
short int nskip=2;
short int i,j,k,l,ix,iy,iz,icell,l6,jx,jy,jz,jcell;
int nsum,n;
int intvl;
int idpl;
float asd,add,rtmax,gsum,compm,compp,compo,ax,ay,az;
float r1,r3,r5,dr,ds,rv3,bx,by,bz,ECGs;
//float der[NL],ders[NL];
double grad[6];
//float dpl[3];
float posi, posj, posk;
float r2,GRD;
float tmpdpl;
// endocardial
int n0,n1,n2,ni;
//float *surfPOTi,*u1;
short int nhb, eTime;
short int index;
// --- His-bundle sites firing at this step (iTime0/3 = coarse step) ---
for (nhb=0; nhb<nHB; nhb++) {
i=iHB[0][nhb];
j=iHB[1][nhb];
k=iHB[2][nhb];
for (ni=0;ni<mxcycle;ni++) {
eTime=vHB[ni][nhb];
if (eTime==(short int)(iTime0/3)) {
loopcount++;
}
}
}
// --- myocardial grid scan, subsampled every nskip cells per axis ---
for (k=0;k<=NK;k+=nskip) { // i,j // < --> <= August 10,1996
for (j=0;j<NJ;j+=nskip) {
for (i=NI;i>-1;i-=nskip) {
// Skip points outside the per-column tissue extent [kmin, kmax].
if (k<*(kmin+NI*j+i) || k>*(kmax+NI*j+i)) {
continue;
}
iCell=*(mapCell[k]+NI*j+i);
// +++++++++ special fiber neglected +++++
if (iCell<=1) continue; /*<Comment by ALF> null or SN*/
if (iCell>=15) continue; /*<Comment by ALF> out of define*/
// include fiber conduction
if((iCell>=3)&&(iCell<=6)) continue; /*<Comment by ALF> not AVN HB BB PKJ*/
compo=aptcalm(i,j,k,iCell,iTime0);
// --------- neighberhood search ---------
// grad[l] accumulates the potential difference along axis direction l
// (both the forward neighbour at l and the backward one at l+6).
gsum=(float)0;
for (l=0;l<6;l++) {
compm=(float)0.0;
compp=(float)0.0;
grad[l]=(double)0.0;
ix=i+iseqx[l];
iy=j+iseqy[l];
iz=k+iseqz[l];
if ((ix>=0)&&(ix<NI)&&(iy>=0)&&(iy<NJ)&&(iz>=0)&&(iz<NK)) {
icell=*(mapCell[iz]+iy*NI+ix);
// Same eligibility filter as the centre point.
if ((icell>1)&&(icell<15)&&((icell<3)||(icell>6))) {
compm=aptcalm(ix,iy,iz,icell,iTime0);
//if (iTime0 ==3 && compm != -90.) TRACE("\nB %d %d %d %f",ix,iy,iz, compm);
grad[l]+=compm-compo;
}
}
l6=l+6; /*<Comment by ALF> opposite one*/
jx=i+iseqx[l6];
jy=j+iseqy[l6];
jz=k+iseqz[l6];
if ((jx>=0)&&(jx<NI)&&(jy>=0)&&(jy<NJ)&&(jz>=0)&&(jz<NK)) {
jcell=*(mapCell[jz]+jy*NI+jx);
if ((jcell>1)&&(jcell<15)&&((jcell<3)||(jcell>6))) {
compp=aptcalm(jx,jy,jz,jcell,iTime0);
grad[l]+=compo-compp;
}
}
}
// gsum: total |gradient| over the 6 axes; zero means no dipole here.
for (l=0;l<6;l++)
gsum+=(float)fabs((double)grad[l]);
if (gsum==0) continue;
loopcount++;
}
}
}
return loopcount;
}
#if __DEVICE_EMULATION__
bool InitCUDA(void){return true;}
#else
// Select the first GPU device with compute capability >= 1.x and make it the
// current device for this host thread.
// Returns true on success; false (with a message on stderr) when no usable
// device exists or device selection fails.  Unlike the original, the return
// codes of hipGetDeviceCount and hipSetDevice are checked.
bool InitCUDA(void)
{
	int count = 0;
	int i = 0;
	// A failed count query is treated the same as "no device".
	if(hipGetDeviceCount(&count) != hipSuccess || count == 0) {
		fprintf(stderr, "There is no device.\n");
		return false;
	}
	for(i = 0; i < count; i++) {
		hipDeviceProp_t prop;
		if(hipGetDeviceProperties(&prop, i) == hipSuccess) {
			if(prop.major >= 1) {
				break;
			}
		}
	}
	if(i == count) {
		fprintf(stderr, "There is no device supporting CUDA.\n");
		return false;
	}
	if(hipSetDevice(i) != hipSuccess) {
		fprintf(stderr, "Failed to select device %d.\n", i);
		return false;
	}
	printf("CUDA initialized.\n");
	return true;
}
#endif
/************************************************************************/
/* Example */
/************************************************************************/
// Dipole-to-epicardium kernel: add the potential (d . r)/|r|^3 of the dipole
// k_dpl located at (k_posi,k_posj,k_posk) to every epicardial sample point.
// One thread per point; the launch rounds k_Nepic up to a block multiple,
// hence the bounds guard.  k_tm is the 3x6 heart-to-torso transform stored
// row-major with stride 6 (only columns 0-2 are read); (k_HRTx0,k_HRTy0,
// k_HRTz0) is the heart-frame origin offset.
__global__ static void k_dpl_Nepic(short int *k_epicX,short int *k_epicY,short int *k_epicZ,float k_posi,float k_posj,float k_posk,
float k_HRTx0,float k_HRTy0,float k_HRTz0,float *k_dpl,float *k_epicPOTold,
float *k_tm,short int k_Nepic)
{
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= k_Nepic) return;   // guard: grid may overshoot Nepic

	const float px = k_epicX[idx];
	const float py = k_epicY[idx];
	const float pz = k_epicZ[idx];

	// Transform the heart-frame point into torso coordinates, then take the
	// vector from the dipole position to the point.
	const float ax = k_HRTx0 + px * k_tm[0]  + py * k_tm[1]  + pz * k_tm[2]  - k_posi;
	const float ay = k_HRTy0 + px * k_tm[6]  + py * k_tm[7]  + pz * k_tm[8]  - k_posj;
	const float az = k_HRTz0 + px * k_tm[12] + py * k_tm[13] + pz * k_tm[14] - k_posk;

	const float r2 = ax * ax + ay * ay + az * az;
	const float r1 = (float)sqrt(r2);
	const float r3 = (float)(r1 * r2);

	// Dipole potential contribution: (d . r) / |r|^3, accumulated in place.
	const float dr = k_dpl[0] * ax + k_dpl[1] * ay + k_dpl[2] * az;
	k_epicPOTold[idx] += dr * (1 / r3);
}
// Dipole-to-endocardium kernel: same (d . r)/|r|^3 computation as
// k_dpl_Nepic, but the contribution is accumulated into k_endoHnnA starting
// at k_offset, so the B and C endocardial point sets can share one
// accumulator array.  One thread per point; guard handles launch overshoot.
__global__ static void k_dpl_Nendo(float k_posi,float k_posj,float k_posk,
float k_HRTx0,float k_HRTy0,float k_HRTz0,int k_NendoB,int k_offset,float *k_dpl,
float *k_endoHnnA,short int *k_endoBx,short int *k_endoBy,short int *k_endoBz,
float *k_tm)
{
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= k_NendoB) return;   // guard: grid may overshoot the point count

	const float ex = k_endoBx[idx];
	const float ey = k_endoBy[idx];
	const float ez = k_endoBz[idx];

	// Heart-frame point -> torso frame (k_tm row stride is 6), then the
	// vector from the dipole position to the point.
	const float ax = k_HRTx0 + ex * k_tm[0]  + ey * k_tm[1]  + ez * k_tm[2]  - k_posi;
	const float ay = k_HRTy0 + ex * k_tm[6]  + ey * k_tm[7]  + ez * k_tm[8]  - k_posj;
	const float az = k_HRTz0 + ex * k_tm[12] + ey * k_tm[13] + ez * k_tm[14] - k_posk;

	const float r2 = ax * ax + ay * ay + az * az;
	const float r1 = (float)sqrt(r2);
	const float r3 = (float)(r1 * r2);

	const float dr = k_dpl[0] * ax + k_dpl[1] * ay + k_dpl[2] * az;
	k_endoHnnA[k_offset + idx] += dr * (1 / r3);
}
// Dipole-to-body-surface-triangle kernel: add (d . r)/|r|^3 evaluated at each
// surface triangle's centroid into d_surfPOTi.  d_tnd stores the three
// 1-based vertex index lists consecutively ((NL-2)*2 entries each); k_r
// stores the x, y and z node coordinates consecutively (NL entries each).
// There are (NL-2)*2 triangles; the bounds guard (commented out in the
// original, which relied on the <<<2,342>>> launch matching exactly) makes
// the kernel safe for any launch covering at least that many threads.
__global__ static void k_dpl_nPos_2(float k_posi,float k_posj,float k_posk,float *k_dpl,float *k_r,float *d_surfPOTi,
short int *d_tnd)
{
	float ax,ay,az,r1,r2,r3,dr,rv3;
	int n0,n1,n2;
	int n=blockDim.x * blockIdx.x + threadIdx.x;
	if (n >= (NL-2)*2) return;   // restored bounds guard
	// 1-based vertex indices of triangle n.
	n0=d_tnd[n]-1;
	n1=d_tnd[(NL-2)*2+n]-1;
	n2=d_tnd[(NL-2)*2*2+n]-1;
	// Vector from the dipole position to the triangle centroid.
	ax=(k_r[n0]+k_r[n1]+k_r[n2])/3-k_posi;
	ay=(k_r[NL+n0]+k_r[NL+n1]+k_r[NL+n2])/3-k_posj;
	az=(k_r[2*NL+n0]+k_r[2*NL+n1]+k_r[2*NL+n2])/3-k_posk;
	r2=ax*ax+ay*ay+az*az;
	r1=(float)sqrt(r2);
	r3=(float)(r1*r2);
	// Dipole potential contribution: (d . r) / |r|^3.
	dr=k_dpl[0]*ax;
	dr+=k_dpl[1]*ay;
	dr+=k_dpl[2]*az;
	rv3=1/r3;
	*(d_surfPOTi+n)+=dr*rv3;
}
// Dipole-to-electrode kernel: for each of the k_nPos torso electrode nodes,
// accumulate the dipole potential (d . r)/|r|^3 into k_POTi and the dipole
// field projected onto the node normal k_rn into k_der.  Node coordinates
// and normals are stored as x,y,z planes with stride NL.
// Fixes in this revision: the previously-unused k_nPos parameter now bounds
// the thread index, and the index includes blockIdx so the kernel is also
// correct for multi-block launches (behaviour is unchanged for the existing
// <<<1, nPos>>> launches).
__global__ void k_dpl_nPos(float k_posi,float k_posj,float k_posk,int k_nPos,float *k_dpl,
float *k_POTi,float *k_der,float *k_r ,float *k_rn )
{
	float ax,ay,az,r1,r2,r3,r5,dr,ds,rv3,bx,by,bz,ret_der,ret_POTi;
	int n=blockDim.x * blockIdx.x + threadIdx.x;
	if (n >= k_nPos) return;   // bounds guard (k_nPos was previously unused)
	// Vector from the dipole position to electrode node n.
	ax=k_r[n]-k_posi;
	ay=k_r[NL+n]-k_posj;
	az=k_r[2*NL+n]-k_posk;
	r2=ax*ax+ay*ay+az*az;
	r1=(float)sqrt(r2);
	r3=(float)(r1*r2);
	r5=(float)(r2*r3);
	dr=k_dpl[0]*ax+k_dpl[1]*ay+k_dpl[2]*az;
	ds=3*dr/r5;
	rv3=1/r3;
	// Field of the dipole at the node: d/|r|^3 - 3(d.r) r/|r|^5.
	bx=k_dpl[0]*rv3-ax*ds;
	by=k_dpl[1]*rv3-ay*ds;
	bz=k_dpl[2]*rv3-az*ds;
	// Accumulate the normal component into the derivative buffer.
	ret_der = k_der[n];
	ret_der += k_rn[n]*bx;
	ret_der += k_rn[NL+n]*by;
	ret_der += k_rn[2*NL+n]*bz;
	k_der[n] = ret_der;
	// Accumulate the scalar potential.
	ret_POTi = k_POTi[n];
	ret_POTi += dr*rv3;
	k_POTi[n] = ret_POTi;
}
// Release the geometry buffers uploaded by gpu_transdata().  Note the
// accumulator buffers (d_POTi, d_der, d_endoHnnA, d_surfPOTi, d_epicPOTold)
// are not freed here.
extern "C" void gpu_freetransdata()
{
	hipFree(d_tm);
	hipFree(d_endoBx);
	hipFree(d_endoBy);
	hipFree(d_endoBz);
	hipFree(d_endoCx);
	hipFree(d_endoCy);
	hipFree(d_endoCz);
	hipFree(d_r);
	hipFree(d_rn);
	hipFree(d_tnd);
}
//int main(int argc, char** argv)
// Probe and initialise the GPU runtime.  Returns the number of visible
// devices, or 0 when initialisation fails.  argc/argv are accepted for the
// original launcher interface but not used.
extern "C" short int cudamain(int argc, char** argv)
{
	fprintf(stdout, "before \n");
	fflush(stdout);
	if (!InitCUDA()) {
		return 0;
	}
	fflush(stdout);
	int deviceTotal = 0;
	hipGetDeviceCount(&deviceTotal);
	short int gpuCount = (short int)deviceTotal;
	printf("CUDA is OK=%d\n", gpuCount);
	return gpuCount;
}
// Upload the time-invariant geometry to the GPU: torso node coordinates g_r
// and normals g_rn (flattened to NL*3), surface triangle vertex indices
// g_tnd, the 3x6 heart-to-torso transform g_tm, and the epicardial and
// endocardial sample-point coordinates.  Also allocates (but does not
// initialise) the per-time-step accumulator buffers; they are zeroed each
// step by gpu_BSPitmm_HostToDevice().
// NOTE(review): hipMalloc/hipMemcpy return codes are not checked anywhere in
// this function — a failed allocation is silent until a later kernel fault.
extern "C" void gpu_transdata(short int g_epicX[Nepic],short int g_epicY[Nepic],short int g_epicZ[Nepic],short int *g_tnd[3],float *g_r[3],float *g_rn[3],short int g_endoBx[NENDO*ND3],short int g_endoBy[NENDO*ND3],short int g_endoBz[NENDO*ND3],short int g_endoCx[NENDO*ND3],short int g_endoCy[NENDO*ND3],short int g_endoCz[NENDO*ND3],float g_tm[3][6])
{ //,;
int i,j;
//float *d_r[3],*d_rn[3],*d_tm;
float cg_r[NL*3],cg_rn[NL*3];
//if(!InitCUDA()) {
//printf("CUDA error");
// //return 0;
//}
// Flatten the three coordinate/normal arrays into single contiguous buffers
// (plane layout: x at [0..NL), y at [NL..2NL), z at [2NL..3NL)).
for(i=0;i<3;i++)
for(j=0;j<NL;j++)
{
cg_r[i*NL+j]=*(g_r[i]+j);
cg_rn[i*NL+j]=*(g_rn[i]+j);
}
( hipMalloc((void**) &d_r, sizeof(float) * NL*3));
( hipMemcpy(d_r, cg_r, sizeof(float) * NL*3, hipMemcpyHostToDevice));
( hipMalloc((void**) &d_rn, sizeof(float) * NL*3));
( hipMemcpy(d_rn, cg_rn, sizeof(float) * NL*3, hipMemcpyHostToDevice));
// Flatten the triangle vertex-index lists the same way ((NL-2)*2 per list).
short int cg_tnd[(NL-2)*2*3];
for(i=0;i<3;i++)
for(j=0;j<(NL-2)*2;j++)
{
cg_tnd[i*(NL-2)*2+j]=*(g_tnd[i]+j);
}
( hipMalloc((void**) &d_tnd, sizeof(short int) * (NL-2)*2*3));
( hipMemcpy(d_tnd, cg_tnd, sizeof(short int) * (NL-2)*2*3, hipMemcpyHostToDevice));
//for(i=0;i<3;i++)
//{
// //( hipMalloc((void**) &d_r[i], sizeof(float) * NL));
// //( hipMemcpy((d_r[i]), (g_r[i]), sizeof(float) * NL, hipMemcpyHostToDevice));
// //( hipMalloc((void**) &d_rn[i], sizeof(float) * NL));
// //( hipMemcpy((d_rn[i]), (g_rn[i]), sizeof(float) * NL, hipMemcpyHostToDevice));
// ( hipMalloc((void**) &d_tnd[i], sizeof(short int) * (NL-2)*2));
// ( hipMemcpy((d_tnd[i]), (g_tnd[i]), sizeof(short int) * (NL-2)*2, hipMemcpyHostToDevice));
//};
// Flatten the 3x6 transform row-major (kernels read it with stride 6).
float cg_tm[3*6];
for(i=0;i<3;i++)
for(j=0;j<6;j++)
{
cg_tm[i*6+j]=*(g_tm[i]+j);
}
( hipMalloc((void**) &d_tm, sizeof(float) * 3 * 6));
( hipMemcpy(d_tm, cg_tm, (sizeof(float) * 3 * 6), hipMemcpyHostToDevice));
// Epicardial sample-point coordinates.
( hipMalloc((void**) &d_epicX, sizeof(short int) * Nepic));
( hipMalloc((void**) &d_epicY, sizeof(short int) * Nepic));
( hipMalloc((void**) &d_epicZ, sizeof(short int) * Nepic));
( hipMemcpy((d_epicX),(g_epicX) , (sizeof(short int) * Nepic), hipMemcpyHostToDevice));
( hipMemcpy(d_epicY,g_epicY , sizeof(short int) * Nepic, hipMemcpyHostToDevice));
( hipMemcpy(d_epicZ, g_epicZ, sizeof(short int) * Nepic, hipMemcpyHostToDevice));
// Endocardial sample-point coordinates, sets B and C.
( hipMalloc((void**) &d_endoBx, sizeof(short int) * NENDO*ND3));
( hipMalloc((void**) &d_endoBy, sizeof(short int) * NENDO*ND3));
( hipMalloc((void**) &d_endoBz, sizeof(short int) * NENDO*ND3));
( hipMalloc((void**) &d_endoCx, sizeof(short int) * NENDO*ND3));
( hipMalloc((void**) &d_endoCy, sizeof(short int) * NENDO*ND3));
( hipMalloc((void**) &d_endoCz, sizeof(short int) * NENDO*ND3));
( hipMemcpy((d_endoBx),(g_endoBx) , (sizeof(short int) * NENDO*ND3), hipMemcpyHostToDevice));
( hipMemcpy(d_endoBy,g_endoBy , sizeof(short int) * NENDO*ND3, hipMemcpyHostToDevice));
( hipMemcpy(d_endoBz, g_endoBz, sizeof(short int) * NENDO*ND3, hipMemcpyHostToDevice));
( hipMemcpy(d_endoCx,g_endoCx , sizeof(short int) * NENDO*ND3, hipMemcpyHostToDevice));
( hipMemcpy(d_endoCy,g_endoCy , sizeof(short int) * NENDO*ND3, hipMemcpyHostToDevice));
( hipMemcpy(d_endoCz,g_endoCz , sizeof(short int) * NENDO*ND3, hipMemcpyHostToDevice));
//,
// Per-time-step accumulator buffers (zeroed each step, never freed here).
( hipMalloc((void**) &d_epicPOTold, sizeof(float) * Nepic));
( hipMalloc((void**) &d_POTi, sizeof(float) * NL));
( hipMalloc((void**) &d_der, sizeof(float) * NL));
( hipMalloc((void**) &d_endoHnnA, sizeof(float) * 2*NENDO*ND3));
( hipMalloc((void**) &d_surfPOTi, sizeof(float) * (NL-2)*2));
}
//extern "C" void gpu_BSPitmm_Malloc(float *g_POTi,float g_der[NL],float *g_endoHnnA,float *g_surfPOTi)
//{
// ( hipMalloc((void**) &d_epicPOTold, sizeof(float) * Nepic));
// ( hipMalloc((void**) &d_POTi, sizeof(float) * NL));
// ( hipMalloc((void**) &d_der, sizeof(float) * NL));
// ( hipMalloc((void**) &d_endoHnnA, sizeof(float) * 2*NENDO*ND3));
// ( hipMalloc((void**) &d_surfPOTi, sizeof(float) * (NL-2)*2));
//}
// Reset the device-side accumulators for one time step.  The host-array
// parameters are kept for interface compatibility but are not read:
// accumulation always starts from zero on the device (the commented-out
// uploads in the original were superseded by the memsets).
extern "C" void gpu_BSPitmm_HostToDevice(float *g_POTi,float g_der[NL],float *g_endoHnnA,float *g_surfPOTi)
{
	hipMemset(d_epicPOTold, 0, Nepic * sizeof(float));
	hipMemset(d_POTi, 0, NL * sizeof(float));
	hipMemset(d_der, 0, NL * sizeof(float));
	hipMemset(d_endoHnnA, 0, 2 * NENDO * ND3 * sizeof(float));
	hipMemset(d_surfPOTi, 0, (NL - 2) * 2 * sizeof(float));
}
// Copy the finished per-time-step accumulators from the device back into the
// caller's host buffers.
extern "C" void gpu_BSPitmm_DeviceToHost(float *g_epicPOTold,float *g_POTi,float g_der[NL],float *g_endoHnnA,float *g_surfPOTi)
{
	hipMemcpy(g_epicPOTold, d_epicPOTold, Nepic * sizeof(float), hipMemcpyDeviceToHost);
	hipMemcpy(g_POTi, d_POTi, NL * sizeof(float), hipMemcpyDeviceToHost);
	hipMemcpy(g_der, d_der, NL * sizeof(float), hipMemcpyDeviceToHost);
	hipMemcpy(g_endoHnnA, d_endoHnnA, 2 * NENDO * ND3 * sizeof(float), hipMemcpyDeviceToHost);
	hipMemcpy(g_surfPOTi, d_surfPOTi, (NL - 2) * 2 * sizeof(float), hipMemcpyDeviceToHost);
}
// Launch all per-dipole kernels for one dipole g_dpl at position
// (g_posi,g_posj,g_posk): torso electrode nodes, both endocardial point sets
// (set C appended after set B in d_endoHnnA), body-surface triangles, and —
// when do_epicPOT==1 — the epicardial points.  The d_* buffers must have been
// set up by gpu_transdata(); the g_* array parameters beyond g_dpl are kept
// for interface compatibility but the device copies are used instead.
// NOTE(review): a hipMalloc/hipFree pair for 3 floats on every call is a
// known hot-path cost; a cached device buffer would avoid it, but the file
// shows OpenMP use elsewhere, so confirm single-threaded calling before
// introducing a static.
extern "C" void gpu_dpl_all(short int do_epicPOT,float g_posi,float g_posj,float g_posk,short int g_nPos,float g_dpl[3],float *g_POTi,float g_der[NL],
float g_HRTx0,float g_HRTy0,float g_HRTz0,int g_NendoB,int g_NendoC,
float *g_endoHnnA,short int *g_endoBx,short int *g_endoBy,short int *g_endoBz,float g_tm[3][6],float *g_epicPOTold)
{
// Stage the 3-component dipole vector on the device.
float * d_dpl;
( hipMalloc((void**) &d_dpl, sizeof(float) * 3));
( hipMemcpy(d_dpl, g_dpl, sizeof(float) * 3, hipMemcpyHostToDevice));
k_dpl_nPos<<<1, g_nPos>>>(g_posi,g_posj,g_posk,g_nPos,d_dpl,d_POTi,d_der,d_r ,d_rn);
//if (g_offset<100)
//{
// Endocardial set B at offset 0, then set C appended at offset g_NendoB.
k_dpl_Nendo<<<6, 512>>>(g_posi,g_posj,g_posk,g_HRTx0,g_HRTy0,g_HRTz0,g_NendoB,0,d_dpl,d_endoHnnA,d_endoBx,d_endoBy,d_endoBz,d_tm);
//}
//else
//{
k_dpl_Nendo<<<6, 512>>>(g_posi,g_posj,g_posk,g_HRTx0,g_HRTy0,g_HRTz0,g_NendoC,g_NendoB,d_dpl,d_endoHnnA,d_endoCx,d_endoCy,d_endoCz,d_tm);
//};
k_dpl_nPos_2<<<2, 342>>>(g_posi,g_posj,g_posk,d_dpl,d_r,d_surfPOTi,d_tnd);
if (do_epicPOT==1) k_dpl_Nepic<<<Nepic/512+1, 512>>>(d_epicX,d_epicY,d_epicZ,g_posi,g_posj,g_posk,g_HRTx0,g_HRTy0,g_HRTz0,d_dpl,d_epicPOTold,d_tm,Nepic);
(hipFree(d_dpl));
}
// Launch only the epicardial kernel for one dipole (stand-alone variant of
// the epicardial step in gpu_dpl_all).  The g_tm/g_epicPOTold parameters are
// unused here; the device copies d_tm/d_epicPOTold are used instead.
extern "C" void gpu_dpl_Nepic(float g_posi,float g_posj,float g_posk,float g_HRTx0,float g_HRTy0,float g_HRTz0,
float g_dpl[3],float g_tm[3][6],float *g_epicPOTold)
{
	float *devDipole;
	hipMalloc((void**) &devDipole, sizeof(float) * 3);
	hipMemcpy(devDipole, g_dpl, sizeof(float) * 3, hipMemcpyHostToDevice);
	k_dpl_Nepic<<<Nepic/512+1, 512>>>(d_epicX, d_epicY, d_epicZ, g_posi, g_posj, g_posk,
	                                  g_HRTx0, g_HRTy0, g_HRTz0, devDipole, d_epicPOTold, d_tm, Nepic);
	hipFree(devDipole);
}
// Launch the endocardial kernel for one dipole.  g_offset selects the point
// set: values < 100 address set B (d_endoB*), larger offsets set C
// (d_endoC*).  The g_endoBx/y/z host pointers and g_tm are unused — the
// device copies from gpu_transdata() are passed to the kernel.
extern "C" void gpu_dpl_Nendo(float g_posi,float g_posj,float g_posk,float g_HRTx0,float g_HRTy0,float g_HRTz0,
int g_NendoBC,int g_offset,float g_dpl[3],float *g_endoHnnA,
short int *g_endoBx,short int *g_endoBy,short int *g_endoBz,float g_tm[3][6])
{
	float *devDipole;
	hipMalloc((void**) &devDipole, sizeof(float) * 3);
	hipMemcpy(devDipole, g_dpl, sizeof(float) * 3, hipMemcpyHostToDevice);
	if (g_offset < 100) {
		k_dpl_Nendo<<<6, 512>>>(g_posi, g_posj, g_posk, g_HRTx0, g_HRTy0, g_HRTz0,
		                        g_NendoBC, g_offset, devDipole, d_endoHnnA, d_endoBx, d_endoBy, d_endoBz, d_tm);
	} else {
		k_dpl_Nendo<<<6, 512>>>(g_posi, g_posj, g_posk, g_HRTx0, g_HRTy0, g_HRTz0,
		                        g_NendoBC, g_offset, devDipole, d_endoHnnA, d_endoCx, d_endoCy, d_endoCz, d_tm);
	}
	hipFree(devDipole);
}
// Launch only the body-surface-triangle kernel for one dipole (stand-alone
// variant of that step in gpu_dpl_all).
extern "C" void gpu_dpl_nPos_2(float g_posi,float g_posj,float g_posk,float g_dpl[3])
{
	float *devDipole;
	hipMalloc((void**) &devDipole, sizeof(float) * 3);
	hipMemcpy(devDipole, g_dpl, sizeof(float) * 3, hipMemcpyHostToDevice);
	k_dpl_nPos_2<<<2, 342>>>(g_posi, g_posj, g_posk, devDipole, d_r, d_surfPOTi, d_tnd);
	hipFree(devDipole);
}
// Launch only the electrode-node kernel for one dipole (stand-alone variant
// of the first step in gpu_dpl_all).  g_POTi/g_der are unused here — results
// stay in the device accumulators until gpu_BSPitmm_DeviceToHost().
extern "C" void gpu_dpl_nPos(float g_posi,float g_posj,float g_posk,short int g_nPos,float g_dpl[3],float *g_POTi,float g_der[NL])
{
	float *devDipole;
	hipMalloc((void**) &devDipole, sizeof(float) * 3);
	hipMemcpy(devDipole, g_dpl, sizeof(float) * 3, hipMemcpyHostToDevice);
	k_dpl_nPos<<<1, g_nPos>>>(g_posi, g_posj, g_posk, g_nPos, devDipole, d_POTi, d_der, d_r, d_rn);
	hipFree(devDipole);
}
void XCTcalm(void) {
// FILE *fp;
void wtXCTm(short int,short int,short int,short int);
void bbDLYm(short int,short int,short int);
void rdXCTm(short int,short int,short int,short int);
short int itmp, tmp;
short int iStm,ires,irp,irel,ist,kBB;
float phsft,mxDLY,mACCl,icross,delt;
char mCell,iCell,kCell;
short int *iACTv[4];
short int *iACTvOld[4];
short int *jACTv[4];
short int *kACTv[4];
short int *iXCT[NK];
short int *iXCTapd[NK];
short int *iXCTOld[NK];
short int iseqx[12]={-1,-1, 0, 0, 1, 0, 1, 1, 0, 0,-1, 0};
short int iseqy[12]={ 0, 1, 1, 0, 0, 1, 0,-1,-1, 0, 0,-1};
short int iseqz[12]={ 0, 0, 0,-1,-1,-1, 0, 0, 0, 1, 1, 1};
short int ix,iy,iz,jx,jy,jz,iv,l;
short int jdist,jx0,jy0,jz0,is,ICL,ivel;
short int iSTOP, iS1S2, dS1S2Old, iCell5Ex;
long i,j,k,nACTv,mACTv,nACTvOld;
long nblck,nStep,nbrch;
// >>>>>>> aniso >>>>>>
float xani,yani,zani,dani,elp;
float dxani,dyani,dzani;
short int itms1=0;
// ---- for vtr aniso use
// storing the ellipsoid propagation times ---
//--------- maximum excitation time Step: maxXctStep -------------
for(i=0;i<4;i++) {
iACTv[i] = (short int *) malloc(50000*ND3*2);
iACTvOld[i] = (short int *) malloc(50000*ND3*2);
jACTv[i] = (short int *) malloc(50000*ND3*2);
kACTv[i] = (short int *) malloc(50000*ND3*2);
if((iACTv[i]==NULL)||(iACTvOld[i]==NULL)||
(jACTv[i]==NULL)||(kACTv[i]==NULL)) {
MessageBox(NULL,"Out of memory !",NULL,MB_OK);
return;
}
}
for(i=0;i<NK;i++) {
iXCT[i] = (short int *) malloc(NI*NJ*2);
iXCTapd[i] = (short int *) malloc(NI*NJ*2);
iXCTOld[i] = (short int *) malloc(NI*NJ*2);
if((iXCT[i]==NULL)||(iXCTOld[i]==NULL)) {
MessageBox(NULL,"Out of memory !",NULL,MB_OK);
return;
}
}
for(i=0;i<4;i++) {
for(j=0;j<50000*ND3;j++) {
*(iACTv[i]+j)=0;
*(iACTvOld[i]+j)=0;
*(jACTv[i]+j)=0;
*(kACTv[i]+j)=0;
}
}
// --- file mapXCT is initialized with INFTIME ----
for(i=0;i<NCYCL;i++) {
for(j=0;j<50000*ND3;j++) {
*(mapXCTm[i]+j)=INFTIME;
}
}
for(k=0;k<NK;k++) {
for(j=0;j<NJ;j++) {
for(i=0;i<NI;i++) {
*(iXCT[k]+j*NI+i)=INFTIME;
*(iXCTapd[k]+j*NI+i)=0;
*(iXCTOld[k]+j*NI+i)=INFTIME;
}
}
}
mxcycle=0;
short int tested[NCELL];
for(i=0;i<NCELL;i++)
tested[i]=0;
for(i=0;i<nttl;i++) {
jx=ipttl[0][i]; /*<Comment by ALF> pos of ith cell*/
jy=ipttl[1][i];
jz=ipttl[2][i];
iCell=*(mapCell[jz]+jy*NI+jx); /*<Comment by ALF> cell type index */
if(tested[iCell-1]==0)
{*(iparm+(iCell-1)*NPARM+18)+=ipttl[3][i];tested[iCell-1]=1;
if (iCell!=1) {*(iparm+(1-1)*NPARM+18)+=ipttl[3][i];//maxXctStep+=ipttl[3][i];
}
}
//TRACE("\nNTTL (%3d %3d %3d) %2d",jx,jy,jz,iCell);
// set pacemake time of no. 5 cells
if (iCell==5) {
ipstm[0][i]=100*ND/(ipttl[3][i]+1);
if((ipstm[0][i]*ipttl[3][i])<100*ND) ipstm[0][i]+=1;
//ipstm[0][i]=100/(ipttl[3][i]+1);
//if((ipstm[0][i]*ipttl[3][i])<100) ipstm[0][i]+=1;
//TRACE("\nCell 5, (%d %d %d) %d %d",jx,jy,jz, ipttl[3][i],ipstm[0][i]);
continue;
}
// iparm(n,18) = BCL basic cycle length (ms) of pacing
// iparm(n,20) = inc increament of BCL(ms/cycle)
ipstm[0][i]=*(iparm+(iCell-1)*NPARM+17);
ipstm[1][i]=*(iparm+(iCell-1)*NPARM+19);
ipstm[2][i]=0;
}
nblck=0;
ic=0;
nACTv=0;
iS1S2=0;
iCell5Ex=0;
// ------ stimulus: pacemaker spontanous firing -------
while (1) {
// In this loop, ipttl[3][i] is mainly used to
// decide ipstm[0][i] and itself
jx=0;
jy=0;
jz=0;
iStm=0;
excited=0;
for (i=0;i<nttl;i++) {
jx=ipttl[0][i];
jy=ipttl[1][i];
jz=ipttl[2][i];
iStm=ipttl[3][i];
iCell=*(mapCell[jz]+jy*NI+jx);
//TRACE("\nStimulus (%3d %3d %3d)%2d %d %d",jx,jy,jz,iCell,iStm, mxcycle);
//TRACE("\nbreak1 mxcycle=%d NCYCL=%d ic=%d iCell=%d, iStm=%d, mS2BN=%d,ipstm=%d",mxcycle, NCYCL,ic, iCell, iStm,*(iparm+(iCell-1)*NPARM+18),ipstm[0][i]);
if (iCell==5) continue; // ignore BB
if (iStm != ic) continue;
// ic: i-th time Step
// nACTv: number of exitation cells at ic time but cellType != 5 (BB)
// --- end ---
//TRACE("\nbreak1 mxcycle=%d NCYCL=%d ic=%d iCell=%d, iStm=%d",mxcycle, NCYCL,ic, iCell, iStm);
nACTv=nACTv+1;
*(iACTv[0]+nACTv)=jx;
*(iACTv[1]+nACTv)=jy;
*(iACTv[2]+nACTv)=jz;
*(iACTv[3]+nACTv)=*(iparm+(iCell-1)*NPARM+31); /*<Comment by ALF> iparm store each cell's parameters*/
// iparm(n,32): conduction speed
wtXCTm(ic,jx,jy,jz);
//if (jx==101 && jy==77 && jz==6) TRACE("\nA mxcycle=%d at ic=%d, iCell=%d",mxcycle,ic,iCell);
//if (iCell <3) TRACE("\nA %d %d %d %d %d %d",iCell,jx,jy,jz,ic,nACTv);
// write to file
// mxcycle: maximum cycle
if(mxcycle>=NCYCL) {
break;
}
// --- store current time to iXCT and last time to iXCTOld -->
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx); // init is INFTIME
*(iXCT[jz]+jy*NI+jx)=ic;
excited=1;
// Update ipttl[3][i]
// iparm(n,18) = BCL: basic cycle length (ms) of pacing
// Normally, only SN has this parameter > 0
/*if(*(iparm+(iCell-1)*NPARM+17)>0) {
if ((iS1S2==1) && (mS2BN>1)) {
itmp=ipttl[3][i]+mS2CL;
mS2BN--;
} else {
itmp=ipttl[3][i]+ipstm[0][i];
}
dS1S2Old=ipstm[2][i];
ipstm[0][i] = ipstm[0][i] + ipstm[1][i];
iCell5Ex=0;
// ipstm[1][i] is the step
// iparm(n,19) = pBN: beat number
// judge by ipttl[3][i]
if(itmp>*(iparm+(iCell-1)*NPARM+18)) continue;
if ((mS2ST/3 > ipttl[3][i]) &&(mS2ST/3 < itmp)) {
ipttl[3][i]=(short int)(mS2ST/3);
iS1S2=1;
} else {
ipttl[3][i]=itmp;
}
ipstm[2][i]=itmp-ipttl[3][i];
//TRACE("\nTime=%d, %d, %d, %d, %d %d",ic,itmp,ipttl[3][i],dS1S2Old, ipstm[0][i],ipstm[1][i]);
continue;
}*/
if(*(iparm+(iCell-1)*NPARM+17)>0) {
if (iCell==1) {
itmp=ipttl[3][i]+ipstm[0][i];
dS1S2Old=ipstm[2][i];
ipstm[0][i] = ipstm[0][i] + ipstm[1][i];
iCell5Ex=0;
if(itmp>*(iparm+(iCell-1)*NPARM+18)) continue;
ipttl[3][i]=itmp; continue;}
else
{
itmp=ipttl[3][i]+ipstm[0][i];
dS1S2Old=ipstm[2][i];
ipstm[0][i] = ipstm[0][i] + ipstm[1][i];
iCell5Ex=0;
if(itmp>*(iparm+(iCell-1)*NPARM+18)-ipstm[0][i]+3) continue;
ipttl[3][i]=itmp;
}
continue;
}
// iparm(n,24) = ICL: intrinsic cycle length(ms)
ipttl[3][i] = ipttl[3][i] + *(iparm+(iCell-1)*NPARM+23);
}
// ---- display the excitation number ----
// go to next Step
nblck = nblck + nACTv;
//TRACE("\nmxcycle =%d Step=%3d, number=%ld nblck=%ld ",mxcycle,ic,nACTv, nblck);
ic = ic + 1;
//TRACE("\nbreak2 ic=%d maxXctStep=%d ",ic, maxXctStep);
if (ic>=maxXctStep) break;
if (nACTv == 0) continue;
/**
* very important
*/
// --------- propagation (2000)------------>
nACTvOld=0;
// nACTv: at moment t, the number of excited cells
for (i=1;i<=nACTv;i++) {
excited=1;
ix=*(iACTv[0]+i);
iy=*(iACTv[1]+i);
iz=*(iACTv[2]+i);
iv=*(iACTv[3]+i);
iCell=*(mapCell[iz]+iy*NI+ix);
//if (ix == 64 && iy == 50 && iz == 64) TRACE("\nB AVN %d",iCell);
//----------- low conduction speed part ----------->
// iparm(n,32): conduction speed
if (*(iparm+(iCell-1)*NPARM+31)<=0) continue;
if (iCell==5) iCell5Ex=1;
//if (iCell==8) TRACE("\nCell=8 %d, %d, %d, ic=%d, %d %d",ix,iy,iz,ic,iv,mBCL);
// 100 = Conduction Speed of ATR?
if (iv<100) {
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=ix;
*(iACTvOld[1]+nACTvOld)=iy;
*(iACTvOld[2]+nACTvOld)=iz;
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31)+*(mapSpeed[iz]+iy*NI+ix); //added by zhu
if (iCell==5) {
/*ibbDLY=0;
bbDLYm(ix,iy,iz);
if (ibbDLY>0)
*(iACTvOld[3]+nACTvOld)=iv+ibbDLY;
TRACE("\nBB, %d",*(iACTvOld[3]+nACTvOld));
*/
ibbDLY=0;
// Add for BB interval by hui wang
ibbSTEP=0;
bbDLYm(ix,iy,iz);
// End of add for BB interval by hui wang, modified by zhu
if (ibbDLY>0) {ibbSTEP+=nbbSTEP;ibbDLY=100*ND/(ibbSTEP+1);}
if(ibbDLY>0 && (ibbDLY*ibbSTEP)<100*ND) ibbDLY+=1;
if (ibbDLY>0)
*(iACTvOld[3]+nACTvOld)=iv+ibbDLY;
else
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31);
continue;
}
/*if (iCell==3 || iCell==6) {
if (*(iXCTOld[iz]+iy*NI+ix)==INFTIME)
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31);
else {
irel = *(iXCT[iz]+iy*NI+ix)-*(iXCTOld[iz]+iy*NI+ix)-(*(iparm+NPARM*(iCell-1)+4)+*(mapAPD[iz]+iy*NI+ix))/3;
//irel = *(iXCT[iz]+iy*NI+ix)-*(iXCTOld[iz]+iy*NI+ix)-(*(iparm+NPARM*(iCell-1)+4))/3;
irel = 3*irel;
if (irel<*(iparm+NPARM*(iCell-1)+5)) {
tmp=100+*(iparm+NPARM*(iCell-1)+32)
-irel*(*(iparm+NPARM*(iCell-1)+32))/(*(iparm+NPARM*(iCell-1)+5));
if (tmp!=0) {
ivel = 100*(*(iparm+NPARM*(iCell-1)+31))/tmp;
} else {
ivel=*(iparm+NPARM*(iCell-1)+31);
}
} else {
// <--- time of RRP stored in iparm(6) ---
ivel=*(iparm+NPARM*(iCell-1)+31);
}
*(iACTvOld[3]+nACTvOld)=iv+ivel;}
}*/
/*else if (iCell==3) {
if (iCell5Ex==0) {
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31)- dS1S2Old/20;
TRACE("\nCell=3 E dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
} else {
if (mBCL<600&&dS1S2Old<140/3) {
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31)- (dS1S2Old+67)/33;
TRACE("\nCell=3 A dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
} else if (mBCL<600&&dS1S2Old>=140/3) {
*(iACTvOld[3]+nACTvOld)=iv;
TRACE("\nCell=3 B dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
} else if (mBCL>=600&&dS1S2Old<=210/3) {
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31);
TRACE("\nCell=3 C dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
} else {
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31)- dS1S2Old/12;
TRACE("\nCell=3 D dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
}
}
}*/
continue;
}
// ------- neighbourhood search (2100) -------->
// go to iv > 100 situation and set ires = the part of iv < 100
ires=iv-100*(int)(iv/100);
for (l=0;l<12;l++) {
jx=ix+iseqx[l];
if ((jx<=-1)||(jx>(NI-1))) continue; // >=0 <NI
jy=iy+iseqy[l];
if ((jy<=-1)||(jy>(NJ-1))) continue; // >=0 <NJ
jz=iz+iseqz[l];
if ((jz<=-1)||(jz>(NK-1))) continue; // >=0 <NK
// >>>>> aniso: within the ellpisoid ? >>>>>>>>>>>>
if (ANISO==1 && iCell==7) {
dani=local(ix,iy,iz);
//TRACE("\nx,y,z,dani, %2d %2d %2d %f",ix,iy,iz,dani);
// -- if can't solve local coordinates, treat as isotropic -->
if (dani > 0.0001) {
//lctran(iseqx[l],iseqy[l],iseqz[l],dani,xani,yani,zani);
xani=iseqx[l]*tmswf[0][0]+iseqy[l]*tmswf[0][1]+iseqz[l]*tmswf[0][2];
yani=iseqx[l]*tmswf[1][0]+iseqy[l]*tmswf[1][1]+iseqz[l]*tmswf[1][2];
zani=iseqx[l]*tmswf[2][0]+iseqy[l]*tmswf[2][1]+iseqz[l]*tmswf[2][2];
dxani=xani*yaxis[1]*zaxis[2]+yani*yaxis[2]*zaxis[0]
+zani*yaxis[0]*zaxis[1]-zani*yaxis[1]*zaxis[0]
-xani*yaxis[2]*zaxis[1]-yani*yaxis[0]*zaxis[2];
dyani=xaxis[0]*yani*zaxis[2]+xaxis[1]*zani*zaxis[0]
+xaxis[2]*xani*zaxis[1]-xaxis[2]*yani*zaxis[0]
-xaxis[0]*zani*zaxis[1]-xaxis[1]*xani*zaxis[2];
dzani=xaxis[0]*yaxis[1]*zani+xaxis[1]*yaxis[2]*xani
+xaxis[2]*yaxis[0]*yani-xaxis[2]*yaxis[1]*xani
-xaxis[0]*yaxis[2]*yani-xaxis[1]*yaxis[0]*zani;
xani=dxani/dani;
yani=dyani/dani;
zani=dzani/dani;
// itms=maps(ix,iy,iz)+1
//TRACE("\nd %f %f %f %f",dxani,xani,yaxis[0],zaxis[0]);
itms1=*(iXCTapd[iz]+iy*NI+ix);
elp=xani*xani/vt2[itms1]+
yani*yani/vt2[itms1]+
zani*zani/vl2[itms1];
// write(0,*) x,y,z,elp
// TRACE("\n %d %f",itms1,elp);
if (elp > 1.0) continue;
}
}
// <<<<<<<<<<<<<<<<<<<< aniso <<<<<<<<<<<<<<<<<<<
mCell=*(mapCell[jz]+jy*NI+jx);
if ((iCell<=7)&&(mCell<=7)&&(((iCell-mCell)>1)||
((iCell-mCell)<-1))) continue;
if ((*(iparm+NPARM*(mCell-1)+33)>0)&&( (mCell>7 && iCell>7 && mCell!=iCell) || (mCell<=7 && iCell>mCell) || (mCell>7 && !(iCell==mCell || iCell==2)))) continue;
//if ((*(iparm+NPARM*(mCell-1)+33)>0)&&( (mCell<=7 && iCell>mCell) || (mCell>7 && !(iCell==mCell || iCell==2)))) continue;
//if ((*(iparm+NPARM*(mCell-1)+33)>0)&&(iCell>mCell)) continue;
//if ((*(iparm+NPARM*(mCell-1)+33)<0)&&(iCell<mCell)) continue;
//if ((*(iparm+NPARM*(mCell-1)+33)<0)&&( (mCell<=7 && iCell<mCell) || (mCell>7 && !(iCell==mCell || iCell==7)))) continue;
if ((*(iparm+NPARM*(mCell-1)+33)<0)&&( (mCell>7 && iCell>7 && mCell!=iCell) || (mCell<=7 && iCell<mCell) || (mCell>7 && !(iCell==mCell || iCell==7)))) continue;
//if (jx == 64 && jy == 50 && jz == 64) TRACE("\nC AVN %d",mCell);
if (mCell<=0) continue; // continue;
if (mCell>=15) continue; // continue;
if (mCell==5) iCell5Ex=1;
//if (iCell==8) TRACE("\nCell=8 %d, %d, %d, ic=%d, %d %d",jx,jy,jz,ic);;
// --- coupling interval ------>
idltt=ic-*(iXCT[jz]+jy*NI+jx);
if (*(iXCT[jz]+jy*NI+jx)==INFTIME) idltt=INFTIME;
// --- change in cycle length ------>
idltc=idltt+*(iXCTOld[jz]+jy*NI+jx)-*(iXCT[jz]+jy*NI+jx);
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME) idltc=0;
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
*(iparm+(1-1)*NPARM+23)>0)
idltc = ic-*(iXCT[jz]+jy*NI+jx)-*(iparm+(1-1)*NPARM+23);
// rdXCT(ic,jx,jy,jz);
// irp = time in phase 2 + mapACT/3 +plateau of potential in phase 3 *idltc/100
// --- absolute refractory period ------>
//irp=(*(iparm+NPARM*(mCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3
// +*(iparm+(mCell-1)*NPARM+10)*idltc/100;
irp=(*(iparm+NPARM*(mCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3;
//if (mCell==6)
// irp=(*(iparm+NPARM*(mCell-1)+4))/3;
irel=idltt-irp;
// ++++++++ in absolute refractory period ? +++++++
if (irel<=0) continue;
//if (*(mapAPD[jz]+jy*NI+jx)>20) idltc = 2*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>20 && idltc<0) idltc = 2*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>30 && idltc<0) idltc = 3*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>30 && idltc>0) idltc = 2*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>40 && idltc>0) idltc = 2*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>40 && idltc>0) idltc = 2*idltc;
//if (mCell==3) TRACE("\nCell=3 %d, %d, %d,%d,%d,%d",irp,ic,*(iXCT[jz]+jy*NI+jx),*(mapAPD[jz]+jy*NI+jx),*(iparm+(mCell-1)*NPARM+10),idltc);
*(mapAPD[jz]+jy*NI+jx) += *(iparm+(mCell-1)*NPARM+10)*idltc*3/100; //added by Zhu
//if (mCell==3) TRACE("\nCell=3 %d, %d, %d",*(mapAPD[jz]+jy*NI+jx)/3,idltc,irp);
if (*(iXCT[jz]+jy*NI+jx)==INFTIME && mCell==3) {irel=INFTIME;*(mapAPD[jz]+jy*NI+jx)=0;}
// --- find automaticity in stimul data ----
// iparm(n,24), ICL: intrinsic cycle length (ms)
iSTOP =0;
if (*(iparm+NPARM*(mCell-1)+23)>0) { // !=0 August 10, 1996
// <--- next touch time should be beyound ARP of the cell --
for (is=0;is<nttl;is++) {
if (jx!=ipttl[0][is]) continue;
if (jy!=ipttl[1][is]) continue;
if (jz!=ipttl[2][is]) continue;
// --- iparm(23) used for adjusting intermediate change
// of EP intrinsic cycle length --->
ICL = *(iparm+NPARM*(mCell-1)+23);
ist = ic-*(iXCT[jz]+jy*NI+jx);
// PRT: protection indicator
// --- no protection ---->
if (*(iparm+NPARM*(mCell-1)+24)==0) {
if (ist<=irp) continue; //{iSTOP=1;break;}
//if (iSTOP==1)
ipttl[3][is]=ic+ICL; // ICL/3
/******************/
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=jx;
*(iACTvOld[1]+nACTvOld)=jy;
*(iACTvOld[2]+nACTvOld)=jz;
*(iACTvOld[3]+nACTvOld)=*(iparm+NPARM*(mCell-1)+31)+ires;
wtXCTm(ic,jx,jy,jz);
if (mxcycle>=NCYCL) {iSTOP=1;break;}
//if (ic==*(iXCT[jz]+jy*NI+jx)) continue;
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;//added to by zhu
irel=0;
excited=1;
TRACE("\n %d, %d %d %d %d %d",*(iXCTOld[jz]+jy*NI+jx),*(iXCT[jz]+jy*NI+jx),ic,jx,jy,jz);
/******************/
//iSTOP=1;
continue; //break; // rewrite condition
}
if (idltt==INFTIME) continue;
//ist = ic-*(iXCT[jz]+jy*NI+jx);
// if (ist<=irp) goto loop21; // August 10, 1996
if (ist<=irp) continue; //{iSTOP=1;break;}
phsft =(float)100.*(idltt/ICL);
mxDLY =(float)*(iparm+NPARM*(mCell-1)+25);
mACCl =(float)*(iparm+NPARM*(mCell-1)+26);
if (mxDLY == 0 && mACCl == 0) continue;
icross=(float)*(iparm+NPARM*(mCell-1)+27);
if (icross == 0 || icross == 100) continue;
if (phsft<=icross)
delt=phsft*mxDLY/icross;
else
delt=mACCl-(phsft-icross)*mACCl/(100-icross);
// -- store touch time --->
// -- modify next stimulating time --->
ipttl[3][is]=ipttl[3][is]+(int)(ICL*delt/100);
//TRACE("\ntime=%4d,ixt=%4d,idltt=%4d,icl=%4d,phsft=%4d,intermediate=%4d",
// ic, *(iXCT[jz]+jy*NI+jx),idltt,ICL, (int)phsft, ipttl[3][is]);
// change value after each touch time
// avoiding from successive modification by surrounding cells
if (ic==*(iXCT[jz]+jy*NI+jx)) continue;
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
irel=0;
excited=1;
//iSTOP=1;
continue; //break; // rewrite condition
}
}
if (iSTOP==1) continue;
if (irel==0) continue;
// +++++ special processing for BB +++++
if (mCell==5) {
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
// Add for BB interval by hui wang modified by Zhu
// variable ibbSTEP, nbbSTEP are added to store steps by first BB
// ibbSTEP is a function in bbDLYm(i,j,k)
nbbSTEP=0;
ibbDLY=0;
ibbSTEP=0;
bbDLYm(jx,jy,jz);
nbbSTEP=ibbSTEP;
// end of add for BB interval by hui wang
//ic+=10; // add by hw, BB interval
//TRACE("\n nHB = %d, ic= %d",nHB,ic);
for(kBB=0;kBB<nBB;kBB++) {
jx=iBB[0][kBB];
jy=iBB[1][kBB];
jz=iBB[2][kBB];
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=jx;
*(iACTvOld[1]+nACTvOld)=jy;
*(iACTvOld[2]+nACTvOld)=jz;
//*(iACTvOld[3]+nACTvOld)=100;
ibbDLY=0;
// Add for BB interval by hui wang,modified by zhu
ibbSTEP=0;
bbDLYm(jx,jy,jz);
ibbSTEP+=nbbSTEP;
ibbDLY=100*ND/(ibbSTEP+1);
if((ibbDLY*ibbSTEP)<100*ND) ibbDLY+=1;
// End of add for BB interval by hui wang
*(iACTvOld[3]+nACTvOld)=ibbDLY;
wtXCTm(ic,jx,jy,jz);
//if (jx==101 && jy==77 && jz==6) TRACE("\nB mxcycle=%d at ic=%d, iCell=%d",mxcycle,ic,mCell);
//if (mCell >2 && mCell <6) TRACE("\nB %d %d %d %d %d %d",mCell,jx,jy,jz,ic,nACTvOld);
if (mxcycle>=NCYCL) break;
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
excited=1;
}
continue;
}
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=jx;
*(iACTvOld[1]+nACTvOld)=jy;
*(iACTvOld[2]+nACTvOld)=jz;
wtXCTm(ic,jx,jy,jz);
//TRACE("\nbreak3 mxcycle=%d NCYCL=%d ",mxcycle, NCYCL);
if (mxcycle>=NCYCL) break;
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
irel = 3*irel;
//if (*(iXCTOld[jz]+jy*NI+jx)!=INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
// irel>*(iparm+NPARM*(mCell-1)+5))
// *(mapAPD[jz]+jy*NI+jx)=0;
// time of RRP stored in iparm(6)
if ((irel)<*(iparm+NPARM*(mCell-1)+5)) {
tmp=100+*(iparm+NPARM*(mCell-1)+32)
-irel*(*(iparm+NPARM*(mCell-1)+32))/(*(iparm+NPARM*(mCell-1)+5));
if (tmp!=0) {
ivel = 100*(*(iparm+NPARM*(mCell-1)+31))/tmp;
} else {
ivel=*(iparm+NPARM*(mCell-1)+31);
}
} else {
// <--- time of RRP stored in iparm(6) ---
ivel=*(iparm+NPARM*(mCell-1)+31);
}
*(mapSpeed[jz]+jy*NI+jx)=ivel-*(iparm+NPARM*(mCell-1)+31);//added by Zhu
// test results
//TRACE("\nmcell=%4d,ic=%4d,idltt=%4d,idltc=%4d,ivel=%4d",mCell,ic,idltt,idltc,ivel);
if (iCell!=mCell) {
if (mCell == 5) {
bbDLYm(jx,jy,jz);
*(iACTvOld[3]+nACTvOld)=ibbDLY;
//TRACE("\n BB2=%d, %d %d (%d %d %d) ic=%d ",*(iACTvOld[3]+nACTvOld),iv,ibbDLY,ix,iy,iz, ic);
continue;
}
*(iACTvOld[3]+nACTvOld)=ivel;
continue;
}
*(iACTvOld[3]+nACTvOld)=ivel+ires;
}
// <------- END of neighbourhood search (2100) -----
// >>>>>>>> anisotropy >>>>>
if (ANISO==1 && iCell == 7) {
// ltrat==2;
if (*(iXCTapd[iz]+iy*NI+ix) < 2) {
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=ix;
*(iACTvOld[1]+nACTvOld)=iy;
*(iACTvOld[2]+nACTvOld)=iz;
//*(iACTvOld[3]+nACTvOld)=ires+
// *(iparm+NPARM*(iCell-1)+31);
*(iACTvOld[3]+nACTvOld)=ires+
*(iparm+NPARM*(iCell-1)+31)+*(mapSpeed[iz]+iy*NI+ix);
*(iXCTapd[iz]+iy*NI+ix)+=1;
} else {
*(iXCTapd[iz]+iy*NI+ix)=0;
}
}
// <<<<<<<<<<<
}
// <------- END of propagation (2000) -----
// +++++++++++ for high speed ++++++++
mACTv=nACTvOld;
// ------- propagation (1000) -------->
for(i=1;i<=nACTvOld;i++) {
idist=(int)(*(iACTvOld[3]+i)/100);
if (idist<2) continue;
*(jACTv[0]+1)=*(iACTvOld[0]+i);
*(jACTv[1]+1)=*(iACTvOld[1]+i);
*(jACTv[2]+1)=*(iACTvOld[2]+i);
ires=*(iACTvOld[3]+i)-idist*100;
nStep=0;
nbrch=1;
jdist=1;
while (1) {
for (j=1;j<=nbrch;j++) {
jx0=*(jACTv[0]+j);
jy0=*(jACTv[1]+j);
jz0=*(jACTv[2]+j);
mCell=*(mapCell[jz0]+jy0*NI+jx0);
if (mCell==5) iCell5Ex=1;
for (l=0;l<12;l++) {
jx=jx0+iseqx[l];
if ((jx<=-1)||(jx>(NI-1))) continue; // <0 or >=NI
jy=jy0+iseqy[l];
if ((jy<=-1)||(jy>(NJ-1))) continue; // <0 or >=NJ
jz=jz0+iseqz[l];
if ((jz<=-1)||(jz>(NK-1))) continue; // <0 or >=NK
kCell = *(mapCell[jz]+jy*NI+jx);
//if (jx == 64 && jy == 50 && jz == 64) TRACE("\nE AVN %d",kCell);
if (kCell != mCell) continue;
//++++++++ in effective refractory period ? +++++++
// >>>>> aniso: within the ellpisoid ? >>>>>>>>>>>>
if (ANISO==1 && mCell==7) {
dani=local(jx0,jy0,jz0);
//TRACE("\nx,y,z,dani, %2d %2d %2d %f",ix,iy,iz,dani);
// -- if can't solve local coordinates, treat as isotropic -->
if (dani > 0.0001) {
//lctran(iseqx[l],iseqy[l],iseqz[l],dani,xani,yani,zani);
xani=iseqx[l]*tmswf[0][0]+iseqy[l]*tmswf[0][1]+iseqz[l]*tmswf[0][2];
yani=iseqx[l]*tmswf[1][0]+iseqy[l]*tmswf[1][1]+iseqz[l]*tmswf[1][2];
zani=iseqx[l]*tmswf[2][0]+iseqy[l]*tmswf[2][1]+iseqz[l]*tmswf[2][2];
dxani=xani*yaxis[1]*zaxis[2]+yani*yaxis[2]*zaxis[0]
+zani*yaxis[0]*zaxis[1]-zani*yaxis[1]*zaxis[0]
-xani*yaxis[2]*zaxis[1]-yani*yaxis[0]*zaxis[2];
dyani=xaxis[0]*yani*zaxis[2]+xaxis[1]*zani*zaxis[0]
+xaxis[2]*xani*zaxis[1]-xaxis[2]*yani*zaxis[0]
-xaxis[0]*zani*zaxis[1]-xaxis[1]*xani*zaxis[2];
dzani=xaxis[0]*yaxis[1]*zani+xaxis[1]*yaxis[2]*xani
+xaxis[2]*yaxis[0]*yani-xaxis[2]*yaxis[1]*xani
-xaxis[0]*yaxis[2]*yani-xaxis[1]*yaxis[0]*zani;
xani=dxani/dani;
yani=dyani/dani;
zani=dzani/dani;
// itms=maps(ix,iy,iz)+1
//TRACE("\nd %f %f %f %f",dxani,xani,yaxis[0],zaxis[0]);
itms1=*(iXCTapd[jz0]+jy0*NI+jx0);
elp=xani*xani/vt2[itms1]+
yani*yani/vt2[itms1]+
zani*zani/vl2[itms1];
// write(0,*) x,y,z,elp
// TRACE("\n %d %f",itms1,elp);
if (elp > 1.0) continue;
}
}
idltt=ic-*(iXCT[jz]+jy*NI+jx);
if (*(iXCT[jz]+jy*NI+jx)==INFTIME) idltt=INFTIME;
// --- change in cycle length ------>
idltc=idltt+*(iXCTOld[jz]+jy*NI+jx)-*(iXCT[jz]+jy*NI+jx);
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME) idltc=0;
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
*(iparm+(1-1)*NPARM+23)>0)
idltc = ic-*(iXCT[jz]+jy*NI+jx)-*(iparm+(1-1)*NPARM+23);
// rdXCT(ic,jx,jy,jz);
// irp = time in phase 2 + mapACT/3 +plateau of potential in phase 3 *idltc/100
// --- absolute refractory period ------>
//irp=(*(iparm+NPARM*(mCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3
// +*(iparm+(mCell-1)*NPARM+10)*idltc/100;
irp=(*(iparm+NPARM*(mCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3;
//if (mCell==6)
// irp=(*(iparm+NPARM*(mCell-1)+4))/3;
irel=idltt-irp;
if (irel<=0) continue; // continue;
if (*(iXCT[jz]+jy*NI+jx)==INFTIME && mCell==3) {irel=INFTIME;*(mapAPD[jz]+jy*NI+jx)=0;}
*(mapAPD[jz]+jy*NI+jx) += *(iparm+(mCell-1)*NPARM+10)*idltc*3/100; //added by Zhu
/*
idltt=ic-*(iXCT[jz]+jy*NI+jx);
if (*(iXCT[jz]+jy*NI+jx)==INFTIME) idltt=INFTIME;
idltc=idltt+*(iXCTOld[jz]+jy*NI+jx)-*(iXCT[jz]+jy*NI+jx);
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME) idltc=0;
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
*(iparm+(1-1)*NPARM+23)>0)
idltc = ic-*(iXCT[jz]+jy*NI+jx)-*(iparm+(1-1)*NPARM+23);
// rdXCT(ic,jx,jy,jz);
// irp=(*(iparm+NPARM*(kCell-1)+4)+*(mapACT[jz]+jy*NI+jx))/3+
// *(iparm+(kCell-1)*NPARM+10)*idltc/100;
//irp=(*(iparm+NPARM*(kCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3+
// *(iparm+(kCell-1)*NPARM+10)*idltc/100;
irp=(*(iparm+NPARM*(kCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3;
*(mapAPD[jz]+jy*NI+jx) += *(iparm+(kCell-1)*NPARM+10)*idltc*3/100; //added by Zhu
irel=idltt-irp;
if (*(iXCT[jz]+jy*NI+jx)==INFTIME) irel=INFTIME;
*/
irel = 3*irel;
//if (*(iXCTOld[jz]+jy*NI+jx)!=INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
// irel>=*(iparm+NPARM*(kCell-1)+5))
// *(mapAPD[jz]+jy*NI+jx)=0;
if ((irel)<*(iparm+NPARM*(kCell-1)+5)) {
tmp=100+*(iparm+NPARM*(mCell-1)+32)
-irel*(*(iparm+NPARM*(mCell-1)+32))/(*(iparm+NPARM*(mCell-1)+5));
if (tmp!=0) {
ivel = 100*(*(iparm+NPARM*(mCell-1)+31))/tmp;
} else {
ivel=*(iparm+NPARM*(mCell-1)+31);
}
} else {
ivel=*(iparm+NPARM*(kCell-1)+31);
}
*(mapSpeed[jz]+jy*NI+jx)=ivel-*(iparm+NPARM*(kCell-1)+31);//added by Zhu
nStep=nStep+1;
*(kACTv[0]+nStep)=jx;
*(kACTv[1]+nStep)=jy;
*(kACTv[2]+nStep)=jz;
// nStep++;
mACTv=mACTv+1;
*(iACTvOld[0]+mACTv)=jx;
*(iACTvOld[1]+mACTv)=jy;
*(iACTvOld[2]+mACTv)=jz;
*(iACTvOld[3]+mACTv)=ivel+ires;
// mACTv++;
// TRACE(" D%d,",mACTv);
wtXCTm(ic,jx,jy,jz);
//if (jx==101 && jy==77 && jz==6) TRACE("\nD mxcycle=%d at ic=%d, iCell=%d",mxcycle,ic,kCell);
//if (kCell >2 && kCell <6) TRACE("\nD %d %d %d %d %d %d",kCell,jx,jy,jz,ic,mACTv);
//TRACE("\nbreak4 mxcycle=%d NCYCL=%d ",mxcycle, NCYCL);
if (mxcycle>=NCYCL) {
//TRACE("\nbreak5 iSTOP=%d mxcycle=%d,NCYCL=%d",iSTOP, mxcycle, NCYCL);
iSTOP =1;
break;
}
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
excited=1;
}
/*// >>>>>>>> anisotropy >>>>>
if (ANISO==1 && mCell == 7) {
// ltrat==3;
if (*(iXCTapd[jz0]+jy0*NI+jx0) < 3) {
mACTv=mACTv+1;
*(iACTvOld[0]+mACTv)=jx0;
*(iACTvOld[1]+mACTv)=jy0;
*(iACTvOld[2]+mACTv)=jz0;
*(iACTvOld[3]+mACTv)=ivel+ires;
//*(iACTvOld[3]+nACTvOld)=ires+
// *(iparm+NPARM*(iCell-1)+31);
*(iACTvOld[3]+nACTvOld)=ires+
*(iparm+NPARM*(mCell-1)+31)+*(mapSpeed[jz0]+jy0*NI+jx0);
*(iXCTapd[jz0]+jy0*NI+jx0)+=1;
} else {
*(iXCTapd[jz0]+jy0*NI+jx0)=0;
}
}
// <<<<<<<<<<<*/
if (iSTOP ==1) break;
}
if (iSTOP ==1) break;
if (nStep==0) break; // continue;
jdist=jdist+1;
if (jdist>=idist) break; // continue;
for(k=1;k<=nStep;k++) {
*(jACTv[0]+k)=*(kACTv[0]+k);
*(jACTv[1]+k)=*(kACTv[1]+k);
*(jACTv[2]+k)=*(kACTv[2]+k);
}
nbrch=nStep;
nStep=0;
}
}
//TRACE("\nbreak5 iSTOP=%d ",iSTOP);
if (iSTOP ==1) break;
// <------- END of propagation (1000) -------------
if (excited == 0) break;
nACTv=mACTv;
// nblck=nblck+nACTv;
for(i=1;i<=nACTv;i++) {
for(j=0;j<4;j++) {
*(iACTv[j]+i)=*(iACTvOld[j]+i);
}
}
} // END of whole while loop
TRACE("\nmxcycle=%d",mxcycle);
mxcycle++; // hui
// add HB info
for (itmp=0; itmp<50*ND; itmp++) {
for (tmp=0;tmp<NCYCL;tmp++) {
vHB[tmp][itmp]=0;
}
}
for (itmp=0; itmp<nHB; itmp++) {
l=iHB[0][itmp];
j=iHB[1][itmp];
k=iHB[2][itmp];
if (itmp==0) i=*(locXCT[k]+j*NJ+l); // Consider only the point near AV Node
for (tmp=0;tmp<mxcycle;tmp++) {
vHB[tmp][itmp]=*(mapXCTm[tmp]+i);
}
}
// Save
CFile f;
CFileException e;
//short int index;
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'x');
//filepath.SetAt(index+2,'c');
//filepath.SetAt(index+3,'t');
if (!f.Open( dataPath+"tour.xct ", CFile::modeCreate | CFile::modeWrite, &e )) {
#ifdef _DEBUG
afxDump << "File could not be opened " << e.m_cause << "\n";
#endif
}
//f.Write(&mxcycle,2);
f.Write(&miBN,2);
f.Write(&ic,2);
f.Write(&totalCell,4);
for(j=0;j<mxcycle;j++) {
for(i=0;i<totalCell;i++) f.Write(mapXCTm[j]+i,2);
}
f.Close();
/*
FILE * iow;
iow=fopen("fpMapXCTm.txt","wt");
if (iow == NULL) {
fprintf(stderr, "Open .txt for write failed! \n");
return;
}
long temploc;
temploc=*(locXCT[45]+22*NJ+33);
fprintf(iow,"33 22 45 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[40]+30*NJ+32);
fprintf(iow,"32 30 40 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[48]+20*NJ+30);
fprintf(iow,"30 20 48 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[56]+8*NJ+26);
fprintf(iow,"26 8 56 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[62]+10*NJ+21);
fprintf(iow,"21 10 62 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[62]+30*NJ+13);
fprintf(iow,"13 30 62 %3d\n",*(mapXCTm[0]+temploc));
for(l=0;l<mxcycle;l++) {
fprintf(iow,"l=%d\n",l);
for(k=0;k<NK;k++) {
for(j=0;j<NJ;j++) {
for(i=NI-1;i>-1;i--) {
temploc = *(locXCT[k]+j*NJ+i);
if (temploc < 0) fprintf(iow," ");
else fprintf(iow,"%3d ",*(mapXCTm[l]+temploc));
}
fprintf(iow,"j=%d\n",j);
}
fprintf(iow,"k=%d\n",k);
}
}
fclose(iow);
*/
for(i=0;i<4;i++) {
free(iACTv[i]);
free(iACTvOld[i]);
free(jACTv[i]);
free(kACTv[i]);
}
for(i=0;i<NK;i++) {
free(iXCT[i]);
free(iXCTapd[i]);
free(iXCTOld[i]);
}
} | 348ba5aa48f79e6ffa9e87d228fafae34b1b9683.cu | #include <omp.h>
#include <stdio.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <crtdbg.h>
#include <cuda_runtime.h>
#include <afxwin.h>
#include <iostream>
#include <vector>
#include <fstream>
using namespace std;
#define CRTDBG_MAP_ALLOC
const short int ND = 1;
const short int ND3 = 1;
const short int NI = 56;
const short int NJ = 56;
const short int NK = 90;
const short int NL = 344;
const short int NPARM = 35;
const short int NCELL = 14;
const short int INFTIME = 9999;
const short int ANISO = 1; // aniso switch
const short int NCYCL = 20; // max cycle num
const short int TSTEP = 2000;
const short int NENDO = 4000;
char flg_thread=1,flg_display,flg_calculate,flg_stop;
char flg_calcu_option;
float HRTscale,HRTx0,HRTy0,HRTz0,phai,pusai,theta;
short int ipttl[4][NI*ND*ND],nttl,idist,iHB[4][50*ND3],iBB[3][50*ND3];
short int kTop,kBtm,kVtr,nHB,nBB;
short int ic,ibbDLY,idltt;
short int ibbSTEP;
char flag_flop;
char *mapCell[NK];
short int nPos,nv[3],maxXctStep;
short int *iparm;
float *ydata[NCELL];
float tmswf[3][6],alp;
short int la012[NCELL],la0123[NCELL];
float *r[3],*rn[3];
float *aw[NL], *bw;
short int *kmin, *kmax;
short int *mag[4];
short int *mapAPD[NK];
short int *mapACT[NK];
short int nbbSTEP;
short int *mapSpeed[NK];
short int *mapXCTm[NCYCL]; // store the exciting time 保存兴奋时间
int NendoB, NendoC;
short int endoBx[NENDO*ND3];
short int endoBy[NENDO*ND3];
short int endoBz[NENDO*ND3];
short int endoCx[NENDO*ND3];
short int endoCy[NENDO*ND3];
short int endoCz[NENDO*ND3];
// epicardial variable 心外膜变量
const short int Nepic=NI*NJ*2;//short int Nepic 心外膜计算个数
vector<short int> epicX; // x
vector<short int> epicY; // y
vector<short int> epicZ; // z
short int epicX_old[Nepic];
short int epicY_old[Nepic];
short int epicZ_old[Nepic];
float *POTi;
float *POT[NL],*POT_reduce[NL];//*POT_reduce[NL] by sf 090622
float VCG[3],bufVCG[2][3],bufGRD;
short int nTimeStep,itbuf,nextStep;
short int *iStep;
char answer;
long mNub;
long *locXCT[NK];
long totalCell;
// anisotropy variables 各个方向的变量
short int maxlay;
float *fibdir[3];
float vl2[10], vt2[10], rrat1;
float planedir[3][30];
float prx[12][12], pry[12][12], prz[12][12];
float xaxis[3],yaxis[3],zaxis[3];
short int mBCL,miBN,mxcycle,idltc,mS2ST,mS2CL,mS2BN;
short int ipstm[3][NI*ND*ND];
short int vHB[NCYCL][50*ND3];
short int excited=0;
CString dataPath="E:\\chuan50\\";
const short int useGPU=1,gpuspeed=17;//by sf 090403 useCPU 1--yes 0--no gpuspeed 1 or 17
short int GPUnum=1,corenum=0;//by sf 090823 the number of GPU device,allnum按GPUnum,corenum次序存
// 线程数量
short int threadnum=4;//by sf 090403 threadnum<0 auto >0 set number of thread=threadnum
short int iTimebegin=1,iTimeend;
float **gatheralldpl;//by sf 090408 for write dpl[3] in BSPitmm
int **gatherallijk,*countallijk,*countallijk_reduce,*itask[2],*iloops[3],isumdipoles=0;//,*iTimetid;//by sf 090408 for write the ijk of dpl[3] in BSPitmm
double starttime,endtime;
double bsptime[4] = {0.0,0.0,0.0,0.0};
int BSPitmmcount(short int iTime0);
void rdHRT(void);
void rdpos(void);
void rdnod(void);
void rdmtx(void);
void rdelc(void);
void locfile(void);
void ECGcal(void);
void geoinfc(void);
void setaniso(void);
void neibdir(void);
void stminvx(short int);
void XCTinvcm(void);
void fibplane(void);
void fibdirct(void);
void savACT(void);
//void savACT(int myid);
void freeFibdir(void);
void freemapAPDcs(void);
void freemapAPD(void);
void freebrs(void);
void freemagcs(void);
void freePOTcs(void);
float *d_r,*d_rn,*d_tm;
short int *d_tnd;
float *d_POTi=0, *d_der=0,*d_endoHnnA=0,*d_surfPOTi=0;
short int *d_endoBx=0;
short int *d_endoBy=0;
short int *d_endoBz=0;
short int *d_endoCx=0;
short int *d_endoCy=0;
short int *d_endoCz=0;
short int *d_epicX=0;
short int *d_epicY=0;
short int *d_epicZ=0;
float *d_epicPOTold=0;
//------------ 2009-2-6-16 BY SWF---------
// comment:
extern "C" short int cudamain(int argc, char** argv);
extern "C" void gpu_freetransdata();
extern "C" void gpu_transdata(short int epicX[Nepic],short int epicY[Nepic],short int epicZ[Nepic],short int *g_tnd[3],float *g_r[3],float *g_rn[3],short int g_endoBx[NENDO*ND3],short int g_endoBy[NENDO*ND3],short int g_endoBz[NENDO*ND3],short int g_endoCx[NENDO*ND3],short int g_endoCy[NENDO*ND3],short int g_endoCz[NENDO*ND3],float g_tm[3][6]);
extern "C" void gpu_BSPitmm_Malloc(float *g_POTi,float g_der[NL],float *g_endoHnnA,float *g_surfPOTi);
extern "C" void gpu_BSPitmm_HostToDevice(float *g_POTi,float g_der[NL],float *g_endoHnnA,float *g_surfPOTi);
extern "C" void gpu_BSPitmm_DeviceToHost(float *g_epicPOTold,float *g_POTi,float g_der[NL],float *g_endoHnnA,float *g_surfPOTi);
extern "C" void gpu_dpl_all(short int do_epicPOT,float g_posi,float g_posj,float g_posk,short int g_nPos,float g_dpl[3],float *g_POTi,float g_der[NL],
float g_HRTx0,float g_HRTy0,float g_HRTz0,int g_NendoB,int g_NendoC,
float *g_endoHnnA,short int *g_endoBx,short int *g_endoBy,short int *g_endoBz,float g_tm[3][6],float *g_epicPOTold);
extern "C" void gpu_dpl_nPos(float g_posi,float g_posj,float g_posk,short int g_nPos,float g_dpl[3],float *g_POTi,float g_der[NL]);
extern "C" void gpu_dpl_nPos_2(float g_posi,float g_posj,float g_posk,float g_dpl[3]);
extern "C" void gpu_dpl_Nendo(float g_posi,float g_posj,float g_posk,float g_HRTx0,float g_HRTy0,float g_HRTz0,
int g_NendoBC,int g_offset,float g_dpl[3],float *g_endoHnnA,
short int *g_endoBx,short int *g_endoBy,short int *g_endoBz,float g_tm[3][6]);
extern "C" void gpu_dpl_Nepic(float g_posi,float g_posj,float g_posk,float g_HRTx0,float g_HRTy0,float g_HRTz0,
float g_dpl[3],float g_tm[3][6],float *g_epicPOTold);
//extern "C" void dplpro(float *POTi,const short int NL, const float **r);
//------------ 2009-2-6-16 BY SWF---------
//int main(int argc,char *argv[])
//void hpc(int argc, char** argv)
void main(int argc, char** argv)
{
int myid, numprocs;
int namelen;
//------------ 2009-2-6-16 BY SWF---------
// comment:
FILE *fptime;
//------------ 2009-2-6-16 BY SWF---------
short int ipttl[4][56];
HFILE hFp;
short int nVCG,BSPm,mTime,iTime,i,j,k;
short int nsnrt;
float *VCGs[3];
float eff;
float *endoHnnA;
float *endoPOT[TSTEP];
short int index;
int nn,n0,n1,n2,ni;
float pi=3.14159;
short int *tnd[3];
int li;
void XCTcalm(void);
//void XCTcalm(int myid);
void BSPcalm(void);
void rdAPDm(void);
void freeXCTm(void);
fprintf(stdout, "Begin computing. %f\n", clock());
for(i=0;i<NK;i++) {
mapCell[i] = (char *) malloc(NI*NJ);
mapAPD[i] = (short int *) malloc(NI*NJ*2);
mapSpeed[i] = (short int *) malloc(NI*NJ*2);
mapACT[i] = (short int *) malloc(NI*NJ*2);
locXCT[i] = (long *) malloc(NI*NJ*4);
if((mapCell[i]==NULL)||(mapAPD[i]==NULL)||(mapACT[i]==NULL)||(locXCT[i]==NULL)) {
fprintf(stdout,"out of memory\n");
fflush(stdout);
return;
}
}
iparm = (short int *) malloc(NCELL*NPARM*2);
kmin = (short int *) malloc(NI*NJ*2);
kmax = (short int *) malloc(NI*NJ*2);
iStep = (short int *) malloc(TSTEP*2);
if((iparm==NULL)||(kmin==NULL)||(kmax==NULL)||(iStep==NULL)) {
fprintf(stdout,"out of memory\n");
fflush(stdout);
return;
}
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
return;
}
for(i=0;i<3;i++) {
r[i] = (float *) malloc(NL*4);
rn[i] = (float *) malloc(NL*4);
if((r[i]==NULL)||(rn[i]==NULL)) {
fprintf(stdout,"out of memory\n");
fflush(stdout);
return;
}
}
for(i=0;i<NCELL;i++) {
ydata[i] = (float *) malloc(1000*ND*4);
if(ydata[i]==NULL) {
fprintf(stdout,"out of memory\n");
fflush(stdout);
return; }
}
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
return;
}
for(i=0;i<4;i++) {
mag[i] = (short int *) malloc(50000*ND3*2);
if(mag[i]==NULL) {
fprintf(stdout,"out of memory\n");
fflush(stdout);
return; }
}
for(k=0;k<NK;k++) {
for(j=0;j<NJ;j++) {
for(i=0;i<NI;i++) {
*(mapAPD[k]+j*NI+i)=0;
*(mapSpeed[k]+j*NI+i)=0;
}
}
}
for(i=0;i<4;i++) {
for(li=0;li<50000*ND3;li++) {
*(mag[i]+li)=0;
}
}
//TRACE("\nReading HRT file ...");
rdHRT();
if(flag_flop||(flg_thread==0)) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
return;
}
//TRACE("\nReading APD file ...");
rdAPDm();
if(flag_flop||(flg_thread==0)) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
return;
}
//TRACE("\nReading POS file ...");
rdpos();
if(flag_flop||(flg_thread==0)) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
return;
}
//TRACE("\nReading NOD file ...");
rdnod();
if(flag_flop||(flg_thread==0)) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
return;
}
//TRACE("\nReading ELC file ...");
rdelc();
if(flag_flop||(flg_thread==0)) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
return;
}
//TRACE("\nLocating Cell Sequence ...");
locfile();
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
return;
}
//TRACE("\nFinding Geometric Info ...");
geoinfc();
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
return;
}
if (ANISO==1) {
//TRACE("\nCalculating Rotating Anisotropy ...");
for (i=0; i<3; i++) {
fibdir[i] = (float *) malloc(50000*ND3*4);
if (fibdir[i]==NULL) {
fprintf(stdout,"Out of memory ! !\n");
fflush(stdout);
return;// 0;
}
}
for(i=0;i<3;i++) {
for(li=0;li<50000*ND3;li++) {
*(fibdir[i]+li)=0.;
}
}
//TRACE("\nCalculating setaniso ...");
setaniso();
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
freeFibdir();
return;
}
//TRACE("\nCalculating neibdir ...");
neibdir();
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
freeFibdir();
return;
}
//TRACE("\nCalculating stminvx ...");
stminvx(50);
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
freeFibdir();
return;
}
//TRACE("\nCalculating XCTinvcm ...");
XCTinvcm();
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
freeFibdir();
return;
}
//TRACE("\nCalculating fibplane ...");
fibplane();
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
freeFibdir();
return;
}
//TRACE("\nCalculating fibdirct ...");
fibdirct();
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
freeFibdir();
return;
}
//TRACE("\nCompleting Rotating Anisotropy ...");
}
//TRACE("\nStimulus calculating ...");
stminvx(20*ND);
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freemagcs();
freeFibdir();
return;
}
//TRACE("\nExcitation estimating ...");
XCTinvcm();
savACT();
fflush(stdout);
freemagcs();
for(i=0;i<NCYCL;i++) {
mapXCTm[i]=(short int *) malloc(50000*ND3*2);
if((mapXCTm[i]==NULL)) {
fprintf(stdout,"Out of memory ! !\n");
fflush(stdout);
return;// 0;
}
}
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freeXCTm();
freeFibdir();
return;
}
//TRACE("\nExcitation calculating ...");
XCTcalm();
//fprintf(stdout,"XCTcalm()ok=;myid=%d \n",myid);
fflush(stdout);
if(flg_thread==0) {
freemapAPDcs();
freemapAPD();
freebrs();
freeXCTm();
freeFibdir();
return;
}
if(!flg_calcu_option) {
for(i=0;i<NL;i++) {
POT[i]=(float *) malloc(TSTEP*4);
POT_reduce[i]=(float *) malloc(TSTEP*4);//by sf 090622
aw[i]=(float *) malloc(NL*4);
if((POT[i]==NULL)||(aw[i]==NULL)) {
// MessageBox(NULL,"Out of memory !",NULL,MB_OK);
cout<<"Out of memory !"<<endl;
exit(0);
}
}
for(i=0;i<NL;i++) {
for(j=0;j<TSTEP;j++) {
*(POT[i]+j)=(float)0;
*(POT_reduce[i]+j)=(float)0;//by sf 090622
}
}
bw=(float *) malloc(NL*4);
POTi=(float *) malloc(NL*4);
if((POTi==NULL)||(bw==NULL)) {
// MessageBox(NULL,"Out of memory !",NULL,MB_OK);
cout<<"Out of memory !"<<endl;
exit(0);
}
for(i=0;i<NL;i++) *(POTi+i)=(float)0;
if(flg_thread==0) {
freemapAPD();
freemapAPDcs();
freebrs();
freeXCTm();
freePOTcs();
freeFibdir();
return;
}
//TRACE("\nReading MTX file ...");
rdmtx();
if(flag_flop||(flg_thread==0)) {
freemapAPD();
freemapAPDcs();
freebrs();
freeXCTm();
freePOTcs();
freeFibdir();
return;
}
//TRACE("\nBSPM calculating ...");
//------------ 2009-2-4-15 BY SWF---------
// comment: test data trans
//int mydata[2]={20,60};
//printf("my%d,%d\n", mydata[0],mydata[1]);
//printf("aa%f,%f\n", *POTi,*(POTi+1));
//dplpro(POTi,NL,r);
//printf("aa%f,%f\n", *POTi,*(POTi+1));
//printf("my%d,%d\n", mydata[0],mydata[1]);
//------------ 2009-2-4-15 BY SWF---------
//------------ 2009-2-6-16 BY SWF---------
// comment:
starttime = clock();
fprintf(stdout,"starttime = %f\n", starttime);
//------------ 2009-2-6-16 BY SWF---------
if (useGPU==1)
{
GPUnum=cudamain(argc, argv);
fprintf(stdout,"GPUnum = %d", GPUnum);
};
BSPcalm();
//------------ 2009-2-6-16 BY SWF---------
// comment:
endtime = clock();
fprintf(stdout,"\nendtime = %f\n", endtime);
/*if (myid==0)
{
//fprintf(stdout,"sd test- endtime = %f,all-time = %f,threadnum=%d,useGPU=%d,numprocs=%d,nTimeStep=%d\n", starttime,(endtime-starttime)/CLK_TCK,threadnum,useGPU,numprocs,nTimeStep);
fprintf(stdout,"sd test all-time=%f,useGPU=%d,threadnum=%d,numprocs=%d,nTimeStep=%d\n",(endtime-starttime)/CLK_TCK,useGPU,threadnum,numprocs,nTimeStep);
fptime=fopen(dataPath+"gputime.txt","a") ;
fprintf(fptime,"sd test all-time=%f,useGPU=%d,threadnum=%d,numprocs=%d,nTimeStep=%d\n",(endtime-starttime)/CLK_TCK,useGPU,threadnum,numprocs,nTimeStep);
fclose(fptime);
//fptime=fopen(dataPath+"task.txt","a") ;
//fprintf(fptime,"sd test all-time=%f,useGPU=%d,threadnum=%d,numprocs=%d,nTimeStep=%d\n",(endtime-starttime)/CLK_TCK,useGPU,threadnum,numprocs,nTimeStep);
//for(i=0;i<2;i=i+1)
//{
// for(j=0;j<=nTimeStep;j=j+1)
// {
// fprintf(fptime,"itask[%d][%d]=%d\n",i,j,*(itask[i]+j));
// }
//};
//for(i=0;i<3;i=i+1)
//{
// for(j=0;j<=nTimeStep;j=j+1)
// {
// fprintf(fptime,"iloops[%d][%d]=%d\n",i,j,*(iloops[i]+j));
// }
//};
//fclose(fptime);
}*/
//------------ 2009-2-6-16 BY SWF---------
if(flag_flop||(flg_thread==0)) {
freemapAPD();
freemapAPDcs();
freebrs();
freeXCTm();
freePOTcs();
freeFibdir();
return;
}
//TRACE("\nECG and VCG calculating ...");
ECGcal();
}
freemapAPD();
freemapAPDcs();
freebrs();
freeXCTm();
freeFibdir();
if(!flg_calcu_option) {
freePOTcs();
}
fprintf(stdout,"Simulation End !\n");
fflush(stdout);
flg_thread=0;
flg_display=0;
flg_calculate=0;
flg_stop=0;
return;
}
// Read the heart geometry file "tour.hrt": the stimulation (tail) cell
// positions followed by the packed cell-type map. The file stores data at
// the coarse resolution; both the positions and the cell map are expanded
// in place by the refinement factor ND (2 or 3), duplicating every coarse
// cell into an ND x ND x ND block of fine cells.
// On failure to open the file, sets flag_flop=1 and returns.
void rdHRT(void) {
    HFILE hFp;
    short int i, j, k, nCell;
    hFp=_lopen(dataPath+"tour.hrt ",OF_READ);
    if (hFp==HFILE_ERROR) {
        // fixed: previously reported "nod file" although this opens the hrt file
        fprintf(stdout,"Can not open hrt file ! !\n");
        fflush(stdout);
        flag_flop=1;
        return;
    }
    _lread(hFp,&nttl,2);
    if (nttl>NI/ND) nttl=NI/ND;
    /**
     * read stimulation cell's position
     */
    for (i=0;i<nttl;i++) {
        _lread(hFp,&ipttl[0][i*ND3],2);
        _lread(hFp,&ipttl[1][i*ND3],2);
        _lread(hFp,&ipttl[2][i*ND3],2);
        _lread(hFp,&ipttl[3][i*ND3],2);
    }
    if (ND == 2) {
        // Scale the coarse coordinates up and replicate each seed into the
        // 2x2x2 block of fine-grid cells it covers.
        // NOTE(review): here ipttl[3] is also multiplied by ND, while the
        // ND==3 branch below leaves it unscaled (commented out) -- confirm
        // which is intended for the fourth component.
        for (i=0;i<nttl;i++) {
            ipttl[0][i*ND3] *= ND;
            ipttl[1][i*ND3] *= ND;
            ipttl[2][i*ND3] *= ND;
            ipttl[3][i*ND3] *= ND;
            for (j = 1; j < ND3; j++) {
                ipttl[0][i*ND3+j] = ipttl[0][i*ND3];
                ipttl[1][i*ND3+j] = ipttl[1][i*ND3];
                ipttl[2][i*ND3+j] = ipttl[2][i*ND3];
                ipttl[3][i*ND3+j] = ipttl[3][i*ND3];
            }
            ipttl[0][i*ND3+1] = ipttl[0][i*ND3]+1;
            ipttl[1][i*ND3+2] = ipttl[1][i*ND3]+1;
            ipttl[0][i*ND3+3] = ipttl[0][i*ND3]+1;
            ipttl[1][i*ND3+3] = ipttl[1][i*ND3]+1;
            ipttl[2][i*ND3+4] = ipttl[2][i*ND3]+1;
            ipttl[0][i*ND3+5] = ipttl[0][i*ND3]+1;
            ipttl[2][i*ND3+5] = ipttl[2][i*ND3]+1;
            ipttl[1][i*ND3+6] = ipttl[1][i*ND3]+1;
            ipttl[2][i*ND3+6] = ipttl[2][i*ND3]+1;
            ipttl[0][i*ND3+7] = ipttl[0][i*ND3]+1;
            ipttl[1][i*ND3+7] = ipttl[1][i*ND3]+1;
            ipttl[2][i*ND3+7] = ipttl[2][i*ND3]+1;
        }
    }
    if (ND == 3) {
        // Replicate each seed into its 3x3x3 block; the offsets below
        // enumerate all 27 (di,dj,dk) combinations in {0,1,2}^3.
        for (i=0;i<nttl;i++) {
            ipttl[0][i*ND3] *= ND;
            ipttl[1][i*ND3] *= ND;
            ipttl[2][i*ND3] *= ND;
            //ipttl[3][i*ND3] *= ND;
            for (j = 1; j < ND3; j++) {
                ipttl[0][i*ND3+j] = ipttl[0][i*ND3];
                ipttl[1][i*ND3+j] = ipttl[1][i*ND3];
                ipttl[2][i*ND3+j] = ipttl[2][i*ND3];
                ipttl[3][i*ND3+j] = ipttl[3][i*ND3];
            }
            // 00(1,2)
            ipttl[2][i*ND3+1] = ipttl[2][i*ND3]+1;
            ipttl[2][i*ND3+2] = ipttl[2][i*ND3]+2;
            // 01(0,1,2)
            ipttl[1][i*ND3+3] = ipttl[1][i*ND3]+1;
            ipttl[1][i*ND3+4] = ipttl[1][i*ND3]+1;
            ipttl[2][i*ND3+4] = ipttl[2][i*ND3]+1;
            ipttl[1][i*ND3+5] = ipttl[1][i*ND3]+1;
            ipttl[2][i*ND3+5] = ipttl[2][i*ND3]+2;
            // 02(0,1,2)
            ipttl[1][i*ND3+6] = ipttl[1][i*ND3]+2;
            ipttl[1][i*ND3+7] = ipttl[1][i*ND3]+2;
            ipttl[2][i*ND3+7] = ipttl[2][i*ND3]+1;
            ipttl[1][i*ND3+8] = ipttl[1][i*ND3]+2;
            ipttl[2][i*ND3+8] = ipttl[2][i*ND3]+2;
            // 10(0,1,2)
            ipttl[0][i*ND3+9] = ipttl[0][i*ND3]+1;
            ipttl[0][i*ND3+10] = ipttl[0][i*ND3]+1;
            ipttl[2][i*ND3+10] = ipttl[2][i*ND3]+1;
            ipttl[0][i*ND3+11] = ipttl[0][i*ND3]+1;
            ipttl[2][i*ND3+11] = ipttl[2][i*ND3]+2;
            // 11(0,1,2)
            ipttl[0][i*ND3+12] = ipttl[0][i*ND3]+1;
            ipttl[1][i*ND3+12] = ipttl[1][i*ND3]+1;
            ipttl[0][i*ND3+13] = ipttl[0][i*ND3]+1;
            ipttl[1][i*ND3+13] = ipttl[1][i*ND3]+1;
            ipttl[2][i*ND3+13] = ipttl[2][i*ND3]+1;
            ipttl[0][i*ND3+14] = ipttl[0][i*ND3]+1;
            ipttl[1][i*ND3+14] = ipttl[1][i*ND3]+1;
            ipttl[2][i*ND3+14] = ipttl[2][i*ND3]+2;
            // 12(0,1,2)
            ipttl[0][i*ND3+15] = ipttl[0][i*ND3]+1;
            ipttl[1][i*ND3+15] = ipttl[1][i*ND3]+2;
            ipttl[0][i*ND3+16] = ipttl[0][i*ND3]+1;
            ipttl[1][i*ND3+16] = ipttl[1][i*ND3]+2;
            ipttl[2][i*ND3+16] = ipttl[2][i*ND3]+1;
            ipttl[0][i*ND3+17] = ipttl[0][i*ND3]+1;
            ipttl[1][i*ND3+17] = ipttl[1][i*ND3]+2;
            ipttl[2][i*ND3+17] = ipttl[2][i*ND3]+2;
            // 20(0,1,2)
            ipttl[0][i*ND3+18] = ipttl[0][i*ND3]+2;
            ipttl[0][i*ND3+19] = ipttl[0][i*ND3]+2;
            ipttl[2][i*ND3+19] = ipttl[2][i*ND3]+1;
            ipttl[0][i*ND3+20] = ipttl[0][i*ND3]+2;
            ipttl[2][i*ND3+20] = ipttl[2][i*ND3]+2;
            // 21(0,1,2)
            ipttl[0][i*ND3+21] = ipttl[0][i*ND3]+2;
            ipttl[1][i*ND3+21] = ipttl[1][i*ND3]+1;
            ipttl[0][i*ND3+22] = ipttl[0][i*ND3]+2;
            ipttl[1][i*ND3+22] = ipttl[1][i*ND3]+1;
            ipttl[2][i*ND3+22] = ipttl[2][i*ND3]+1;
            ipttl[0][i*ND3+23] = ipttl[0][i*ND3]+2;
            ipttl[1][i*ND3+23] = ipttl[1][i*ND3]+1;
            ipttl[2][i*ND3+23] = ipttl[2][i*ND3]+2;
            // 22(0,1,2)
            ipttl[0][i*ND3+24] = ipttl[0][i*ND3]+2;
            ipttl[1][i*ND3+24] = ipttl[1][i*ND3]+2;
            ipttl[0][i*ND3+25] = ipttl[0][i*ND3]+2;
            ipttl[1][i*ND3+25] = ipttl[1][i*ND3]+2;
            ipttl[2][i*ND3+25] = ipttl[2][i*ND3]+1;
            ipttl[0][i*ND3+26] = ipttl[0][i*ND3]+2;
            ipttl[1][i*ND3+26] = ipttl[1][i*ND3]+2;
            ipttl[2][i*ND3+26] = ipttl[2][i*ND3]+2;
        }
    }
    nttl *= ND3;
    /**
     * read cell type of each cell
     */
    // Read the coarse map into every ND-th layer; the expansion below fills
    // in the intermediate fine-grid layers/rows/columns.
    for (i=0;i<NK/ND;i++) {
        _lread(hFp,mapCell[i*ND],NI*NJ/ND/ND);
    }
    if (ND == 2) {
        // Expand in reverse index order so the coarse values are not
        // overwritten before they are replicated.
        for (k=0;k<NK/ND;k++) {
            for (j=NJ/ND-1;j>=0;j--) {
                for (i=NI/ND-1;i>=0;i--) {
                    nCell = *(mapCell[ND*k]+j*NI/ND+i);
                    *(mapCell[ND*k]+ND*j*NI+ND*i)= nCell;
                    *(mapCell[ND*k]+ND*j*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k]+(ND*j+1)*NI+ND*i) = nCell;
                    *(mapCell[ND*k]+(ND*j+1)*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k+1]+ND*j*NI+ND*i) = nCell;
                    *(mapCell[ND*k+1]+ND*j*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k+1]+(ND*j+1)*NI+ND*i) = nCell;
                    *(mapCell[ND*k+1]+(ND*j+1)*NI+ND*i+1) = nCell;
                }
            }
        }
        // Force cell type 3 at a fixed site.
        // NOTE(review): these writes use NJ as the row stride while the rest
        // of this function indexes mapCell with j*NI+i -- presumably NI==NJ
        // here; confirm.
        *(mapCell[32*ND]+25*ND*NJ+32*ND) = 3;
        *(mapCell[32*ND]+(25*ND+1)*NJ+32*ND) = 3;
        *(mapCell[32*ND+1]+25*ND*NJ+32*ND) = 3;
        *(mapCell[32*ND+1]+(25*ND+1)*NJ+32*ND) = 3;
    }
    if (ND == 3) {
        for (k=0;k<NK/ND;k++) {
            for (j=NJ/ND-1;j>=0;j--) {
                for (i=NI/ND-1;i>=0;i--) {
                    nCell = *(mapCell[ND*k]+j*NI/ND+i);
                    *(mapCell[ND*k]+ND*j*NI+ND*i)= nCell;
                    *(mapCell[ND*k]+ND*j*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k]+ND*j*NI+ND*i+2) = nCell;
                    *(mapCell[ND*k]+(ND*j+1)*NI+ND*i) = nCell;
                    *(mapCell[ND*k]+(ND*j+1)*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k]+(ND*j+1)*NI+ND*i+2) = nCell;
                    *(mapCell[ND*k]+(ND*j+2)*NI+ND*i) = nCell;
                    *(mapCell[ND*k]+(ND*j+2)*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k]+(ND*j+2)*NI+ND*i+2) = nCell;
                    *(mapCell[ND*k+1]+ND*j*NI+ND*i)= nCell;
                    *(mapCell[ND*k+1]+ND*j*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k+1]+ND*j*NI+ND*i+2) = nCell;
                    *(mapCell[ND*k+1]+(ND*j+1)*NI+ND*i) = nCell;
                    *(mapCell[ND*k+1]+(ND*j+1)*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k+1]+(ND*j+1)*NI+ND*i+2) = nCell;
                    *(mapCell[ND*k+1]+(ND*j+2)*NI+ND*i) = nCell;
                    *(mapCell[ND*k+1]+(ND*j+2)*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k+1]+(ND*j+2)*NI+ND*i+2) = nCell;
                    *(mapCell[ND*k+2]+ND*j*NI+ND*i)= nCell;
                    *(mapCell[ND*k+2]+ND*j*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k+2]+ND*j*NI+ND*i+2) = nCell;
                    *(mapCell[ND*k+2]+(ND*j+1)*NI+ND*i) = nCell;
                    *(mapCell[ND*k+2]+(ND*j+1)*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k+2]+(ND*j+1)*NI+ND*i+2) = nCell;
                    *(mapCell[ND*k+2]+(ND*j+2)*NI+ND*i) = nCell;
                    *(mapCell[ND*k+2]+(ND*j+2)*NI+ND*i+1) = nCell;
                    *(mapCell[ND*k+2]+(ND*j+2)*NI+ND*i+2) = nCell;
                }
            }
        }
        // NOTE(review): same NJ-as-stride question as the ND==2 branch above.
        *(mapCell[32*ND]+25*ND*NJ+32*ND) = 3;
        *(mapCell[32*ND]+(25*ND+1)*NJ+32*ND) = 3;
        *(mapCell[32*ND]+(25*ND+2)*NJ+32*ND) = 3;
        *(mapCell[32*ND+1]+25*ND*NJ+32*ND) = 3;
        *(mapCell[32*ND+1]+(25*ND+1)*NJ+32*ND) = 3;
        *(mapCell[32*ND+1]+(25*ND+2)*NJ+32*ND) = 3;
        *(mapCell[32*ND+2]+25*ND*NJ+32*ND) = 3;
        *(mapCell[32*ND+2]+(25*ND+1)*NJ+32*ND) = 3;
        *(mapCell[32*ND+2]+(25*ND+2)*NJ+32*ND) = 3;
    }
    _lclose(hFp);
}
/*
 * Release the per-layer activation map, cell-type map and cell-location
 * map, plus the cell-parameter table and the kmin/kmax/iStep buffers.
 */
void freemapAPD(void) {
    short int layer;
    for (layer = 0; layer < NK; layer++) {
        free(locXCT[layer]);
        free(mapCell[layer]);
        free(mapACT[layer]);
    }
    free(iparm);
    free(kmin);
    free(kmax);
    free(iStep);
}
void freebrs(void) {
short int i = 0;
for (i=0;i<3;i++) {
free(r[i]);
free(rn[i]);
}
for (i=0;i<NCELL;i++) {
free(ydata[i]);
}
}
/* Release the per-layer APD and conduction-speed maps. */
void freemapAPDcs(void) {
    for (short int layer = 0; layer < NK; ++layer) {
        free(mapSpeed[layer]); /* added by Zhu */
        free(mapAPD[layer]);
    }
}
/* Release the four mag[] buffers. */
void freemagcs(void) {
    short int n = 0;
    while (n < 4) {
        free(mag[n]);
        ++n;
    }
}
/* Release the per-lead potential buffers and the transfer work vectors. */
void freePOTcs(void) {
    short n;
    for (n = 0; n < NL; ++n) {
        free(aw[n]);
        free(POT[n]);
        free(POT_reduce[n]); /* by sf 090622 */
    }
    free(bw);
    free(POTi);
}
/* Release the per-cycle excitation maps. */
void freeXCTm(void)
{
    for (short int cycle = 0; cycle < NCYCL; ++cycle)
        free(mapXCTm[cycle]);
}
/* Release the fiber-direction vectors; only allocated when ANISO==1. */
void freeFibdir(void)
{
    if (ANISO == 1) {
        for (short int axis = 0; axis < 3; ++axis)
            free(fibdir[axis]);
    }
}
// read position parameter of heart & call transfer matrix ----
// Read the heart position file "tour.pos": the global scale, the origin
// (HRTx0, HRTy0, HRTz0) and the three orientation angles (phai, pusai,
// theta), then rebuild the (i,j,k)->(x,y,z) transform via transf().
// On failure to open the file, sets flag_flop=1 and returns.
void rdpos(void) {
    void transf(void);
    HFILE hFp;
    hFp = _lopen(dataPath+"tour.pos ", OF_READ);
    if (hFp == HFILE_ERROR) {
        fprintf(stdout,"Can not open pos file ! !\n");
        fflush(stdout);
        flag_flop = 1;
        return;
    }
    /* seven 4-byte float fields, in file order */
    float *field[7] = { &HRTscale, &HRTx0, &HRTy0, &HRTz0,
                        &phai, &pusai, &theta };
    for (short int n = 0; n < 7; ++n)
        _lread(hFp, field[n], 4);
    _lclose(hFp);
    transf();
}
// Read heart
/**
* normal cell's para
*/
// APD parameters
// Cell sn atr anv hb bb pkj vtr ab1 ab2 ab3 ab4 ab5 ab6 ab7
// Parm 1 2 3 4 5 6 7 8 9 10 11 12 13 14
// 1 T0 30 10 0 0 0 0 0 0 0 0 0 0 0 0
// 2 T1 0 0 5 5 5 5 5 5 5 5 5 5 5 5
// 3 T2 0 0 100 100 100 105 75 75 75 75 75 75 75 75
// 4 T3 175 120 175 175 175 195 175 175 175 175 175 175 175 175
// 5 APR 170 100 210 210 210 250 200 200 200 200 200 200 200 200
// 6 FRT 205 140 320 320 320 345 295 295 295 295 295 295 295 295
// 7 V0 -90 -90 -90 -90 -90 -90 -90 -90 -90 -90 -90 -90 -90 -90
// 8 V1 30 -20 40 40 40 40 40 40 40 40 40 40 40 40
// 9 V2 30 -20 30 30 30 30 30 30 30 30 30 30 30 30
// 10 GRD 250 0 0 0 0 5 5 5 5 5 5 5 5 5
// 11 DCS 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 12 DVT 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 13 ECF 100 100 100 100 100 100 100 0 0 0 0 0 0 0
// 14 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 15 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 16 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 18 BCL 800 0 0 0 0 0 0 0 0 0 0 0 0 0
// 19 BN 1 0 0 0 0 0 0 0 0 0 0 0 0 0
// 20 inc 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 21 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 22 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 23 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 24 ICL 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 25 PRT 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 26 DLY 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 27 ACC 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 28 PBP 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 29 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 30 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 31 0 0 0 0 0 0 0 0 0 0 0 0 0 0
// 32 CS 50 100 10 250 250 250 50 -12 0 0 0 0 0 0
// 33 DC 0 0 0 0 0 0 0 1 0 0 0 0 0 0
// 34 0 0 0 0 0 1 0 0 0 0 0 0 0 0
// 35 0 0 0 0 0 0 0 0 0 0 0 0 0 0
//
// Read the APD parameter file "tour.apd" into iparm (NPARM shorts per cell
// type, see the parameter table above) plus the sampled repolarization
// curve (ixsmp/ysmp) for each of the NCELL cell types, rescale the timing
// parameters by ND, and build the per-type action-potential waveform table
// ydata[] phase by phase (phase 3 is Lagrange-interpolated from the
// samples). Also updates the global maxima mBCL/miBN/mS2ST/mS2CL/mS2BN
// and maxXctStep. On failure to open the file, sets flag_flop=1.
void rdAPDm(void)
{
    short int npoint[NCELL],ixsmp[NCELL][100],num;
    float ysmp[NCELL][100];
    short int iT0,iT01,iT012,iT0123;
    short int incr,iBN0,iBCL,iBN,ntstp,iS2ST,iS2CL,iS2BN;
    float dx0,dx1,dx2,dx01,dx02,dx10,dx12,dx20,dx21,a,b;
    HFILE hFp;
    short int icell,i,j,k,icurv,index;
    // hFp=_lopen("f:/apd/apdapd.5",READ);
    //index=filepath.FindOneOf(".");
    //filepath.SetAt(index+1,'a');
    //filepath.SetAt(index+2,'p');
    //filepath.SetAt(index+3,'d');
    //hFp=_lopen(filepath,OF_READ);
    hFp=_lopen(dataPath+"tour.apd ",OF_READ);
    if(hFp==HFILE_ERROR) {
        fprintf(stdout,"Can not open apd file ! !\n");
        fflush(stdout);
        flag_flop=1;
        return;
    }
    // --- per cell type: NPARM parameters, then the sampled curve ---
    for(icell=0;icell<NCELL;icell++) {
        _lread(hFp,iparm+icell*NPARM,NPARM*2);
        // Scale the timing parameters T0..FRT by the refinement factor ND.
        // NOTE(review): these lines index with the literal 35 while the rest
        // of this function uses NPARM -- presumably NPARM==35; confirm.
        *(iparm+icell*35+0) = *(iparm+icell*35+0)*ND;
        *(iparm+icell*35+1) = *(iparm+icell*35+1)*ND;
        *(iparm+icell*35+2) = *(iparm+icell*35+2)*ND;
        *(iparm+icell*35+3) = *(iparm+icell*35+3)*ND;
        *(iparm+icell*35+4) = *(iparm+icell*35+4)*ND;
        *(iparm+icell*35+5) = *(iparm+icell*35+5)*ND;
        //*(iparm+icell*35+10) = *(iparm+icell*35+10)*ND;
        // Sampled phase-3 curve: npoint (x,y) pairs, x rescaled by ND.
        _lread(hFp,&npoint[icell],2);
        for(j=0;j<npoint[icell];j++) {
            _lread(hFp,&ixsmp[icell][j],2);
            ixsmp[icell][j] = ixsmp[icell][j]*ND;
            _lread(hFp,&ysmp[icell][j],4);
        }
    }
    _lclose(hFp);
    // --- rescale pacing parameters and track global maxima ---
    mBCL=0;
    miBN=0;
    mS2ST=0;
    mS2CL=0;
    mS2BN=0;
    maxXctStep=0;
    for(icell=0;icell<NCELL;icell++) {
        incr=*(iparm+icell*NPARM+19);
        // Negative increment: cap the beat number so the cycle length
        // never shrinks to/through zero.
        if(incr<0) {
            iBN0=1-100/incr;
            if(*(iparm+icell*NPARM+18)>iBN0) *(iparm+icell*NPARM+18)=iBN0;
        }
        //iBCL=*(iparm+icell*NPARM+17); //basic cycle length
        *(iparm+icell*NPARM+17) = *(iparm+icell*NPARM+17)*ND;
        // S2, additional stimulus
        *(iparm+icell*NPARM+14) = *(iparm+icell*NPARM+14)*ND;
        *(iparm+icell*NPARM+15) = *(iparm+icell*NPARM+15)*ND;
        iS2ST=*(iparm+icell*NPARM+14);
        iS2CL=*(iparm+icell*NPARM+15);
        iS2BN=*(iparm+icell*NPARM+16);
        iBCL=*(iparm+icell*NPARM+17); //basic cycle length
        iBN=*(iparm+icell*NPARM+18); // beat number
        // total pacing time in time steps (1 TS = 3 ms; see /3 conversions)
        ntstp=(iBN*iBCL+iBN*(iBN-1)*iBCL*incr/200)/3;
        if(iBCL>mBCL) mBCL=iBCL;
        if(iBN>miBN) miBN=iBN;
        if(iS2ST>mS2ST) mS2ST=iS2ST;
        if(iS2CL>mS2CL) mS2CL=iS2CL;
        if(iS2BN>mS2BN) mS2BN=iS2BN;
        if(ntstp>maxXctStep) maxXctStep=ntstp;
        // CL increment: % --> TS
        *(iparm+icell*NPARM+19)=iBCL*incr*ND/300;
        *(iparm+icell*NPARM+17)=iBCL/3;
        // iparm(18) <-- total pacing time
        *(iparm+icell*NPARM+18)=ntstp;
        // FRP <-- FRP-ARP
        *(iparm+icell*NPARM+5)=*(iparm+icell*NPARM+5)-*(iparm+icell*NPARM+4);
        // intrinsic CL: ms --> TS
        *(iparm+icell*NPARM+23)=*(iparm+icell*NPARM+23)/3;
        //-- conduction speed(100*)CS:
        // CS(m/s) --> CS(2*1.5 mm/3ms) --> CS*2(cell/Step) ----
        /*<Comment by ALF> why 100*/
        *(iparm+icell*NPARM+31)=*(iparm+icell*NPARM+31)*2;
        // we only have two points to represent his bundle and bundle branches
        //if (icell == 3) *(iparm+(icell-1)*NPARM+31)=*(iparm+(icell-1)*NPARM+31)/11;
        //if (icell == 3) *(iparm+(icell-1)*NPARM+31)=100;
        //if (icell == 5) *(iparm+(icell-1)*NPARM+31)=*(iparm+(icell-1)*NPARM+31)/ND;
        // initialize ydata to the resting potential V0 (parameter 6)
        for(short int n=0;n<1000*ND;n++)
            *(ydata[icell]+n)=(float)*(iparm+icell*NPARM+6);
        //for (int ii=0; ii < NPARM; ii++ )
        //TRACE("\nCell %2d %2d %d", icell, ii, *(iparm+icell*NPARM+ii));
    }
    // --- data set: build the waveform for each cell type ---
    for(icurv=0;icurv<NCELL;icurv++) {
        num=npoint[icurv];
        iT0=*(iparm+icurv*NPARM);
        iT01=iT0+*(iparm+icurv*NPARM+1);
        iT012=iT01+*(iparm+icurv*NPARM+2);
        iT0123=iT012+*(iparm+icurv*NPARM+3);
        //---- lenth of APD ------
        la012[icurv]=iT012;
        la0123[icurv]=iT0123;
        // --- t = phased 0 --- (linear rise from V0 to V1)
        for(i=0;i<=(iT0-1);i++) { // < ? July 4, 1996
            // +++++ iparm(icurv,6), the real value +++++
            a=(float)(-*(iparm+icurv*NPARM+6)+*(iparm+icurv*NPARM+7))
                /(float)*(iparm+icurv*NPARM);
            b=(float)*(iparm+icurv*NPARM+6);
            *(ydata[icurv]+i)=a*i+b;
        }
        *(ydata[icurv]+iT0)=(float)*(iparm+icurv*NPARM+7);
        // --- t = phase 1 --- (linear V1 -> V2, only if T1 > 0)
        if(iT01>iT0) {
            for(i=(iT0+1);i<=(iT01-1);i++) {
                a=(float)(*(iparm+icurv*NPARM+8)-*(iparm+icurv*NPARM+7))
                    /(float)*(iparm+icurv*NPARM+1);
                b=(float)*(iparm+icurv*NPARM+7)-a*iT0;
                *(ydata[icurv]+i)=a*i+b;
            }
        }
        // --- t = phase 2 --- (plateau at V2)
        for(i=iT01;i<=iT012;i++)
            *(ydata[icurv]+i)=(float)*(iparm+NPARM*icurv+8);
        //---- t= phase 3 ---- (3-point Lagrange interpolation between the
        // sampled points; ixsmp is traversed from the high-index end, so the
        // samples are presumably stored in descending x order -- confirm)
        for(i=(iT012+1);i<=iT0123;i++) {
            if((i<ixsmp[icurv][num-3])&&(i>ixsmp[icurv][num-2])) {
                dx0=(float)(i-ixsmp[icurv][num-1]);
                dx1=(float)(i-ixsmp[icurv][num-2]);
                dx2=(float)(i-ixsmp[icurv][num-3]);
                dx01=(float)(ixsmp[icurv][num-1]-ixsmp[icurv][num-2]);
                dx02=(float)(ixsmp[icurv][num-1]-ixsmp[icurv][num-3]);
                dx10=(float)(ixsmp[icurv][num-2]-ixsmp[icurv][num-1]);
                dx12=(float)(ixsmp[icurv][num-2]-ixsmp[icurv][num-3]);
                dx20=(float)(ixsmp[icurv][num-3]-ixsmp[icurv][num-1]);
                dx21=(float)(ixsmp[icurv][num-3]-ixsmp[icurv][num-2]);
                *(ydata[icurv]+i)=dx1*dx2*ysmp[icurv][num-1]/dx01/dx02
                    +dx0*dx2*ysmp[icurv][num-2]/dx10/dx12
                    +dx0*dx1*ysmp[icurv][num-3]/dx20/dx21;
            }
            for(k=2;k<num-3;k++) {
                if(i==ixsmp[icurv][k+1])
                    *(ydata[icurv]+i)=ysmp[icurv][k+1];
                else if(i==ixsmp[icurv][k])
                    *(ydata[icurv]+i)=ysmp[icurv][k];
                else if((i<ixsmp[icurv][k])&&(i>ixsmp[icurv][k+1])) {
                    dx0=(float)(i-ixsmp[icurv][k+1]);
                    dx1=(float)(i-ixsmp[icurv][k]);
                    dx2=(float)(i-ixsmp[icurv][k-1]);
                    dx01=(float)(ixsmp[icurv][k+1]-ixsmp[icurv][k]);
                    dx02=(float)(ixsmp[icurv][k+1]-ixsmp[icurv][k-1]);
                    dx10=(float)(ixsmp[icurv][k]-ixsmp[icurv][k+1]);
                    dx12=(float)(ixsmp[icurv][k]-ixsmp[icurv][k-1]);
                    dx20=(float)(ixsmp[icurv][k-1]-ixsmp[icurv][k+1]);
                    dx21=(float)(ixsmp[icurv][k-1]-ixsmp[icurv][k]);
                    *(ydata[icurv]+i)=dx1*dx2*ysmp[icurv][k+1]/dx01/dx02
                        +dx0*dx2*ysmp[icurv][k]/dx10/dx12
                        +dx0*dx1*ysmp[icurv][k-1]/dx20/dx21;
                }
            }
        }
    }
}
/**
* transform matrix for (i,j,k) -> (x, y, z)
*/
// transf: coordinate transformation
/**
 * Build the 3x6 transform matrix tmswf used by ijktoxyz(): the rotation
 * defined by the three angles (phai, pusai, theta, in degrees) applied to
 * the fixed lattice basis, then scaled by HRTscale/ND.
 */
void transf(void) {
    short int row, col, n;
    float rot[3][3], acc;
    float base[3][6] = {
        1.0, 0.5, 0.5, -0.5, -0.5, 0.0,
        0.0,0.866, 0.2886,0.866, 0.2886,-0.5773,
        0.0, 0.0,-0.8165, 0.0,-0.8165,-0.8165};
    float rd = 1.745329252e-2;  /* degrees -> radians */
    float ph = rd*phai;
    float ps = rd*pusai;
    float th = rd*theta;
    float cph = cos(ph);
    float sph = sin(ph);
    float cps = cos(ps);
    float sps = sin(ps);
    float cth = cos(th);
    float sth = sin(th);
    /* rotation matrix from the three angles */
    rot[0][0] = cps*cph-cth*sps*sph;
    rot[0][1] = -sps*cph-cth*cps*sph;
    rot[0][2] = sth*sph;
    rot[1][0] = cps*sph+cth*sps*cph;
    rot[1][1] = -sps*sph+cth*cps*cph;
    rot[1][2] = -sth*cph;
    rot[2][0] = sps*sth;
    rot[2][1] = cps*sth;
    rot[2][2] = cth;
    /* tmswf = (rot * base) * HRTscale / ND */
    for (row = 0; row < 3; row++) {
        for (col = 0; col < 6; col++) {
            acc = 0;
            for (n = 0; n < 3; n++)
                acc = acc + rot[row][n]*base[n][col];
            tmswf[row][col] = (float)(acc*HRTscale/ND);
        }
    }
}
/**
* torso position
*/
// Read the data of nodes and derivatives
// Read the torso node file "tour.nod": the node count nPos (clamped to NL)
// followed by the xyz coordinates of every node and then the xyz
// components of every node's normal vector.
// On failure to open the file, sets flag_flop=1 and returns.
void rdnod(void) {
    short int i;
    HFILE hFp;
    hFp=_lopen(dataPath+"tour.nod ",OF_READ);
    if (hFp==HFILE_ERROR) {
        fprintf(stdout,"Can not open nod file ! !\n");
        fflush(stdout);  // added: flush like the other rd* readers do
        flag_flop=1;
        return;
    }
    _lread(hFp,&nPos,2);
    if (nPos>NL) nPos=NL;
    // node coordinates
    for (i=0;i<nPos;i++) {
        _lread(hFp,r[0]+i,4);
        _lread(hFp,r[1]+i,4);
        _lread(hFp,r[2]+i,4);
    }
    // node normals
    for (i=0;i<nPos;i++) {
        _lread(hFp,rn[0]+i,4);
        _lread(hFp,rn[1]+i,4);
        _lread(hFp,rn[2]+i,4);
    }
    _lclose(hFp);
}
// Read electrode position file
// Read the electrode position file "tour.elc". Only the three nv[] values
// at the end of the file are kept; the eps/weight/member tables are read
// into locals and discarded -- presumably just to advance the file
// position past them (TODO confirm they are intentionally unused here).
// On failure to open the file, sets flag_flop=1 and returns.
void rdelc(void) {
    short int i;
    float eps[3][6],weight[3][6];
    short int member[3][6];
    HFILE hFp;
    hFp=_lopen(dataPath+"tour.elc ",OF_READ);
    if (hFp==HFILE_ERROR) {
        fprintf(stdout,"Can not open elc file ! !\n");
        fflush(stdout);  // added: flush like the other rd* readers do
        flag_flop=1;
        return;
    }
    for (i=0;i<6;i++) {
        _lread(hFp,&eps[0][i],4);
        _lread(hFp,&eps[1][i],4);
        _lread(hFp,&eps[2][i],4);
    }
    for (i=0;i<6;i++) {
        _lread(hFp,&weight[0][i],4);
        _lread(hFp,&weight[1][i],4);
        _lread(hFp,&weight[2][i],4);
    }
    for(i=0;i<6;i++) {
        _lread(hFp,&member[0][i],2);
        _lread(hFp,&member[1][i],2);
        _lread(hFp,&member[2][i],2);
    }
    _lread(hFp,&nv[0],2);
    _lread(hFp,&nv[1],2);
    _lread(hFp,&nv[2],2);
    _lclose(hFp);
}
// Assign every valid heart cell (type 1..NCELL) a sequential index in
// locXCT, scanning layer-major (k, then j, then i); every other lattice
// point gets -1. The final count is left in the global totalCell.
void locfile(void) {
    short int ci, cj, ck;
    totalCell = 0;
    for (ck = 0; ck < NK; ck++)
        for (cj = 0; cj < NJ; cj++)
            for (ci = 0; ci < NI; ci++) {
                int cellType = *(mapCell[ck] + cj*NI + ci);
                if (cellType > 0 && cellType <= NCELL) {
                    *(locXCT[ck] + cj*NI + ci) = totalCell;
                    totalCell++;
                } else {
                    *(locXCT[ck] + cj*NI + ci) = -1;
                }
            }
    //TRACE("\nTotal Cells: %d", totalCell);
}
// Geometric information of heart model
// Geometric information of heart model:
//  - kmin/kmax per (i,j) column and the global extremes kTop/kBtm,
//  - kVtr: first layer containing a ventricular cell type (5..14),
//  - epicardial vertex lists epicX/Y/Z (plus the *_old copies),
//  - bundle-branch (iBB, type 5) and His-bundle (iHB, type 4) positions,
//  - endocardial surface points of the two cavities (endoB*, endoC*),
//    found by flood-filling each cavity interior from a hard-coded seed.
// Bugfix: the flood-fill neighbor bounds tests used (i>NI) / (j>NJ),
// letting i==NI and j==NJ through and indexing mapCell out of bounds;
// they now use >=, matching the checks in normdir().
void geoinfc(void) {
    int i0, ii, endoAn, iendo;
    short int i,j,k;
    short int l, m, flag;
    // NOTE(review): three large automatic arrays (20000*ND3 shorts each)
    // live on the stack here -- confirm the stack budget covers this.
    short int endoAx[20000*ND3];
    short int endoAy[20000*ND3];
    short int endoAz[20000*ND3];
    //short int iseqx[12]={-1,-1, 0, 0, 1, 0, 1, 1, 0, 0,-1, 0 };
    //short int iseqy[12]={ 0, 1, 1, 0, 0, 1, 0,-1,-1, 0, 0,-1 };
    //short int iseqz[12]={ 0, 0, 0,-1,-1,-1, 0, 0, 0, 1, 1, 1 };
    /**
     * coor-delta matrix
     */
    // 6-connected neighborhood offsets
    short int iseqx[6]={-1, 0, 0, 1, 0, 0};
    short int iseqy[6]={ 0, 1, 0, 0,-1, 0};
    short int iseqz[6]={ 0, 0,-1, 0, 0, 1};
    // Margins of each (i,j)
    /**
     * max_min value of k of model at (i,j)
     */
    // get kmin and kmax for each [NI][NJ] frame; empty columns get the
    // sentinel pair (NK+1, 0)
    for (i=0;i<NI;i++) {
        for (j=0;j<NJ;j++) {
            k = 0;
            while (k < NK) {
                if (*(mapCell[k]+j*NI+i)>0) { /*<Comment by ALF> have cell, some duplicate point by using this method*/
                    *(kmin+j*NI+i)=k;
                    for (k=NK-1;k>-1;k--) {
                        if (*(mapCell[k]+j*NI+i)>0) {
                            *(kmax+j*NI+i)=k;
                            k = NK*2;  // flag: both extremes found
                            break;
                        }
                    }
                }
                k++;
            }
            if (k < NK*2) {
                *(kmin+j*NI+i)=NK+1;
                *(kmax+j*NI+i)=0;
            }
        }
    }
    //-------------------- modified by ALF at 2008-8-19 begin -------------------->
    //add: get epicardial triangle's vertex position, also some duplicate point in epicXYZ
    //Nepic = NI*NJ*2; //by sf
    epicX.reserve(Nepic);
    epicY.reserve(Nepic);
    epicZ.reserve(Nepic);
    // top surface: one vertex just above kmin of each column
    for (i=0; i<NI; ++i) {
        for (j=0; j<NJ; ++j) {
            epicX.push_back(i);
            epicY.push_back(j);
            epicZ.push_back(*(kmin+j*NI+i)-1);
        }
    }
    // bottom surface: one vertex just below kmax of each column
    for (i=0; i<NI; ++i) {
        for (j=0; j<NJ; ++j) {
            epicX.push_back(i);
            epicY.push_back(j);
            epicZ.push_back(*(kmax+j*NI+i)+1);
        }
    }
    for (i=0; i<Nepic; ++i) {
        epicX_old[i]=epicX[i];
        epicY_old[i]=epicY[i];
        epicZ_old[i]=epicZ[i];
    }
    //-------------------- modified by ALF at 2008-8-19 end --------------------<
    // get kTop: minimum of kmin and
    //     kBtm: maximum of kmax
    kTop=NK+1;
    kBtm=0;
    for (i=0;i<NI;i++) {
        for (j=0;j<NJ;j++) {
            if (*(kmin+j*NI+i)<kTop) kTop=*(kmin+j*NI+i);
            if (*(kmax+j*NI+i)>kBtm) kBtm=*(kmax+j*NI+i);
        }
    }
    // get kVtr: ventricular position, so heart can be divided into two parts
    for (k=kTop;k<=kBtm;k++) {
        for (i=0;i<NI;i++) {
            for (j=0;j<NJ;j++) {
                if ((*(mapCell[k]+j*NI+i)>4)&&(*(mapCell[k]+j*NI+i)<15)) {
                    kVtr=k; // ventricular position
                    // force all three loops to terminate on first hit
                    i = NI;
                    j = NJ;
                    k = kBtm;
                }
            }
        }
    }
    nHB=0;
    nBB=0;
    // get Bundle branches & his branches' position
    for (k=kTop;k<=kBtm;k++) {
        for (i=0;i<NI;i++) {
            for (j=0;j<NJ;j++) {
                //if (*(mapCell[k]+j*NI+i)==4) {
                // change to BB
                if (*(mapCell[k]+j*NI+i)==5) {
                    iBB[0][nBB]=i;
                    iBB[1][nBB]=j;
                    iBB[2][nBB]=k;
                    nBB++;
                } else if (*(mapCell[k]+j*NI+i)==4) {
                    iHB[0][nHB]=i;
                    iHB[1][nHB]=j;
                    iHB[2][nHB]=k;
                    nHB++;
                }
            }
        }
    }
    // get endocardial positions: flood-fill each cavity (m=0 and m=1) from
    // a hard-coded interior seed, marking visited empty cells with 30 and
    // recording the first tissue neighbor of each frontier cell.
    for (m=0;m<2;m++) {
        for (ii=0;ii<20000*ND3;ii++) {
            endoAx[ii]=0;
            endoAy[ii]=0;
            endoAz[ii]=0;
        }
        if (m==0) {
            endoAx[0]=24*ND;
            endoAy[0]=30*ND;
            endoAz[0]=40*ND;
            for (ii=0;ii<NENDO*ND3;ii++) {
                endoBx[ii]=0;
                endoBy[ii]=0;
                endoBz[ii]=0;
            }
        } else if (m==1) {
            endoAx[0]=26*ND;
            endoAy[0]=13*ND;
            endoAz[0]=36*ND;
            for (ii=0;ii<NENDO*ND3;ii++) {
                endoCx[ii]=0;
                endoCy[ii]=0;
                endoCz[ii]=0;
            }
        }
        // TRACE("\nFirst %d",*(mapCell[endoAz[0]]+endoAy[0]*NI+endoAx[0]));
        *(mapCell[endoAz[0]]+endoAy[0]*NI+endoAx[0])=30;
        iendo=0;
        endoAn=1;
        i0=0;
        while (i0<endoAn) {
            flag=0;
            for (l=0;l<6;l++) {
                i=endoAx[i0]+iseqx[l];
                if((i<0)||(i>=NI)) continue;  // fixed: was i>NI (allowed i==NI, OOB)
                j=endoAy[i0]+iseqy[l];
                if((j<0)||(j>=NJ)) continue;  // fixed: was j>NJ (allowed j==NJ, OOB)
                k=endoAz[i0]+iseqz[l];
                if((k<kTop)||(k>kBtm)) continue;
                if (*(mapCell[k]+j*NI+i)==0) { /*<Comment by ALF> find the normal direction */
                    *(mapCell[k]+j*NI+i)=30; /*<Comment by ALF> 30 is only a value to make sure no confuse with valid type */
                    endoAx[endoAn]=i;
                    endoAy[endoAn]=j;
                    endoAz[endoAn]=k;
                    endoAn++;
                }
                if ((flag==0) && *(mapCell[k]+j*NI+i)>0 && *(mapCell[k]+j*NI+i)<16) {
                    if (m==0) {
                        endoBx[iendo]=endoAx[i0];
                        endoBy[iendo]=endoAy[i0];
                        endoBz[iendo]=endoAz[i0];
                    } else if (m==1) {
                        endoCx[iendo]=endoAx[i0];
                        endoCy[iendo]=endoAy[i0];
                        endoCz[iendo]=endoAz[i0];
                    }
                    iendo++;
                    flag=1;
                }
            }
            i0++;
        }
        if (m==0) {
            NendoB=iendo;
            //TRACE("\nEndo B %d",NendoB);
        } else if (m==1) {
            NendoC=iendo;
            //TRACE("\nEndo C %d",NendoC);
        }
    }
    // undo the temporary visit marker (30 -> 0)
    for (k=kTop;k<=kBtm;k++) {
        for (i=0;i<NI;i++) {
            for (j=0;j<NJ;j++) {
                if (*(mapCell[k]+j*NI+i)==30) {
                    *(mapCell[k]+j*NI+i)=0;
                }
            }
        }
    }
}
// ---- set parameter of the anisotropy ------
//
// velocity(l)=0.5 m/s ==> dist*3/9msec
// velocity(t)/velocity(l)=1/3
// velocity(t)/velocity(l)=0.42?
// resistance(t)/(l)=9
// according to Clerc
// see Robert D.E., Circ. Res. 44:701-712,1979
// Input: dist
// Output: vl2[10],vt2[10],rrat1
//
void setaniso(void) {
    short int n, ltrat;
    float vl, vt, vrat, rrat;
    float fct;
    ltrat = 2;       /* longitudinal/transverse velocity ratio */
    fct = 1.1;
    vrat = 1.0/ltrat;
    rrat = 1.0/9;    /* transverse/longitudinal resistance ratio (Clerc; Roberts 1979) */
    rrat1 = rrat - 1.;
    /* squared longitudinal/transverse speeds for distance classes 1..10 */
    for (n = 0; n < 10; n++) {
        vl = fct*(n+1)*HRTscale/ND;
        vt = vl*vrat;
        vl2[n] = vl*vl;
        vt2[n] = vt*vt;
        //TRACE("\ni vl2 vt2 %2d %f %f,", n, vl2[n],vt2[n]);
    }
}
//
// --- calculate out-products of 'cell-neighber vectors' ----
//
//
// Precompute the normalized cross products (prx, pry, prz) of every pair
// of the 12 cell-neighbor direction vectors (6 lattice directions and
// their opposites). Only pairs with both indices in 0..5 are computed
// from geometry; the other quadrants are filled by sign/mirror symmetry
// of the cross product.
void neibdir (void) {
    void ijktoxyz(short int [3], float [3]);
    short int i, j;
    short int istrt[3],iterm[3],iterm1[3];
    float strt[3],term[3],dir[3];
    float term1[3],dir1[3],r;
    // the 12 neighbor offsets; entries 6..11 are the negatives of 0..5
    short int iseqx[12]={-1,-1, 0, 0, 1, 0, 1, 1, 0, 0,-1, 0 };
    short int iseqy[12]={ 0, 1, 1, 0, 0, 1, 0,-1,-1, 0, 0,-1 };
    short int iseqz[12]={ 0, 0, 0,-1,-1,-1, 0, 0, 0, 1, 1, 1 };
    // all directions are measured from the lattice origin
    istrt[0]=0;
    istrt[1]=0;
    istrt[2]=0;
    ijktoxyz(istrt,strt);
    for (i=0;i<12;i++)
        for (j=0;j<12;j++) {
            // cross product of a vector with itself is zero
            if (i==j) {
                prx[i][j]=0.;
                pry[i][j]=0.;
                prz[i][j]=0.;
                continue;
            }
            // (-a) x (-b) = a x b
            // NOTE(review): this relies on prx[i-6][j-6] having been filled
            // in an earlier iteration of the row-major double loop.
            if (i>5 && j>5) {
                prx[i][j]=prx[i-6][j-6];
                pry[i][j]=pry[i-6][j-6];
                prz[i][j]=prz[i-6][j-6];
                continue;
            }
            // a x (-b) = -(a x b)
            if (j>5) {
                prx[i][j]=-prx[i][j-6];
                pry[i][j]=-pry[i][j-6];
                prz[i][j]=-prz[i][j-6];
                continue;
            }
            // (-a) x b = -(a x b)
            if (i>5) {
                prx[i][j]=-prx[i-6][j];
                pry[i][j]=-pry[i-6][j];
                prz[i][j]=-prz[i-6][j];
                continue;
            }
            // first unit direction vector (lattice -> Cartesian)
            iterm[0]=iseqx[i];
            iterm[1]=iseqy[i];
            iterm[2]=iseqz[i];
            ijktoxyz(iterm,term);
            //linedir(strt,term,dir);
            dir[0]=term[0]-strt[0];
            dir[1]=term[1]-strt[1];
            dir[2]=term[2]-strt[2];
            r=sqrt(dir[0]*dir[0]+dir[1]*dir[1]+dir[2]*dir[2]);
            dir[0]=dir[0]/r;
            dir[1]=dir[1]/r;
            dir[2]=dir[2]/r;
            // second unit direction vector
            iterm1[0]=iseqx[j];
            iterm1[1]=iseqy[j];
            iterm1[2]=iseqz[j];
            ijktoxyz(iterm1,term1);
            //linedir(strt,term1,dir1);
            dir1[0]=term1[0]-strt[0];
            dir1[1]=term1[1]-strt[1];
            dir1[2]=term1[2]-strt[2];
            r=sqrt(dir1[0]*dir1[0]+dir1[1]*dir1[1]+dir1[2]*dir1[2]);
            dir1[0]=dir1[0]/r;
            dir1[1]=dir1[1]/r;
            dir1[2]=dir1[2]/r;
            //TRACE("\nlidir1 %f %f %f ",dir1[0],dir1[1],dir1[2]);
            // outprod(dir,dir1,out);
            // cross product, then normalize to unit length
            prx[i][j]=dir[1]*dir1[2]-dir[2]*dir1[1];
            pry[i][j]=dir[2]*dir1[0]-dir[0]*dir1[2];
            prz[i][j]=dir[0]*dir1[1]-dir[1]*dir1[0];
            r=sqrt(prx[i][j]*prx[i][j]+pry[i][j]*pry[i][j]+prz[i][j]*prz[i][j]);
            prx[i][j]=prx[i][j]/r;
            pry[i][j]=pry[i][j]/r;
            prz[i][j]=prz[i][j]/r;
        }
    /*
    for (i=0;i<6;i++)
        for (j=0;j<6;j++) {
            TRACE("\nneibdir %d %d %f %f %f ",i,j,prx[i][j],pry[i][j],prz[i][j]);
        }
    */
}
//
// ---- fibplane direction angle ------
// all plane directions are in j=22 (assumed to be parallel
// to the septal plane
// for all directions, lines atart from (1,22,90) to
// (Note: Selectable)plane(1): (50,22,90)
// (assumed perpendicular to heart axis)
//
//
// ---- fibplane direction angle ------
// all plane directions are in j=22 (assumed to be parallel
// to the septal plane
// for all directions, lines atart from (1,22,90) to
// (Note: Selectable)plane(1): (50,22,90)
// (assumed perpendicular to heart axis)
//
// Fill planedir[0..2][0..maxlay] with unit direction vectors, one per
// myocardial layer: layer 0 points from the fixed origin (1,19,90) toward
// (1,19,1), and each subsequent layer direction is found by scanning
// candidate end points until the angle from layer 0 reaches n*delt, where
// delt spreads a total rotation of pi/4 across maxlay layers.
void fibplane (void) {
    float getAngle(float [], float []);
    void ijktoxyz(short int [], float []);
    //void linedir(float [], float [], float []);
    short int i, j, k, n;
    short int iorg[3]={1,19,90};     // common origin of all direction rays
    short int iterm0[3]={1,19,1};    // end point defining the layer-0 direction
    short int iterm[3];
    float org[3];
    float term0[3],term[3];
    float dir0[3],dir[3];
    float r;
    float ang=1.;
    float arch=1.;
    float pai=3.1415926;
    float delt;
    // ---- angle per layer, max rotation angle=pi/2
    //TRACE("\nmaxlayer= %d ",maxlay);
    arch=pai/180.;   // degrees -> radians (getAngle returns degrees)
    if (maxlay<=0) return;
    //delt=(2./3.)*pai/maxlay;
    delt=(1./4.)*pai/maxlay;
    //TRACE("\ndelt/arch= %f ",delt/arch);
    // ----- all in septal plane ----->
    ijktoxyz(iorg,org);
    ijktoxyz(iterm0,term0);
    //linedir(org,term0,dir0);
    dir0[0]=term0[0]-org[0];
    dir0[1]=term0[1]-org[1];
    dir0[2]=term0[2]-org[2];
    r=sqrt(dir0[0]*dir0[0]+dir0[1]*dir0[1]+dir0[2]*dir0[2]);
    planedir[0][0]=dir0[0]/r;
    planedir[1][0]=dir0[1]/r;
    planedir[2][0]=dir0[2]/r;
    // --- search next planedir ---->
    // Walk the candidate end point first along i, then along k, until the
    // angle from dir0 reaches the target for layer n.
    // NOTE(review): if the target angle were never reached, k would grow
    // without bound -- confirm the geometry guarantees termination.
    i=iterm0[0];
    j=iterm0[1];
    k=iterm0[2];
    // TRACE("\nplanedir 0 %f %f %f",planedir[0][0],planedir[1][0],planedir[2][0]);
    for (n=1; n<=maxlay; n++) {
        do {
            if (i<NI) {
                i=i+1;
            } else {
                k=k+1;
            }
            iterm[0]=i;
            iterm[1]=j;
            iterm[2]=k;
            ijktoxyz(iterm,term);
            //linedir(org,term,dir);
            dir[0]=term[0]-org[0];
            dir[1]=term[1]-org[1];
            dir[2]=term[2]-org[2];
            r=sqrt(dir[0]*dir[0]+dir[1]*dir[1]+dir[2]*dir[2]);
            dir[0]=dir[0]/r;
            dir[1]=dir[1]/r;
            dir[2]=dir[2]/r;
            ang=getAngle(dir0,dir);
            //TRACE("\n %f %f %f %f %f %f %f %d %d %d %f %f",dir0[0],dir0[1],dir0[2],
            // dir[0],dir[1],dir[2],ang,i,j,k,ang*arch,n*delt);
        } while (ang*arch < n*delt);
        planedir[0][n]=dir[0];
        planedir[1][n]=dir[1];
        planedir[2][n]=dir[2];
        //TRACE("\nplanedir %2d %f %f %f",n,planedir[0][n],planedir[1][n],planedir[2][n]);
    }
    // for test ---->
    /*
    for (n=0; n< maxlay; n++) {
        for (m=0; m<3; m++) {
            dir0[m]=planedir[m][n];
            dir[m]=planedir[m][n+1];
        }
        ang=getAngle(dir0,dir);
        TRACE("\nn,ang %d,%f",n,ang);
    }
    */
    // <---- test end
}
//
//******** fiber direction for each (i,j,k) *************
//
//
//******** fiber direction for each (i,j,k) *************
//
// For every valid myocardial cell (type 7, layered via mapACT), compute
// the local fiber direction as the normalized cross product of the
// layer's plane direction (planedir) and the cell's surface normal
// (normdir). The result is stored per-cell in fibdir[0..2][locfib].
void fibdirct(void) {
    float getAngle(float [], float []);
    short int normdir(short int, short int, short int, float []);
    short int i, j, k, nneib, iLayer;
    char iCell;
    int locfib;
    // float test[3],test1[3];
    float tmpx,tmpy,tmpz;
    // float ang;
    float pdirx,pdiry,pdirz,r;
    //float dirx,diry,dirz;
    float nordir[3];
    //dirx=0.;
    //diry=0.;
    //dirz=0.;
    for(k=0;k<NK;k++) {
        for(j=0;j<NJ;j++) {
            for(i=0;i<NI;i++) {
                locfib=*(locXCT[k]+j*NI+i);
                iCell=*(mapCell[k]+j*NI+i);
                // layer index: only cell type 7 carries a layer number
                // (stored in mapACT, offset by 1 so 0 means "no layer")
                if (iCell!=7) iLayer=0;
                else iLayer=*(mapACT[k]+j*NI+i)+1;
                if (locfib==-1) continue;   // not a heart cell
                //if (*(MapLyr+locfib)<=0) continue;
                //if (*(MapLyr+locfib) >= 30) continue;
                if (iLayer<=0 || iLayer>=30) continue;
                // local surface normal from the neighbor configuration
                nneib=normdir(i,j,k,nordir);
                r=sqrt(nordir[0]*nordir[0]+nordir[1]*nordir[1]+nordir[2]*nordir[2]);
                //TRACE("\nnordir %2d %2d %2d %f %f %f %d",
                // i+1,j+1,k+1,nordir[0],nordir[1],nordir[2],nneib);
                if (r<0.0000001) continue;  // degenerate normal: skip
                pdirx=planedir[0][iLayer-1];
                pdiry=planedir[1][iLayer-1];
                pdirz=planedir[2][iLayer-1];
                //TRACE("\npdir %f %f %f %d",pdirx,pdiry,pdirz,iLayer);
                // --- fiberdir = planedir X normldir
                tmpx=pdiry*nordir[2]-pdirz*nordir[1];
                tmpy=pdirz*nordir[0]-pdirx*nordir[2];
                tmpz=pdirx*nordir[1]-pdiry*nordir[0];
                r=sqrt(tmpx*tmpx+tmpy*tmpy+tmpz*tmpz);
                if (r<0.0000001) continue;  // plane dir parallel to normal: skip
                *(fibdir[0]+locfib)=tmpx/r;
                *(fibdir[1]+locfib)=tmpy/r;
                *(fibdir[2]+locfib)=tmpz/r;
                //TRACE("\nfibdir %2d %2d %2d %f %f %f %d",i+1,j+1,k+1, *(fibdir[0]+locfib),
                // *(fibdir[1]+locfib),*(fibdir[2]+locfib),locfib);
            }
        }
    }
    // ---- for test------>
    /*
    TRACE("\nj=22");
    i=21;
    for(k=58;k<62;k++) {
        for(j=31;j<33;j++) {
            for(n=0;n<3;n++) {
                locfib=*(locXCT[k]+j*NI+i);
                test[n]=*(fibdir[n]+locfib);
                test1[n]=planedir[n][1];
            }
            ang=getAngle(test,test1);
            TRACE("\n %d %d %d %f %f %f %f %d",
                i,j,k,ang,test[0],test[1],test[2],locfib);
        }
    }
    */
}
//
// calculate normal direction of fibplane at cell i
//
//
// calculate normal direction of fibplane at cell i
//
// For the cell at (icl,jcl,kcl), collect its 12-connected neighbors that
// belong to the same layer, then average the precomputed pairwise cross
// products (prx/pry/prz from neibdir) over consecutive neighbor pairs to
// estimate the local surface normal. Returns the neighbor count; nordir
// is zeroed (and the function returns early) when fewer than two
// same-layer neighbors exist.
short int normdir(short int icl,short int jcl,short int kcl,
                  float nordir[3]){
    char iCell,jCell;
    short int i,iLayer,jLayer;
    short int iface[12];   // neighbor-direction indices in the same layer
    int locnor, jloc;
    short int jx,jy,jz,l;
    float r,dirx,diry,dirz;
    short int nneib;
    // the 12 neighbor offsets (same table as neibdir)
    short int iseqx[12]={-1,-1, 0, 0, 1, 0, 1, 1, 0, 0,-1, 0};
    short int iseqy[12]={ 0, 1, 1, 0, 0, 1, 0,-1,-1, 0, 0,-1};
    short int iseqz[12]={ 0, 0, 0,-1,-1,-1, 0, 0, 0, 1, 1, 1};
    nneib=0;
    r=0.;
    nordir[0]=0.;
    nordir[1]=0.;
    nordir[2]=0.;
    dirx=0.;
    diry=0.;
    dirz=0.;
    for (i=0;i<3;i++) {
        nordir[i]=0.;
    }
    for (i=0;i<12;i++) {
        iface[i]=0;
    }
    locnor=*(locXCT[kcl]+jcl*NI+icl);
    iCell=*(mapCell[kcl]+jcl*NI+icl);
    // layer index of the center cell (only type 7 carries a layer)
    if (iCell!=7) iLayer=0;
    else iLayer=*(mapACT[kcl]+jcl*NI+icl)+1;
    // scan the 12 neighbors; keep those in the same layer as the center
    for (l=0;l<12;l++) {
        jx=icl+iseqx[l];
        if(jx<0 || jx>=NI) continue;
        jy=jcl+iseqy[l];
        if(jy<0 || jy>=NJ) continue;
        jz=kcl+iseqz[l];
        if(jz<0 || jz>=NK) continue;
        jloc=*(locXCT[jz]+jy*NI+jx);
        if(jloc==-1) continue;
        jCell=*(mapCell[jz]+jy*NI+jx);
        if (jCell!=7) jLayer=0;
        else jLayer=*(mapACT[jz]+jy*NI+jx)+1;
        //TRACE("\n%2d %2d %2d %d %d %d %d", jx+1,jy+1,jz+1,jCell,jLayer,iLayer,nneib);
        if(jLayer<1) continue;
        if(iLayer!=jLayer) continue;
        iface[nneib]=l;
        nneib=nneib+1;
    }
    // --- neglect fiber edge --->
    if(nneib<=1) return nneib;
    // average the cross products of consecutive same-layer neighbor pairs
    for(l=0;l<nneib-1;l++) {
        dirx=dirx+prx[iface[l]][iface[l+1]];
        diry=diry+pry[iface[l]][iface[l+1]];
        dirz=dirz+prz[iface[l]][iface[l+1]];
    }
    // --- two neighbering points only --->
    // --- in opposite --->
    // close the cycle: last neighbor paired with the first
    dirx=dirx+prx[iface[nneib-1]][iface[0]];
    diry=diry+pry[iface[nneib-1]][iface[0]];
    dirz=dirz+prz[iface[nneib-1]][iface[0]];
    dirx=dirx/(1.*nneib);
    diry=diry/(1.*nneib);
    dirz=dirz/(1.*nneib);
    r=sqrt(dirx*dirx+diry*diry+dirz*dirz);
    // normalize unless the average cancelled out (opposite pair)
    if (r<0.00001) {
        ;//TRACE("\nicl,jcl,kcl,nneib,iface %d %d %d %d %d %d",
        // icl,jcl,kcl,nneib,iface[0],iface[1]);
    } else {
        dirx=dirx/r;
        diry=diry/r;
        dirz=dirz/r;
    }
    nordir[0]=dirx;
    nordir[1]=diry;
    nordir[2]=dirz;
    return nneib;
}
//
// ---- angle of two vectors ---
//
float getAngle (float vct1[3], float vct2[3]) {
    // Angle between two 3-vectors, in degrees [0, 180].
    // Robustness fixes over the original:
    //  - the acos() argument is clamped to [-1,1]; floating-point rounding
    //    in the dot product could otherwise push it slightly outside the
    //    domain and yield NaN for (anti)parallel vectors,
    //  - a zero-length input returns 0 instead of dividing by zero,
    //  - float math functions (sqrtf/acosf) avoid double round-trips.
    short int n;
    float pi=3.1415926f;
    float sumv=0.f;    // dot(vct1, vct2)
    float sumv1=0.f;   // |vct1|^2
    float sumv2=0.f;   // |vct2|^2
    for (n=0;n<3;n++) {
        sumv1=sumv1+vct1[n]*vct1[n];
        sumv2=sumv2+vct2[n]*vct2[n];
        sumv=sumv+vct1[n]*vct2[n];
    }
    if (sumv1<=0.f || sumv2<=0.f) return 0.f;   // degenerate (zero) input vector
    float c=sumv/sqrtf(sumv1*sumv2);
    if (c>1.f) c=1.f;
    else if (c<-1.f) c=-1.f;
    return acosf(c)*180.f/pi;
}
//
// ----transform from I,J,K to Z,Y,Z -------
//
// Map lattice indices (i,j,k) to physical coordinates (x,y,z): apply the
// lattice basis matrix tmswf row by row and add the heart origin
// (HRTx0, HRTy0, HRTz0).
void ijktoxyz(short int ijk[3], float xyz[3]) {
    const float origin[3] = {HRTx0, HRTy0, HRTz0};
    for (int row = 0; row < 3; ++row) {
        xyz[row] = origin[row]
                 + ijk[0]*tmswf[row][0]
                 + ijk[1]*tmswf[row][1]
                 + ijk[2]*tmswf[row][2];
    }
}
//
// ----transform coordinate system to I,J,K to establish
// local coordinate system (shift and rotated) -------
//
// step1: shift old system to i,j,k
// the old coordinate system
// step2: rotate x,y,z system to fiber coordinate
// system so that Z axis has direction of
// fibdir(i,j,k) and X axis has direction
// fiber direction and Y axis=Z(x)X
// step3: solve equation
// x=l1*X+l2*Y+l3*Z
// y=m1*X+m2*Y+m3*Z
// z=n1*X+n2*Y+n3*Z
// where l,m,n is the dirction number of axises
// l=cos(alpha), m=cos(beta) and n=cos(theta)
//
float local(short int i, short int j, short int k) {
    // Build the local fiber coordinate frame at voxel (i,j,k) and return the
    // determinant d of the direction-cosine matrix [xaxis|yaxis|zaxis];
    // d is later used by the caller to solve the step-3 linear system, and
    // d == 0 signals failure (the caller then treats the cell as isotropic).
    //   zaxis = fiber direction at the voxel (fibdir)
    //   xaxis = in-plane reference direction for the voxel's layer (planedir)
    //   yaxis = normalized zaxis x xaxis
    // Results are published through the file-scope arrays xaxis/yaxis/zaxis.
    char iCell;
    short int n,iLayer;
    int locloc;
    float r,d;
    locloc=*(locXCT[k]+NI*j+i);
    iCell=*(mapCell[k]+NI*j+i);
    // layer index: 0 unless ventricular tissue (cell type 7)
    if (iCell!=7) iLayer=0;
    else iLayer=*(mapACT[k]+j*NI+i)+1;
    // fix: planedir is indexed with iLayer-1 below; for a non-layered cell
    // (iLayer==0) the original read planedir[n][-1], out of bounds.  Callers
    // currently invoke local() only for cell type 7, so this guard is a
    // safety net that preserves existing behavior.
    if (iLayer<1) {
        d=0.;
        return d;
    }
    for (n=0; n<3; n++) {
        zaxis[n]=*(fibdir[n]+locloc);
        xaxis[n]=planedir[n][iLayer-1];
    }
    // no fiber direction stored at this voxel -> cannot build a frame
    if (zaxis[0]<0.0000001 && zaxis[0]>-0.0000001 &&
        zaxis[1]<0.0000001 && zaxis[1]>-0.0000001 &&
        zaxis[2]<0.0000001 && zaxis[2]>-0.0000001 ) {
        d=0.;
        return d;
    }
    // yaxis = zaxis x xaxis
    yaxis[0]=zaxis[1]*xaxis[2]-zaxis[2]*xaxis[1];
    yaxis[1]=zaxis[2]*xaxis[0]-zaxis[0]*xaxis[2];
    yaxis[2]=zaxis[0]*xaxis[1]-zaxis[1]*xaxis[0];
    r=sqrt(yaxis[0]*yaxis[0]+
           yaxis[1]*yaxis[1]+yaxis[2]*yaxis[2]);
    if (r < 0.0000001) {
        // zaxis parallel to xaxis: frame is degenerate
        d=0.;
        return d;
    }
    yaxis[0]=yaxis[0]/r;
    yaxis[1]=yaxis[1]/r;
    yaxis[2]=yaxis[2]/r;
    // determinant of the 3x3 frame matrix (scalar triple product)
    d=xaxis[0]*yaxis[1]*zaxis[2]+xaxis[1]*yaxis[2]*zaxis[0]
      +xaxis[2]*yaxis[0]*zaxis[1]-xaxis[2]*yaxis[1]*zaxis[0]
      -xaxis[0]*yaxis[2]*zaxis[1]-xaxis[1]*yaxis[0]*zaxis[2];
    return d;
}
//
// ******* calc anisotropic coeffeciante for i,j,k ********
//
// Apply the local anisotropy tensor F = I + rrat1 * (fib x fib^T) to the
// vector v in place, where fib is the fiber direction stored for voxel
// (i,j,k).  v is read fully before being overwritten.
void anfct(short int i, short int j, short int k, float v[3]) {
    const float ident[3][3] = {
        {1.f, 0.f, 0.f},
        {0.f, 1.f, 0.f},
        {0.f, 0.f, 1.f}};
    int cell = *(locXCT[k] + NI*j + i);   // flat index into the fiber tables
    float fib[3];
    for (int r = 0; r < 3; ++r) {
        fib[r] = *(fibdir[r] + cell);
    }
    float result[3];
    for (int r = 0; r < 3; ++r) {
        float row[3];
        for (int c = 0; c < 3; ++c) {
            row[c] = ident[r][c] + rrat1*fib[r]*fib[c];
        }
        result[r] = row[0]*v[0] + row[1]*v[1] + row[2]*v[2];
    }
    for (int r = 0; r < 3; ++r) {
        v[r] = result[r];
    }
}
// Read the matrix data of the body (a344.data)
void rdmtx(void) {
short int i;
HFILE hFp;
//short int index;
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'m');
//filepath.SetAt(index+2,'t');
//filepath.SetAt(index+3,'x');
//hFp = _lopen(filepath,OF_READ);
hFp=_lopen(dataPath+"tour.mtx ",OF_READ);
if (hFp==HFILE_ERROR)
{
fprintf(stdout,"can not create the file--mtx\n");
fflush(stdout);
flag_flop=1;
return;
}
for (i=0;i<NL;i++)
_lread(hFp,aw[i],NL*4);
_lread(hFp,bw,NL*4);
_lread(hFp,&alp,4);
_lclose(hFp);
}
// Make stiml data for inverse excitation
// Build the stimulation seed list for inverse excitation.
//
// mapAPD is re-initialized from the anatomy (7 = myocardium, cell type > 4;
// 0 = everything else).  The first myocardial voxel met from each of the
// four lateral view directions per slice, and every myocardial voxel on the
// bottom slice kBtm, is relabelled 6 (boundary); boundary voxels on slices
// kVtr..kVtr+ivolpkj are appended to the seed table mag[0..3][] as
// (i, j, k, fire-time 0) and counted in mNub.  Finally one extra boundary
// voxel per (k,i) column is marked across the septum.
// Globals used: mapCell, mapAPD, mag, mNub, idist, NI, NJ, NK, kVtr, kBtm.
void stminvx(short int ivolpkj) {
    short int ks, nspt, mk, i, j, k;
    // idist is a global reused by later stages (conduction depth in XCTinvcm)
    idist=ivolpkj;
    ks=kVtr+idist;                 // only slices kVtr..ks contribute seeds
    // --- reset mapAPD: 7 = myocardium, 0 = cavity / outside ---
    for (k=kVtr; k<=kBtm; k++) {
        for (i=0; i<NI; i++) {
            for (j=0; j<NJ; j++) {
                if (*(mapCell[k]+j*NI+i)>4)
                    *(mapAPD[k]+j*NI+i)=7;
                else
                    *(mapAPD[k]+j*NI+i)=0;
            }
        }
    }
    mNub=0;
    // NOTE(review): this loop runs to NK-1 although mapAPD was reset only up
    // to kBtm above (as in the original) -- confirm intent.
    for (k=kVtr; k<NK; k++) {
        // back view: first myocardial voxel scanning +i
        for (j=0;j<NJ;j++) {
            for (i=0;i<NI;i++) {
                if (*(mapAPD[k]+j*NI+i)==6) {
                    break;
                }
                if (*(mapAPD[k]+j*NI+i)==7) {
                    *(mapAPD[k]+j*NI+i)=6;
                    if (k<=ks) {
                        *(mag[0]+mNub)=i;
                        *(mag[1]+mNub)=j;
                        *(mag[2]+mNub)=k;
                        *(mag[3]+mNub)=0;
                        mNub++;
                    }
                    break;
                }
            }
        }
        // front view: first myocardial voxel scanning -i
        for (j=0; j<NJ; j++) {
            for (i=NI-1; i>-1; i--) {
                if (*(mapAPD[k]+j*NI+i)==6) {
                    break;
                }
                if (*(mapAPD[k]+j*NI+i)==7 ) {
                    *(mapAPD[k]+j*NI+i)=6;
                    if (k<=ks) {
                        *(mag[0]+mNub)=i;
                        *(mag[1]+mNub)=j;
                        *(mag[2]+mNub)=k;
                        *(mag[3]+mNub)=0;
                        mNub++;
                    }
                    break;
                }
            }
        }
        // right view: first myocardial voxel scanning +j
        for (i=0;i<NI;i++) {
            for (j=0;j<NJ;j++) {
                if (*(mapAPD[k]+j*NI+i)==6) {
                    break;
                } else if (*(mapAPD[k]+j*NI+i)==7) {
                    *(mapAPD[k]+j*NI+i)=6;
                    if (k<=ks) {
                        *(mag[0]+mNub)=i;
                        *(mag[1]+mNub)=j;
                        *(mag[2]+mNub)=k;
                        *(mag[3]+mNub)=0;
                        mNub++;
                    }
                    break;
                }
            }
        }
        // left view: first myocardial voxel scanning -j
        for (i=0;i<NI;i++) {
            for (j=NJ-1;j>-1;j--) {
                if (*(mapAPD[k]+j*NI+i)==6) {
                    break;
                }
                if (*(mapAPD[k]+j*NI+i)==7 ) {
                    *(mapAPD[k]+j*NI+i)=6;
                    if (k<=ks) {
                        *(mag[0]+mNub)=i;
                        *(mag[1]+mNub)=j;
                        *(mag[2]+mNub)=k;
                        *(mag[3]+mNub)=0;
                        mNub++;
                    }
                    break;
                }
            }
        }
    }
    // bottom slice: every remaining myocardial voxel becomes boundary
    for (i=0;i<NI;i++) {
        for (j=0;j<NJ;j++) {
            if (*(mapAPD[kBtm]+j*NI+i)==7) {
                *(mapAPD[kBtm]+j*NI+i)=6;
                if (kBtm<=ks) {
                    *(mag[0]+mNub)=i;
                    *(mag[1]+mNub)=j;
                    *(mag[2]+mNub)=kBtm;
                    *(mag[3]+mNub)=0;
                    mNub++;
                }
            }
        }
    }
    // septum: after seeing a 7->0 transition along +j (mk==1), mark the
    // first voxel of the next 0->7 transition as boundary
    nspt=0;
    for (k=kVtr;k<=kBtm;k++) {
        for (i=0;i<NI;i++) {
            mk=0;
            for (j=1;j<NJ;j++) {
                if ((*(mapAPD[k]+(j-1)*NI+i)==0)&&(*(mapAPD[k]+j*NI+i)==7)&&(mk==1)) {
                    nspt=nspt+1;
                    *(mapAPD[k]+j*NI+i)=6;
                    break;
                }
                // fix: guard j+1 -- the original dereferenced
                // mapAPD[k]+(j+1)*NI+i with j == NJ-1, reading one row past
                // the end of the slice
                if ((j+1<NJ)&&(*(mapAPD[k]+j*NI+i)==7)&&(*(mapAPD[k]+(j+1)*NI+i)==0))
                    mk=1;
            }
        }
    }
}
// APD distribution
// Layer the ventricular myocardium by propagation step ("inverse" wavefront
// labelling used for APD distribution).
//
// Starting from the boundary seeds produced by stminvx (mag[], mNub), cells
// are flooded step by step through a 12-neighbour stencil: mapACT receives
// the step index ic at which each voxel is reached, and mapAPD is advanced
// through marker bands (+20*ND just fired, aged by +10*ND each step) so a
// voxel fires only once.  A "conductive system" pass extends each step
// along boundary voxels (value 6) up to idist cells deep within the same
// step.  On completion maxlay = ic + 1.
// Globals used: mapAPD, mapACT, mag, mNub, idist, maxlay, ic, flag_flop,
// NI, NJ, NK, ND, ND3, kVtr, kBtm.
void XCTinvcm(void) {
    short int * iACTv[3];                        // flood front (heap, 50000*ND3 entries each)
    short int jACTv[3][NI*ND],kACTv[3][NI*ND];   // conduction-pass work queues
    short int iseqx[12]={-1,-1, 0, 0, 1, 0, 1, 1, 0, 0,-1, 0 };
    short int iseqy[12]={ 0, 1, 1, 0, 0, 1, 0,-1,-1, 0, 0,-1 };
    short int iseqz[12]={ 0, 0, 0,-1,-1,-1, 0, 0, 0, 1, 1, 1 };
    short int ix,iy,iz,jx,jy,jz,l;
    short int jdist,jx0,jy0,jz0,mappu,mappu0;
    long i,j,k,nACTv,mACTv,ncont;
    long nblck,nStep,nbrch;
    //------ allocate and zero the flood-front buffers ---------
    for(i=0;i<3;i++) {
        iACTv[i]=(short int *) malloc(50000*ND3*2);
        if(iACTv[i]==NULL) {
            MessageBox(NULL,"Out of memory !",NULL,MB_OK);
            flag_flop=1;
            // fix: release the buffers already allocated instead of leaking
            // them on the error path
            while(--i>=0) free(iACTv[i]);
            return;
        }
    }
    for(k=0;k<NK;k++) {
        for(j=0;j<NJ;j++) {
            for(i=0;i<NI;i++) {
                *(mapACT[k]+j*NI+i)=0;
            }
        }
    }
    for(i=0;i<3;i++) {
        for(j=0;j<50000*ND3;j++) {
            *(iACTv[i]+j)=0;
        }
    }
    nblck=0;
    ic=0;
    nACTv=0;
    // mapAPD[]: 6 = boundary, 7 = ventricular (as prepared by stminvx)
    while (1) {
        // fire the seeds scheduled for step ic (appended after the cells
        // collected in the previous step, which nACTv still counts)
        for(i=0;i<mNub;i++) {
            if (*(mag[3]+i)!=ic) continue;
            jx=*(mag[0]+i);
            jy=*(mag[1]+i);
            jz=*(mag[2]+i);
            mappu=*(mapAPD[jz]+jy*NI+jx);
            *(iACTv[0]+nACTv)=jx;
            *(iACTv[1]+nACTv)=jy;
            *(iACTv[2]+nACTv)=jz;
            *(mapACT[jz]+jy*NI+jx)=ic;
            *(mapAPD[jz]+jy*NI+jx)=mappu+20*ND;   // band: just fired
            nACTv++;
        }
        ic=ic+1;
        nACTv=0;
        // collect every still-unfired 6/7 voxel adjacent to a just-fired
        // (+20*ND band) voxel
        for(k=kVtr;k<=kBtm;k++) {
            for(i=0;i<NI;i++) {
                for(j=0;j<NJ;j++) {
                    mappu=*(mapAPD[k]+j*NI+i);
                    if((mappu<6)||(mappu>7)) continue; // exclude 0 and already-fired
                    ncont=0;
                    for(l=0;l<12;l++) {
                        ix=i+iseqx[l];
                        if((ix<0)||(ix>(NI-1))) continue;
                        iy=j+iseqy[l];
                        if((iy<0)||(iy>(NJ-1))) continue;
                        iz=k+iseqz[l];
                        if((iz<kVtr)||(iz>kBtm)) continue;
                        mappu0=*(mapAPD[iz]+iy*NI+ix);
                        if((mappu0<20*ND+6)||(mappu0>20*ND+7)) continue;
                        ncont=ncont+1;
                    }
                    if(ncont==0) continue;
                    if((mappu==6)||(mappu==7)) *(mapAPD[k]+j*NI+i)+=10*ND;
                    *(iACTv[0]+nACTv)=i;
                    *(iACTv[1]+nACTv)=j;
                    *(iACTv[2]+nACTv)=k;
                    *(mapACT[k]+j*NI+i)=ic;
                    nACTv++;
                }
            }
        }
        // Conductive system: from each newly-reached boundary voxel
        // (10*ND+6), flood further boundary voxels up to idist cells deep
        // within this same step.
        mACTv=nACTv;
        for (i=0;i<nACTv;i++) {
            jx=*(iACTv[0]+i);
            jy=*(iACTv[1]+i);
            jz=*(iACTv[2]+i);
            mappu=*(mapAPD[jz]+jy*NI+jx);
            if (mappu != 10*ND+6)
                continue;
            jACTv[0][0]=jx;
            jACTv[1][0]=jy;
            jACTv[2][0]=jz;
            nStep=0;
            nbrch=1;
            jdist=1;
            while (1) {
                for (j=0;j<nbrch;j++) {
                    jx0=jACTv[0][j];
                    jy0=jACTv[1][j];
                    jz0=jACTv[2][j];
                    for (l=0;l<12;l++) {
                        jx=jx0+iseqx[l];
                        if ((jx<=-1)||(jx>NI-1)) continue;
                        jy=jy0+iseqy[l];
                        if ((jy<=-1)||(jy>NJ-1)) continue;
                        jz=jz0+iseqz[l];
                        if ((jz<kVtr)||(jz>kBtm)) continue;
                        mappu=*(mapAPD[jz]+jy*NI+jx);
                        if (mappu!=6) continue;
                        // NOTE(review): nStep is not bounds-checked against
                        // NI*ND, nor mACTv against 50000*ND3 -- confirm
                        // these capacities are always sufficient.
                        kACTv[0][nStep]=jx;
                        kACTv[1][nStep]=jy;
                        kACTv[2][nStep]=jz;
                        nStep++;
                        *(iACTv[0]+mACTv)=jx;
                        *(iACTv[1]+mACTv)=jy;
                        *(iACTv[2]+mACTv)=jz;
                        *(mapACT[jz]+jy*NI+jx)=ic;
                        *(mapAPD[jz]+jy*NI+jx)=mappu+10*ND;
                        mACTv++;
                    }
                }
                if (nStep==0) break;
                jdist=jdist+1;
                for (k=0;k<nStep;k++) {
                    jACTv[0][k]=kACTv[0][k];
                    jACTv[1][k]=kACTv[1][k];
                    jACTv[2][k]=kACTv[2][k];
                }
                if (jdist>=idist) break;
                nbrch=nStep;
                nStep=0;
            }
        }
        nACTv=mACTv;
        // age the marker bands for the next step
        for (i=0;i<NI;i++)
            for (j=0;j<NJ;j++)
                for (k=kVtr;k<=kBtm;k++) {
                    mappu=*(mapAPD[k]+j*NI+i);
                    if ((mappu>30*ND+7)||(mappu<10*ND+6)) continue;
                    *(mapAPD[k]+j*NI+i)=mappu+10*ND;
                }
        // terminate once the wavefront has started and then died out
        if((nblck!=0)&&(nACTv==0)) break;
        nblck=nblck+nACTv;
    }
    maxlay=ic+1;
    for (i=0;i<3;i++) {
        free(iACTv[i]);
    }
}
// mapACT <-- deference of Phase 2 from defined value (ms)
// Convert mapACT layer indices into per-voxel activation values (mapAPD),
// apply a deterministic pseudo-random APD jitter per cell type, and write
// the result to "tour.act" as a column-compressed volume: kmin/kmax per
// (i,j) column, the step count ic, then one short per voxel in each column.
void savACT(void) {
    char iCell;
    short int i,j,k,m;
    int idev,init,md;
    for (k=0;k<NK;k++)
        for (i=0;i<NI;i++)
            for (j=0;j<NJ;j++) {
                if (*(mapACT[k]+j*NI+i)<1) {
                    *(mapAPD[k]+j*NI+i)=*(mapACT[k]+j*NI+i);
                    continue;
                }
                iCell=*(mapCell[k]+j*NI+i);
                if (iCell==15) {
                    *(mapAPD[k]+j*NI+i)=*(mapACT[k]+j*NI+i);
                    continue;
                }
                // scale the layer index by the per-cell-type factor iparm(n,10)
                *(mapAPD[k]+j*NI+i)=*(mapACT[k]+j*NI+i)*(*(iparm+(iCell-1)*NPARM+9));
            }
    // Pseudo-random APD deviation per cell type (simple LCG on init).
    // NOTE(review): this stores only the deviation, replacing the scaled
    // value computed above for those voxels -- confirm this is intended.
    for (iCell=1;iCell<=NCELL;iCell++) {
        if (*(iparm+(iCell-1)*NPARM+11)<=0) continue;
        idev=*(iparm+(iCell-1)*NPARM+11)*(*(iparm+(iCell-1)*NPARM+2));
        init=idev;
        for (k=0;k<NK;k++) {
            for (i=0;i<NI;i++) {
                for (j=0;j<NJ;j++) {
                    if (*(mapCell[k]+j*NI+i)!=iCell)
                        continue;
                    init=init*65+1;
                    md=init%256;
                    *(mapAPD[k]+j*NI+i) = (short int)(idev*(md-128)/12800);
                    init=md;
                }
            }
        }
    }
    // --- save the .act file ---
    CFile f;
    CFileException e;
    if (!f.Open( dataPath+"tour.act ", CFile::modeCreate | CFile::modeWrite, &e )) {
#ifdef _DEBUG
        afxDump << "File could not be opened " << e.m_cause << "\n";
#endif
    }
    f.Write(kmin,2*NI*NJ);
    f.Write(kmax,2*NI*NJ);
    f.Write(&ic,2);
    for (i=0;i<NI;i++) {
        for (j=0;j<NJ;j++) {
            if (*(kmin+j*NI+i)==NK+1)
                continue;  // empty column (kmin sentinel)
            for (k=*(kmin+j*NI+i);k<=*(kmax+j*NI+i);k++) {
                m=*(mapAPD[k]+j*NI+i)/6/ND;
                // fix: the write belongs inside the k loop.  The original
                // braceless loop computed m for every k but wrote only the
                // last value per column, so the payload could not line up
                // with the kmin/kmax run lengths in the header.
                // NOTE(review): this changes the .act file layout -- confirm
                // against the reader of tour.act.
                f.Write(&m,2);
            }
        }
    }
    f.Close();
}
// mapACT <-- deference of Phase 2 from defined value (ms)
/*
void savACT(int myid) {
char iCell;
short int i,j,k,m;
int idev,init,md;
HFILE hFp;
for (k=0;k<NK;k++)
for (i=0;i<NI;i++)
for (j=0;j<NJ;j++) {
if (*(mapACT[k]+j*NI+i)<1) {
*(mapAPD[k]+j*NI+i)=*(mapACT[k]+j*NI+i);
continue;
}
iCell=*(mapCell[k]+j*NI+i);
if (iCell==15) {
*(mapAPD[k]+j*NI+i)=*(mapACT[k]+j*NI+i);
continue;
}
//*(mapACT[k]+j*NI+i)*= *(iparm+(iCell-1)*NPARM+9);
*(mapAPD[k]+j*NI+i)=*(mapACT[k]+j*NI+i)*(*(iparm+(iCell-1)*NPARM+9));
}
// Random distribution of the APD
for (iCell=1;iCell<=NCELL;iCell++) {
if (*(iparm+(iCell-1)*NPARM+11)<=0) continue;
idev=*(iparm+(iCell-1)*NPARM+11)*(*(iparm+(iCell-1)*NPARM+2));
init=idev;
for (k=0;k<NK;k++) {
for (i=0;i<NI;i++) {
for (j=0;j<NJ;j++) {
if (*(mapCell[k]+j*NI+i)!=iCell)
continue;
init=init*65+1;
md=init%256;
*(mapAPD[k]+j*NI+i) = (short int)(idev*(md-128)/12800);
init=md;
}
}
}
}
// Save file of ACT
CFile f;
CFileException e;
//short int index;
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'a');
//filepath.SetAt(index+2,'c');
//filepath.SetAt(index+3,'t');
//hFp=_lcreat(dataPath+"tour.act ",0);
//if (hFp==HFILE_ERROR)
//{
// fprintf(stdout,"can not create the file--act\n");
// fflush(stdout);
// return;
//}
if (myid==0){
if (!f.Open( dataPath+"tour.act ", CFile::modeCreate | CFile::modeWrite, &e )) {
#ifdef _DEBUG
afxDump << "File could not be opened " << e.m_cause << "\n";
#endif
}
// if (!f.Open( filepath, CFile::modeCreate | CFile::modeWrite, &e )) {
//#ifdef _DEBUG
// afxDump << "File could not be opened " << e.m_cause << "\n";
//#endif
// }
f.Write(kmin,2*NI*NJ);
f.Write(kmax,2*NI*NJ);
f.Write(&ic,2);
};
for (i=0;i<NI;i++) {
for (j=0;j<NJ;j++) {
if (*(kmin+j*NI+i)==NK+1)
continue;
for (k=*(kmin+j*NI+i);k<=*(kmax+j*NI+i);k++)
m=*(mapAPD[k]+j*NI+i)/6/ND;
if (myid==0) f.Write(&m,2); // hui modify from 1 to 2
}
}
if (myid==0) f.Close();
}
*/
// **************** sub excitation ********************
void XCTcalm(int myid) {
// FILE *fp;
void wtXCTm(short int,short int,short int,short int);
void bbDLYm(short int,short int,short int);
void rdXCTm(short int,short int,short int,short int);
short int itmp, tmp;
short int iStm,ires,irp,irel,ist,kBB;
float phsft,mxDLY,mACCl,icross,delt;
char mCell,iCell,kCell;
short int *iACTv[4];
short int *iACTvOld[4];
short int *jACTv[4];
short int *kACTv[4];
short int *iXCT[NK];
short int *iXCTapd[NK];
short int *iXCTOld[NK];
short int iseqx[12]={-1,-1, 0, 0, 1, 0, 1, 1, 0, 0,-1, 0};
short int iseqy[12]={ 0, 1, 1, 0, 0, 1, 0,-1,-1, 0, 0,-1};
short int iseqz[12]={ 0, 0, 0,-1,-1,-1, 0, 0, 0, 1, 1, 1};
short int ix,iy,iz,jx,jy,jz,iv,l;
short int jdist,jx0,jy0,jz0,is,ICL,ivel;
short int iSTOP, iS1S2, dS1S2Old, iCell5Ex;
long i,j,k,nACTv,mACTv,nACTvOld;
long nblck,nStep,nbrch;
// >>>>>>> aniso >>>>>>
float xani,yani,zani,dani,elp;
float dxani,dyani,dzani;
short int itms1=0;
// ---- for vtr aniso use
// storing the ellipsoid propagation times ---
//--------- maximum excitation time Step: maxXctStep -------------
for(i=0;i<4;i++) {
iACTv[i] = (short int *) malloc(50000*ND3*2);
iACTvOld[i] = (short int *) malloc(50000*ND3*2);
jACTv[i] = (short int *) malloc(50000*ND3*2);
kACTv[i] = (short int *) malloc(50000*ND3*2);
if((iACTv[i]==NULL)||(iACTvOld[i]==NULL)||
(jACTv[i]==NULL)||(kACTv[i]==NULL)) {
MessageBox(NULL,"Out of memory !",NULL,MB_OK);
return;
}
}
for(i=0;i<NK;i++) {
iXCT[i] = (short int *) malloc(NI*NJ*2);
iXCTapd[i] = (short int *) malloc(NI*NJ*2);
iXCTOld[i] = (short int *) malloc(NI*NJ*2);
if((iXCT[i]==NULL)||(iXCTOld[i]==NULL)) {
MessageBox(NULL,"Out of memory !",NULL,MB_OK);
return;
}
}
for(i=0;i<4;i++) {
for(j=0;j<50000*ND3;j++) {
*(iACTv[i]+j)=0;
*(iACTvOld[i]+j)=0;
*(jACTv[i]+j)=0;
*(kACTv[i]+j)=0;
}
}
// --- file mapXCT is initialized with INFTIME ----
for(i=0;i<NCYCL;i++) {
for(j=0;j<50000*ND3;j++) {
*(mapXCTm[i]+j)=INFTIME;
}
}
for(k=0;k<NK;k++) {
for(j=0;j<NJ;j++) {
for(i=0;i<NI;i++) {
*(iXCT[k]+j*NI+i)=INFTIME;
*(iXCTapd[k]+j*NI+i)=0;
*(iXCTOld[k]+j*NI+i)=INFTIME;
}
}
}
mxcycle=0;
short int tested[NCELL];
for(i=0;i<NCELL;i++)
tested[i]=0;
for(i=0;i<nttl;i++) {
jx=ipttl[0][i]; /*<Comment by ALF> pos of ith cell*/
jy=ipttl[1][i];
jz=ipttl[2][i];
iCell=*(mapCell[jz]+jy*NI+jx); /*<Comment by ALF> cell type index */
if(tested[iCell-1]==0)
{*(iparm+(iCell-1)*NPARM+18)+=ipttl[3][i];tested[iCell-1]=1;
if (iCell!=1) {*(iparm+(1-1)*NPARM+18)+=ipttl[3][i];//maxXctStep+=ipttl[3][i];
}
}
//TRACE("\nNTTL (%3d %3d %3d) %2d",jx,jy,jz,iCell);
// set pacemake time of no. 5 cells
if (iCell==5) {
ipstm[0][i]=100*ND/(ipttl[3][i]+1);
if((ipstm[0][i]*ipttl[3][i])<100*ND) ipstm[0][i]+=1;
//ipstm[0][i]=100/(ipttl[3][i]+1);
//if((ipstm[0][i]*ipttl[3][i])<100) ipstm[0][i]+=1;
//TRACE("\nCell 5, (%d %d %d) %d %d",jx,jy,jz, ipttl[3][i],ipstm[0][i]);
continue;
}
// iparm(n,18) = BCL basic cycle length (ms) of pacing
// iparm(n,20) = inc increament of BCL(ms/cycle)
ipstm[0][i]=*(iparm+(iCell-1)*NPARM+17);
ipstm[1][i]=*(iparm+(iCell-1)*NPARM+19);
ipstm[2][i]=0;
}
nblck=0;
ic=0;
nACTv=0;
iS1S2=0;
iCell5Ex=0;
// ------ stimulus: pacemaker spontanous firing -------
while (1) {
// In this loop, ipttl[3][i] is mainly used to
// decide ipstm[0][i] and itself
jx=0;
jy=0;
jz=0;
iStm=0;
excited=0;
for (i=0;i<nttl;i++) {
jx=ipttl[0][i];
jy=ipttl[1][i];
jz=ipttl[2][i];
iStm=ipttl[3][i];
iCell=*(mapCell[jz]+jy*NI+jx);
//TRACE("\nStimulus (%3d %3d %3d)%2d %d %d",jx,jy,jz,iCell,iStm, mxcycle);
//TRACE("\nbreak1 mxcycle=%d NCYCL=%d ic=%d iCell=%d, iStm=%d, mS2BN=%d,ipstm=%d",mxcycle, NCYCL,ic, iCell, iStm,*(iparm+(iCell-1)*NPARM+18),ipstm[0][i]);
if (iCell==5) continue; // ignore BB
if (iStm != ic) continue;
// ic: i-th time Step
// nACTv: number of exitation cells at ic time but cellType != 5 (BB)
// --- end ---
//TRACE("\nbreak1 mxcycle=%d NCYCL=%d ic=%d iCell=%d, iStm=%d",mxcycle, NCYCL,ic, iCell, iStm);
nACTv=nACTv+1;
*(iACTv[0]+nACTv)=jx;
*(iACTv[1]+nACTv)=jy;
*(iACTv[2]+nACTv)=jz;
*(iACTv[3]+nACTv)=*(iparm+(iCell-1)*NPARM+31); /*<Comment by ALF> iparm store each cell's parameters*/
// iparm(n,32): conduction speed
wtXCTm(ic,jx,jy,jz);
//if (jx==101 && jy==77 && jz==6) TRACE("\nA mxcycle=%d at ic=%d, iCell=%d",mxcycle,ic,iCell);
//if (iCell <3) TRACE("\nA %d %d %d %d %d %d",iCell,jx,jy,jz,ic,nACTv);
// write to file
// mxcycle: maximum cycle
if(mxcycle>=NCYCL) {
break;
}
// --- store current time to iXCT and last time to iXCTOld -->
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx); // init is INFTIME
*(iXCT[jz]+jy*NI+jx)=ic;
excited=1;
// Update ipttl[3][i]
// iparm(n,18) = BCL: basic cycle length (ms) of pacing
// Normally, only SN has this parameter > 0
/*if(*(iparm+(iCell-1)*NPARM+17)>0) {
if ((iS1S2==1) && (mS2BN>1)) {
itmp=ipttl[3][i]+mS2CL;
mS2BN--;
} else {
itmp=ipttl[3][i]+ipstm[0][i];
}
dS1S2Old=ipstm[2][i];
ipstm[0][i] = ipstm[0][i] + ipstm[1][i];
iCell5Ex=0;
// ipstm[1][i] is the step
// iparm(n,19) = pBN: beat number
// judge by ipttl[3][i]
if(itmp>*(iparm+(iCell-1)*NPARM+18)) continue;
if ((mS2ST/3 > ipttl[3][i]) &&(mS2ST/3 < itmp)) {
ipttl[3][i]=(short int)(mS2ST/3);
iS1S2=1;
} else {
ipttl[3][i]=itmp;
}
ipstm[2][i]=itmp-ipttl[3][i];
//TRACE("\nTime=%d, %d, %d, %d, %d %d",ic,itmp,ipttl[3][i],dS1S2Old, ipstm[0][i],ipstm[1][i]);
continue;
}*/
if(*(iparm+(iCell-1)*NPARM+17)>0) {
if (iCell==1) {
itmp=ipttl[3][i]+ipstm[0][i];
dS1S2Old=ipstm[2][i];
ipstm[0][i] = ipstm[0][i] + ipstm[1][i];
iCell5Ex=0;
if(itmp>*(iparm+(iCell-1)*NPARM+18)) continue;
ipttl[3][i]=itmp; continue;}
else
{
itmp=ipttl[3][i]+ipstm[0][i];
dS1S2Old=ipstm[2][i];
ipstm[0][i] = ipstm[0][i] + ipstm[1][i];
iCell5Ex=0;
if(itmp>*(iparm+(iCell-1)*NPARM+18)-ipstm[0][i]+3) continue;
ipttl[3][i]=itmp;
}
continue;
}
// iparm(n,24) = ICL: intrinsic cycle length(ms)
ipttl[3][i] = ipttl[3][i] + *(iparm+(iCell-1)*NPARM+23);
}
// ---- display the excitation number ----
// go to next Step
nblck = nblck + nACTv;
//TRACE("\nmxcycle =%d Step=%3d, number=%ld nblck=%ld ",mxcycle,ic,nACTv, nblck);
ic = ic + 1;
//TRACE("\nbreak2 ic=%d maxXctStep=%d ",ic, maxXctStep);
if (ic>=maxXctStep) break;
if (nACTv == 0) continue;
/**
* very important
*/
// --------- propagation (2000)------------>
nACTvOld=0;
// nACTv: at moment t, the number of excited cells
for (i=1;i<=nACTv;i++) {
excited=1;
ix=*(iACTv[0]+i);
iy=*(iACTv[1]+i);
iz=*(iACTv[2]+i);
iv=*(iACTv[3]+i);
iCell=*(mapCell[iz]+iy*NI+ix);
//if (ix == 64 && iy == 50 && iz == 64) TRACE("\nB AVN %d",iCell);
//----------- low conduction speed part ----------->
// iparm(n,32): conduction speed
if (*(iparm+(iCell-1)*NPARM+31)<=0) continue;
if (iCell==5) iCell5Ex=1;
//if (iCell==8) TRACE("\nCell=8 %d, %d, %d, ic=%d, %d %d",ix,iy,iz,ic,iv,mBCL);
// 100 = Conduction Speed of ATR?
if (iv<100) {
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=ix;
*(iACTvOld[1]+nACTvOld)=iy;
*(iACTvOld[2]+nACTvOld)=iz;
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31)+*(mapSpeed[iz]+iy*NI+ix); //added by zhu
if (iCell==5) {
/*ibbDLY=0;
bbDLYm(ix,iy,iz);
if (ibbDLY>0)
*(iACTvOld[3]+nACTvOld)=iv+ibbDLY;
TRACE("\nBB, %d",*(iACTvOld[3]+nACTvOld));
*/
ibbDLY=0;
// Add for BB interval by hui wang
ibbSTEP=0;
bbDLYm(ix,iy,iz);
// End of add for BB interval by hui wang, modified by zhu
if (ibbDLY>0) {ibbSTEP+=nbbSTEP;ibbDLY=100*ND/(ibbSTEP+1);}
if(ibbDLY>0 && (ibbDLY*ibbSTEP)<100*ND) ibbDLY+=1;
if (ibbDLY>0)
*(iACTvOld[3]+nACTvOld)=iv+ibbDLY;
else
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31);
continue;
}
/*if (iCell==3 || iCell==6) {
if (*(iXCTOld[iz]+iy*NI+ix)==INFTIME)
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31);
else {
irel = *(iXCT[iz]+iy*NI+ix)-*(iXCTOld[iz]+iy*NI+ix)-(*(iparm+NPARM*(iCell-1)+4)+*(mapAPD[iz]+iy*NI+ix))/3;
//irel = *(iXCT[iz]+iy*NI+ix)-*(iXCTOld[iz]+iy*NI+ix)-(*(iparm+NPARM*(iCell-1)+4))/3;
irel = 3*irel;
if (irel<*(iparm+NPARM*(iCell-1)+5)) {
tmp=100+*(iparm+NPARM*(iCell-1)+32)
-irel*(*(iparm+NPARM*(iCell-1)+32))/(*(iparm+NPARM*(iCell-1)+5));
if (tmp!=0) {
ivel = 100*(*(iparm+NPARM*(iCell-1)+31))/tmp;
} else {
ivel=*(iparm+NPARM*(iCell-1)+31);
}
} else {
// <--- time of RRP stored in iparm(6) ---
ivel=*(iparm+NPARM*(iCell-1)+31);
}
*(iACTvOld[3]+nACTvOld)=iv+ivel;}
}*/
/*else if (iCell==3) {
if (iCell5Ex==0) {
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31)- dS1S2Old/20;
TRACE("\nCell=3 E dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
} else {
if (mBCL<600&&dS1S2Old<140/3) {
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31)- (dS1S2Old+67)/33;
TRACE("\nCell=3 A dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
} else if (mBCL<600&&dS1S2Old>=140/3) {
*(iACTvOld[3]+nACTvOld)=iv;
TRACE("\nCell=3 B dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
} else if (mBCL>=600&&dS1S2Old<=210/3) {
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31);
TRACE("\nCell=3 C dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
} else {
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31)- dS1S2Old/12;
TRACE("\nCell=3 D dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
}
}
}*/
continue;
}
// ------- neighbourhood search (2100) -------->
// go to iv > 100 situation and set ires = the part of iv < 100
ires=iv-100*(int)(iv/100);
for (l=0;l<12;l++) {
jx=ix+iseqx[l];
if ((jx<=-1)||(jx>(NI-1))) continue; // >=0 <NI
jy=iy+iseqy[l];
if ((jy<=-1)||(jy>(NJ-1))) continue; // >=0 <NJ
jz=iz+iseqz[l];
if ((jz<=-1)||(jz>(NK-1))) continue; // >=0 <NK
// >>>>> aniso: within the ellpisoid ? >>>>>>>>>>>>
if (ANISO==1 && iCell==7) {
dani=local(ix,iy,iz);
//TRACE("\nx,y,z,dani, %2d %2d %2d %f",ix,iy,iz,dani);
// -- if can't solve local coordinates, treat as isotropic -->
if (dani > 0.0001) {
//lctran(iseqx[l],iseqy[l],iseqz[l],dani,xani,yani,zani);
xani=iseqx[l]*tmswf[0][0]+iseqy[l]*tmswf[0][1]+iseqz[l]*tmswf[0][2];
yani=iseqx[l]*tmswf[1][0]+iseqy[l]*tmswf[1][1]+iseqz[l]*tmswf[1][2];
zani=iseqx[l]*tmswf[2][0]+iseqy[l]*tmswf[2][1]+iseqz[l]*tmswf[2][2];
dxani=xani*yaxis[1]*zaxis[2]+yani*yaxis[2]*zaxis[0]
+zani*yaxis[0]*zaxis[1]-zani*yaxis[1]*zaxis[0]
-xani*yaxis[2]*zaxis[1]-yani*yaxis[0]*zaxis[2];
dyani=xaxis[0]*yani*zaxis[2]+xaxis[1]*zani*zaxis[0]
+xaxis[2]*xani*zaxis[1]-xaxis[2]*yani*zaxis[0]
-xaxis[0]*zani*zaxis[1]-xaxis[1]*xani*zaxis[2];
dzani=xaxis[0]*yaxis[1]*zani+xaxis[1]*yaxis[2]*xani
+xaxis[2]*yaxis[0]*yani-xaxis[2]*yaxis[1]*xani
-xaxis[0]*yaxis[2]*yani-xaxis[1]*yaxis[0]*zani;
xani=dxani/dani;
yani=dyani/dani;
zani=dzani/dani;
// itms=maps(ix,iy,iz)+1
//TRACE("\nd %f %f %f %f",dxani,xani,yaxis[0],zaxis[0]);
itms1=*(iXCTapd[iz]+iy*NI+ix);
elp=xani*xani/vt2[itms1]+
yani*yani/vt2[itms1]+
zani*zani/vl2[itms1];
// write(0,*) x,y,z,elp
// TRACE("\n %d %f",itms1,elp);
if (elp > 1.0) continue;
}
}
// <<<<<<<<<<<<<<<<<<<< aniso <<<<<<<<<<<<<<<<<<<
mCell=*(mapCell[jz]+jy*NI+jx);
if ((iCell<=7)&&(mCell<=7)&&(((iCell-mCell)>1)||
((iCell-mCell)<-1))) continue;
if ((*(iparm+NPARM*(mCell-1)+33)>0)&&( (mCell>7 && iCell>7 && mCell!=iCell) || (mCell<=7 && iCell>mCell) || (mCell>7 && !(iCell==mCell || iCell==2)))) continue;
//if ((*(iparm+NPARM*(mCell-1)+33)>0)&&( (mCell<=7 && iCell>mCell) || (mCell>7 && !(iCell==mCell || iCell==2)))) continue;
//if ((*(iparm+NPARM*(mCell-1)+33)>0)&&(iCell>mCell)) continue;
//if ((*(iparm+NPARM*(mCell-1)+33)<0)&&(iCell<mCell)) continue;
//if ((*(iparm+NPARM*(mCell-1)+33)<0)&&( (mCell<=7 && iCell<mCell) || (mCell>7 && !(iCell==mCell || iCell==7)))) continue;
if ((*(iparm+NPARM*(mCell-1)+33)<0)&&( (mCell>7 && iCell>7 && mCell!=iCell) || (mCell<=7 && iCell<mCell) || (mCell>7 && !(iCell==mCell || iCell==7)))) continue;
//if (jx == 64 && jy == 50 && jz == 64) TRACE("\nC AVN %d",mCell);
if (mCell<=0) continue; // continue;
if (mCell>=15) continue; // continue;
if (mCell==5) iCell5Ex=1;
//if (iCell==8) TRACE("\nCell=8 %d, %d, %d, ic=%d, %d %d",jx,jy,jz,ic);;
// --- coupling interval ------>
idltt=ic-*(iXCT[jz]+jy*NI+jx);
if (*(iXCT[jz]+jy*NI+jx)==INFTIME) idltt=INFTIME;
// --- change in cycle length ------>
idltc=idltt+*(iXCTOld[jz]+jy*NI+jx)-*(iXCT[jz]+jy*NI+jx);
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME) idltc=0;
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
*(iparm+(1-1)*NPARM+23)>0)
idltc = ic-*(iXCT[jz]+jy*NI+jx)-*(iparm+(1-1)*NPARM+23);
// rdXCT(ic,jx,jy,jz);
// irp = time in phase 2 + mapACT/3 +plateau of potential in phase 3 *idltc/100
// --- absolute refractory period ------>
//irp=(*(iparm+NPARM*(mCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3
// +*(iparm+(mCell-1)*NPARM+10)*idltc/100;
irp=(*(iparm+NPARM*(mCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3;
//if (mCell==6)
// irp=(*(iparm+NPARM*(mCell-1)+4))/3;
irel=idltt-irp;
// ++++++++ in absolute refractory period ? +++++++
if (irel<=0) continue;
//if (*(mapAPD[jz]+jy*NI+jx)>20) idltc = 2*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>20 && idltc<0) idltc = 2*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>30 && idltc<0) idltc = 3*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>30 && idltc>0) idltc = 2*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>40 && idltc>0) idltc = 2*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>40 && idltc>0) idltc = 2*idltc;
//if (mCell==3) TRACE("\nCell=3 %d, %d, %d,%d,%d,%d",irp,ic,*(iXCT[jz]+jy*NI+jx),*(mapAPD[jz]+jy*NI+jx),*(iparm+(mCell-1)*NPARM+10),idltc);
*(mapAPD[jz]+jy*NI+jx) += *(iparm+(mCell-1)*NPARM+10)*idltc*3/100; //added by Zhu
//if (mCell==3) TRACE("\nCell=3 %d, %d, %d",*(mapAPD[jz]+jy*NI+jx)/3,idltc,irp);
if (*(iXCT[jz]+jy*NI+jx)==INFTIME && mCell==3) {irel=INFTIME;*(mapAPD[jz]+jy*NI+jx)=0;}
// --- find automaticity in stimul data ----
// iparm(n,24), ICL: intrinsic cycle length (ms)
iSTOP =0;
if (*(iparm+NPARM*(mCell-1)+23)>0) { // !=0 August 10, 1996
// <--- next touch time should be beyound ARP of the cell --
for (is=0;is<nttl;is++) {
if (jx!=ipttl[0][is]) continue;
if (jy!=ipttl[1][is]) continue;
if (jz!=ipttl[2][is]) continue;
// --- iparm(23) used for adjusting intermediate change
// of EP intrinsic cycle length --->
ICL = *(iparm+NPARM*(mCell-1)+23);
ist = ic-*(iXCT[jz]+jy*NI+jx);
// PRT: protection indicator
// --- no protection ---->
if (*(iparm+NPARM*(mCell-1)+24)==0) {
if (ist<=irp) continue; //{iSTOP=1;break;}
//if (iSTOP==1)
ipttl[3][is]=ic+ICL; // ICL/3
/******************/
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=jx;
*(iACTvOld[1]+nACTvOld)=jy;
*(iACTvOld[2]+nACTvOld)=jz;
*(iACTvOld[3]+nACTvOld)=*(iparm+NPARM*(mCell-1)+31)+ires;
wtXCTm(ic,jx,jy,jz);
if (mxcycle>=NCYCL) {iSTOP=1;break;}
//if (ic==*(iXCT[jz]+jy*NI+jx)) continue;
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;//added to by zhu
irel=0;
excited=1;
TRACE("\n %d, %d %d %d %d %d",*(iXCTOld[jz]+jy*NI+jx),*(iXCT[jz]+jy*NI+jx),ic,jx,jy,jz);
/******************/
//iSTOP=1;
continue; //break; // rewrite condition
}
if (idltt==INFTIME) continue;
//ist = ic-*(iXCT[jz]+jy*NI+jx);
// if (ist<=irp) goto loop21; // August 10, 1996
if (ist<=irp) continue; //{iSTOP=1;break;}
phsft =(float)100.*(idltt/ICL);
mxDLY =(float)*(iparm+NPARM*(mCell-1)+25);
mACCl =(float)*(iparm+NPARM*(mCell-1)+26);
if (mxDLY == 0 && mACCl == 0) continue;
icross=(float)*(iparm+NPARM*(mCell-1)+27);
if (icross == 0 || icross == 100) continue;
if (phsft<=icross)
delt=phsft*mxDLY/icross;
else
delt=mACCl-(phsft-icross)*mACCl/(100-icross);
// -- store touch time --->
// -- modify next stimulating time --->
ipttl[3][is]=ipttl[3][is]+(int)(ICL*delt/100);
//TRACE("\ntime=%4d,ixt=%4d,idltt=%4d,icl=%4d,phsft=%4d,intermediate=%4d",
// ic, *(iXCT[jz]+jy*NI+jx),idltt,ICL, (int)phsft, ipttl[3][is]);
// change value after each touch time
// avoiding from successive modification by surrounding cells
if (ic==*(iXCT[jz]+jy*NI+jx)) continue;
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
irel=0;
excited=1;
//iSTOP=1;
continue; //break; // rewrite condition
}
}
if (iSTOP==1) continue;
if (irel==0) continue;
// +++++ special processing for BB +++++
if (mCell==5) {
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
// Add for BB interval by hui wang modified by Zhu
// variable ibbSTEP, nbbSTEP are added to store steps by first BB
// ibbSTEP is a function in bbDLYm(i,j,k)
nbbSTEP=0;
ibbDLY=0;
ibbSTEP=0;
bbDLYm(jx,jy,jz);
nbbSTEP=ibbSTEP;
// end of add for BB interval by hui wang
//ic+=10; // add by hw, BB interval
//TRACE("\n nHB = %d, ic= %d",nHB,ic);
for(kBB=0;kBB<nBB;kBB++) {
jx=iBB[0][kBB];
jy=iBB[1][kBB];
jz=iBB[2][kBB];
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=jx;
*(iACTvOld[1]+nACTvOld)=jy;
*(iACTvOld[2]+nACTvOld)=jz;
//*(iACTvOld[3]+nACTvOld)=100;
ibbDLY=0;
// Add for BB interval by hui wang,modified by zhu
ibbSTEP=0;
bbDLYm(jx,jy,jz);
ibbSTEP+=nbbSTEP;
ibbDLY=100*ND/(ibbSTEP+1);
if((ibbDLY*ibbSTEP)<100*ND) ibbDLY+=1;
// End of add for BB interval by hui wang
*(iACTvOld[3]+nACTvOld)=ibbDLY;
wtXCTm(ic,jx,jy,jz);
//if (jx==101 && jy==77 && jz==6) TRACE("\nB mxcycle=%d at ic=%d, iCell=%d",mxcycle,ic,mCell);
//if (mCell >2 && mCell <6) TRACE("\nB %d %d %d %d %d %d",mCell,jx,jy,jz,ic,nACTvOld);
if (mxcycle>=NCYCL) break;
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
excited=1;
}
continue;
}
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=jx;
*(iACTvOld[1]+nACTvOld)=jy;
*(iACTvOld[2]+nACTvOld)=jz;
wtXCTm(ic,jx,jy,jz);
//TRACE("\nbreak3 mxcycle=%d NCYCL=%d ",mxcycle, NCYCL);
if (mxcycle>=NCYCL) break;
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
irel = 3*irel;
//if (*(iXCTOld[jz]+jy*NI+jx)!=INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
// irel>*(iparm+NPARM*(mCell-1)+5))
// *(mapAPD[jz]+jy*NI+jx)=0;
// time of RRP stored in iparm(6)
if ((irel)<*(iparm+NPARM*(mCell-1)+5)) {
tmp=100+*(iparm+NPARM*(mCell-1)+32)
-irel*(*(iparm+NPARM*(mCell-1)+32))/(*(iparm+NPARM*(mCell-1)+5));
if (tmp!=0) {
ivel = 100*(*(iparm+NPARM*(mCell-1)+31))/tmp;
} else {
ivel=*(iparm+NPARM*(mCell-1)+31);
}
} else {
// <--- time of RRP stored in iparm(6) ---
ivel=*(iparm+NPARM*(mCell-1)+31);
}
*(mapSpeed[jz]+jy*NI+jx)=ivel-*(iparm+NPARM*(mCell-1)+31);//added by Zhu
// test results
//TRACE("\nmcell=%4d,ic=%4d,idltt=%4d,idltc=%4d,ivel=%4d",mCell,ic,idltt,idltc,ivel);
if (iCell!=mCell) {
if (mCell == 5) {
bbDLYm(jx,jy,jz);
*(iACTvOld[3]+nACTvOld)=ibbDLY;
//TRACE("\n BB2=%d, %d %d (%d %d %d) ic=%d ",*(iACTvOld[3]+nACTvOld),iv,ibbDLY,ix,iy,iz, ic);
continue;
}
*(iACTvOld[3]+nACTvOld)=ivel;
continue;
}
*(iACTvOld[3]+nACTvOld)=ivel+ires;
}
// <------- END of neighbourhood search (2100) -----
// >>>>>>>> anisotropy >>>>>
if (ANISO==1 && iCell == 7) {
// ltrat==2;
if (*(iXCTapd[iz]+iy*NI+ix) < 2) {
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=ix;
*(iACTvOld[1]+nACTvOld)=iy;
*(iACTvOld[2]+nACTvOld)=iz;
//*(iACTvOld[3]+nACTvOld)=ires+
// *(iparm+NPARM*(iCell-1)+31);
*(iACTvOld[3]+nACTvOld)=ires+
*(iparm+NPARM*(iCell-1)+31)+*(mapSpeed[iz]+iy*NI+ix);
*(iXCTapd[iz]+iy*NI+ix)+=1;
} else {
*(iXCTapd[iz]+iy*NI+ix)=0;
}
}
// <<<<<<<<<<<
}
// <------- END of propagation (2000) -----
// +++++++++++ for high speed ++++++++
mACTv=nACTvOld;
// ------- propagation (1000) -------->
for(i=1;i<=nACTvOld;i++) {
idist=(int)(*(iACTvOld[3]+i)/100);
if (idist<2) continue;
*(jACTv[0]+1)=*(iACTvOld[0]+i);
*(jACTv[1]+1)=*(iACTvOld[1]+i);
*(jACTv[2]+1)=*(iACTvOld[2]+i);
ires=*(iACTvOld[3]+i)-idist*100;
nStep=0;
nbrch=1;
jdist=1;
while (1) {
for (j=1;j<=nbrch;j++) {
jx0=*(jACTv[0]+j);
jy0=*(jACTv[1]+j);
jz0=*(jACTv[2]+j);
mCell=*(mapCell[jz0]+jy0*NI+jx0);
if (mCell==5) iCell5Ex=1;
for (l=0;l<12;l++) {
jx=jx0+iseqx[l];
if ((jx<=-1)||(jx>(NI-1))) continue; // <0 or >=NI
jy=jy0+iseqy[l];
if ((jy<=-1)||(jy>(NJ-1))) continue; // <0 or >=NJ
jz=jz0+iseqz[l];
if ((jz<=-1)||(jz>(NK-1))) continue; // <0 or >=NK
kCell = *(mapCell[jz]+jy*NI+jx);
//if (jx == 64 && jy == 50 && jz == 64) TRACE("\nE AVN %d",kCell);
if (kCell != mCell) continue;
//++++++++ in effective refractory period ? +++++++
// >>>>> aniso: within the ellpisoid ? >>>>>>>>>>>>
if (ANISO==1 && mCell==7) {
dani=local(jx0,jy0,jz0);
//TRACE("\nx,y,z,dani, %2d %2d %2d %f",ix,iy,iz,dani);
// -- if can't solve local coordinates, treat as isotropic -->
if (dani > 0.0001) {
//lctran(iseqx[l],iseqy[l],iseqz[l],dani,xani,yani,zani);
xani=iseqx[l]*tmswf[0][0]+iseqy[l]*tmswf[0][1]+iseqz[l]*tmswf[0][2];
yani=iseqx[l]*tmswf[1][0]+iseqy[l]*tmswf[1][1]+iseqz[l]*tmswf[1][2];
zani=iseqx[l]*tmswf[2][0]+iseqy[l]*tmswf[2][1]+iseqz[l]*tmswf[2][2];
dxani=xani*yaxis[1]*zaxis[2]+yani*yaxis[2]*zaxis[0]
+zani*yaxis[0]*zaxis[1]-zani*yaxis[1]*zaxis[0]
-xani*yaxis[2]*zaxis[1]-yani*yaxis[0]*zaxis[2];
dyani=xaxis[0]*yani*zaxis[2]+xaxis[1]*zani*zaxis[0]
+xaxis[2]*xani*zaxis[1]-xaxis[2]*yani*zaxis[0]
-xaxis[0]*zani*zaxis[1]-xaxis[1]*xani*zaxis[2];
dzani=xaxis[0]*yaxis[1]*zani+xaxis[1]*yaxis[2]*xani
+xaxis[2]*yaxis[0]*yani-xaxis[2]*yaxis[1]*xani
-xaxis[0]*yaxis[2]*yani-xaxis[1]*yaxis[0]*zani;
xani=dxani/dani;
yani=dyani/dani;
zani=dzani/dani;
// itms=maps(ix,iy,iz)+1
//TRACE("\nd %f %f %f %f",dxani,xani,yaxis[0],zaxis[0]);
itms1=*(iXCTapd[jz0]+jy0*NI+jx0);
elp=xani*xani/vt2[itms1]+
yani*yani/vt2[itms1]+
zani*zani/vl2[itms1];
// write(0,*) x,y,z,elp
// TRACE("\n %d %f",itms1,elp);
if (elp > 1.0) continue;
}
}
idltt=ic-*(iXCT[jz]+jy*NI+jx);
if (*(iXCT[jz]+jy*NI+jx)==INFTIME) idltt=INFTIME;
// --- change in cycle length ------>
idltc=idltt+*(iXCTOld[jz]+jy*NI+jx)-*(iXCT[jz]+jy*NI+jx);
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME) idltc=0;
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
*(iparm+(1-1)*NPARM+23)>0)
idltc = ic-*(iXCT[jz]+jy*NI+jx)-*(iparm+(1-1)*NPARM+23);
// rdXCT(ic,jx,jy,jz);
// irp = time in phase 2 + mapACT/3 +plateau of potential in phase 3 *idltc/100
// --- absolute refractory period ------>
//irp=(*(iparm+NPARM*(mCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3
// +*(iparm+(mCell-1)*NPARM+10)*idltc/100;
irp=(*(iparm+NPARM*(mCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3;
//if (mCell==6)
// irp=(*(iparm+NPARM*(mCell-1)+4))/3;
irel=idltt-irp;
if (irel<=0) continue; // continue;
if (*(iXCT[jz]+jy*NI+jx)==INFTIME && mCell==3) {irel=INFTIME;*(mapAPD[jz]+jy*NI+jx)=0;}
*(mapAPD[jz]+jy*NI+jx) += *(iparm+(mCell-1)*NPARM+10)*idltc*3/100; //added by Zhu
/*
idltt=ic-*(iXCT[jz]+jy*NI+jx);
if (*(iXCT[jz]+jy*NI+jx)==INFTIME) idltt=INFTIME;
idltc=idltt+*(iXCTOld[jz]+jy*NI+jx)-*(iXCT[jz]+jy*NI+jx);
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME) idltc=0;
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
*(iparm+(1-1)*NPARM+23)>0)
idltc = ic-*(iXCT[jz]+jy*NI+jx)-*(iparm+(1-1)*NPARM+23);
// rdXCT(ic,jx,jy,jz);
// irp=(*(iparm+NPARM*(kCell-1)+4)+*(mapACT[jz]+jy*NI+jx))/3+
// *(iparm+(kCell-1)*NPARM+10)*idltc/100;
//irp=(*(iparm+NPARM*(kCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3+
// *(iparm+(kCell-1)*NPARM+10)*idltc/100;
irp=(*(iparm+NPARM*(kCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3;
*(mapAPD[jz]+jy*NI+jx) += *(iparm+(kCell-1)*NPARM+10)*idltc*3/100; //added by Zhu
irel=idltt-irp;
if (*(iXCT[jz]+jy*NI+jx)==INFTIME) irel=INFTIME;
*/
irel = 3*irel;
//if (*(iXCTOld[jz]+jy*NI+jx)!=INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
// irel>=*(iparm+NPARM*(kCell-1)+5))
// *(mapAPD[jz]+jy*NI+jx)=0;
if ((irel)<*(iparm+NPARM*(kCell-1)+5)) {
tmp=100+*(iparm+NPARM*(mCell-1)+32)
-irel*(*(iparm+NPARM*(mCell-1)+32))/(*(iparm+NPARM*(mCell-1)+5));
if (tmp!=0) {
ivel = 100*(*(iparm+NPARM*(mCell-1)+31))/tmp;
} else {
ivel=*(iparm+NPARM*(mCell-1)+31);
}
} else {
ivel=*(iparm+NPARM*(kCell-1)+31);
}
*(mapSpeed[jz]+jy*NI+jx)=ivel-*(iparm+NPARM*(kCell-1)+31);//added by Zhu
nStep=nStep+1;
*(kACTv[0]+nStep)=jx;
*(kACTv[1]+nStep)=jy;
*(kACTv[2]+nStep)=jz;
// nStep++;
mACTv=mACTv+1;
*(iACTvOld[0]+mACTv)=jx;
*(iACTvOld[1]+mACTv)=jy;
*(iACTvOld[2]+mACTv)=jz;
*(iACTvOld[3]+mACTv)=ivel+ires;
// mACTv++;
// TRACE(" D%d,",mACTv);
wtXCTm(ic,jx,jy,jz);
//if (jx==101 && jy==77 && jz==6) TRACE("\nD mxcycle=%d at ic=%d, iCell=%d",mxcycle,ic,kCell);
//if (kCell >2 && kCell <6) TRACE("\nD %d %d %d %d %d %d",kCell,jx,jy,jz,ic,mACTv);
//TRACE("\nbreak4 mxcycle=%d NCYCL=%d ",mxcycle, NCYCL);
if (mxcycle>=NCYCL) {
//TRACE("\nbreak5 iSTOP=%d mxcycle=%d,NCYCL=%d",iSTOP, mxcycle, NCYCL);
iSTOP =1;
break;
}
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
excited=1;
}
/*// >>>>>>>> anisotropy >>>>>
if (ANISO==1 && mCell == 7) {
// ltrat==3;
if (*(iXCTapd[jz0]+jy0*NI+jx0) < 3) {
mACTv=mACTv+1;
*(iACTvOld[0]+mACTv)=jx0;
*(iACTvOld[1]+mACTv)=jy0;
*(iACTvOld[2]+mACTv)=jz0;
*(iACTvOld[3]+mACTv)=ivel+ires;
//*(iACTvOld[3]+nACTvOld)=ires+
// *(iparm+NPARM*(iCell-1)+31);
*(iACTvOld[3]+nACTvOld)=ires+
*(iparm+NPARM*(mCell-1)+31)+*(mapSpeed[jz0]+jy0*NI+jx0);
*(iXCTapd[jz0]+jy0*NI+jx0)+=1;
} else {
*(iXCTapd[jz0]+jy0*NI+jx0)=0;
}
}
// <<<<<<<<<<<*/
if (iSTOP ==1) break;
}
if (iSTOP ==1) break;
if (nStep==0) break; // continue;
jdist=jdist+1;
if (jdist>=idist) break; // continue;
for(k=1;k<=nStep;k++) {
*(jACTv[0]+k)=*(kACTv[0]+k);
*(jACTv[1]+k)=*(kACTv[1]+k);
*(jACTv[2]+k)=*(kACTv[2]+k);
}
nbrch=nStep;
nStep=0;
}
}
//TRACE("\nbreak5 iSTOP=%d ",iSTOP);
if (iSTOP ==1) break;
// <------- END of propagation (1000) -------------
if (excited == 0) break;
nACTv=mACTv;
// nblck=nblck+nACTv;
for(i=1;i<=nACTv;i++) {
for(j=0;j<4;j++) {
*(iACTv[j]+i)=*(iACTvOld[j]+i);
}
}
} // END of whole while loop
TRACE("\nmxcycle=%d",mxcycle);
mxcycle++; // hui
// add HB info
for (itmp=0; itmp<50*ND; itmp++) {
for (tmp=0;tmp<NCYCL;tmp++) {
vHB[tmp][itmp]=0;
}
}
for (itmp=0; itmp<nHB; itmp++) {
l=iHB[0][itmp];
j=iHB[1][itmp];
k=iHB[2][itmp];
if (itmp==0) i=*(locXCT[k]+j*NJ+l); // Consider only the point near AV Node
for (tmp=0;tmp<mxcycle;tmp++) {
vHB[tmp][itmp]=*(mapXCTm[tmp]+i);
}
}
// Save
CFile f;
CFileException e;
//short int index;
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'x');
//filepath.SetAt(index+2,'c');
//filepath.SetAt(index+3,'t');
if (myid==0){
if (!f.Open( dataPath+"tour.xct ", CFile::modeCreate | CFile::modeWrite, &e )) {
#ifdef _DEBUG
afxDump << "File could not be opened " << e.m_cause << "\n";
#endif
}
//f.Write(&mxcycle,2);
f.Write(&miBN,2);
f.Write(&ic,2);
f.Write(&totalCell,4);
for(j=0;j<mxcycle;j++) {
for(i=0;i<totalCell;i++) f.Write(mapXCTm[j]+i,2);
}
f.Close();
};
/*
FILE * iow;
iow=fopen("fpMapXCTm.txt","wt");
if (iow == NULL) {
fprintf(stderr, "Open .txt for write failed! \n");
return;
}
long temploc;
temploc=*(locXCT[45]+22*NJ+33);
fprintf(iow,"33 22 45 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[40]+30*NJ+32);
fprintf(iow,"32 30 40 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[48]+20*NJ+30);
fprintf(iow,"30 20 48 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[56]+8*NJ+26);
fprintf(iow,"26 8 56 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[62]+10*NJ+21);
fprintf(iow,"21 10 62 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[62]+30*NJ+13);
fprintf(iow,"13 30 62 %3d\n",*(mapXCTm[0]+temploc));
for(l=0;l<mxcycle;l++) {
fprintf(iow,"l=%d\n",l);
for(k=0;k<NK;k++) {
for(j=0;j<NJ;j++) {
for(i=NI-1;i>-1;i--) {
temploc = *(locXCT[k]+j*NJ+i);
if (temploc < 0) fprintf(iow," ");
else fprintf(iow,"%3d ",*(mapXCTm[l]+temploc));
}
fprintf(iow,"j=%d\n",j);
}
fprintf(iow,"k=%d\n",k);
}
}
fclose(iow);
*/
for(i=0;i<4;i++) {
free(iACTv[i]);
free(iACTvOld[i]);
free(jACTv[i]);
free(kACTv[i]);
}
for(i=0;i<NK;i++) {
free(iXCT[i]);
free(iXCTapd[i]);
free(iXCTOld[i]);
}
}
// ---- BB conduction ----
void bbDLYm(short int i00,short int j00,short int k00) {
	// Look up the point (i00,j00,k00) in the BB point table ipttl[0..2][*].
	// For every matching entry, publish (via file-scope globals) the stored
	// step count ibbSTEP and a per-step delay ibbDLY derived from it.
	// NOTE(review): the scan deliberately continues past a match, so a later
	// duplicate entry would win — presumably entries are unique; confirm.
	for (short int n = 0; n < nttl; n++) {
		if (ipttl[0][n] != i00 || ipttl[1][n] != j00 || ipttl[2][n] != k00)
			continue;
		// BB interval step count (added by hui wang).
		ibbSTEP = ipttl[3][n];
		// Delay per step; bump by one when the integer division truncated
		// so that ibbDLY * steps covers the full 100*ND span.
		ibbDLY = 100 * ND / (ipttl[3][n] + 1);
		if (ibbDLY * ipttl[3][n] < 100 * ND)
			ibbDLY += 1;
	}
}
// ********** find the time since last excitation*********
void rdXCTm(short int icc,short int i00,short int j00,short int k00) {
short int ncyc,n1cyc;
idltt=INFTIME; /*<Comment by ALF> period between 2 continuous excitation*/
idltc=0; /*<Comment by ALF> delta of 2 periods */
short int n;
long locxct;
locxct=*(locXCT[k00]+j00*NI+i00);
if(locxct<0) return;
for(n=NCYCL-1;n>=0;n--) {
ncyc=*(mapXCTm[n]+locxct);
if (icc>=ncyc) {
idltt=icc-ncyc;
break;
}
}
if ((n<=0)||(n>=NCYCL-1)) return; /*<Comment by ALF> To prevent a over-cross array operation*/
n1cyc=*(mapXCTm[n+1]+locxct);
if (n1cyc==INFTIME) return;
idltc=n1cyc-ncyc-ncyc+*(mapXCTm[n-1]+locxct);
return;
}
// ******* sub write XCT **********
void wtXCTm(short int icc,short int i00,short int j00,short int k00)
{
short int n;
long locxct=*(locXCT[k00]+j00*NI+i00);
if(locxct<0) return;
for(n=0;n<NCYCL;n++) {
if (*(mapXCTm[n]+locxct)!=INFTIME) continue;
*(mapXCTm[n]+locxct)=icc;
if (mxcycle<n) mxcycle=n;
break;
}
return;
}
// Body surface potential (BSP) calculation
void BSPcalm(void) {
void BSPitmm(short int, short int **, float *, float *, float *, float *, float **, float **,short int, float *, float *);
// 心外膜电势队列
vector<float> epicPOT[TSTEP];
short int nVCG,BSPm,mTime,iTime,i,j;
short int nsnrt;
float *VCGs[3],*VCGs_reduce[3];//*VCGs_reduce[3] by sf 090622
float eff;
float *endoHnnA;
// 心内膜电势队列
float *endoPOT[TSTEP];//*endoPOT_reduce[TSTEP];//*endoPOT_reduce[TSTEP] by sf 090622
HFILE hFp;
short int index;
int nn,n0,n1,n2,ni;
float pi=3.1415926;
short int *tnd[3];
// cpu个数 默认为计算机CPU内核数
//int cpunum = omp_get_num_threads();
int cpunum = omp_get_num_procs();
//int cpunum = 4;
// 设置使用的CPU数
omp_set_num_threads(cpunum);
for(i=0;i<TSTEP;i++) {
endoPOT[i]=(float *) malloc(2*NENDO*ND3*4);
//endoPOT_reduce[i]=(float *) malloc(2*NENDO*ND3*4);//by sf 090622
if(endoPOT[i]==NULL){
cout<<"Out of memory !\n";
exit(1);
}
}
for(i=0;i<TSTEP;i++) {
for(ni=0;ni<2*NENDO*ND3;ni++) {
*(endoPOT[i]+ni)=(float)0;
//*(endoPOT_reduce[i]+ni)=(float)0;
}
}
endoHnnA=(float *) malloc(2*NENDO*ND3*4);
if((endoHnnA==NULL)) {
cout<<"Out of memory !\n";
exit(1);
}
for(ni=0;ni<2*NENDO*ND3;ni++) *(endoHnnA+ni)=(float)0;
// malloc epicardial potential array 分配心外膜电位队列
//ASSERT(Nepic != 0);
if(Nepic == 0){
fprintf(stdout, "Nepic == 0");
exit(1);
}
for (i=0; i<TSTEP; ++i) {
epicPOT[i].resize(Nepic,0);
}
for(i=0;i<3;i++) {
VCGs[i]=(float *)malloc(TSTEP*4);
VCGs_reduce[i]=(float *)malloc(TSTEP*4);//by sf 090622
if (VCGs[i]==NULL) {
cout<<"Out of memory !\n";
exit(1);
flag_flop=1;
return;
}
}
for (i=0;i<3;i++){
VCG[i]=(float)0;
}
for (i=0;i<3;i++) {
for (j=0;j<TSTEP;j++) {
*(VCGs[i]+j)=(float)0;
*(VCGs_reduce[i]+j)=(float)0;//by sf 090622
}
}
// matrix data of endo-body surface 心内膜-体表电势矩阵
for(i=0;i<3;i++) {
tnd[i] = (short int *) malloc((NL-2)*2*2);
if(tnd[i]==NULL) {
cout<<"Out of memory !\n";
exit(1);
}
}
// 初始化tnd
hFp=_lopen(dataPath+"tour.tnd ",OF_READ);
if (hFp==HFILE_ERROR) {
fprintf(stdout,"Can not open tnd file ! !\n");
fflush(stdout);
flag_flop=1;
return;
}
for(i=0;i<(NL-2)*2;i++) {
_lread(hFp,tnd[0]+i,2);
_lread(hFp,tnd[1]+i,2);
_lread(hFp,tnd[2]+i,2);
}
_lclose(hFp);
float ax,ay,az,x0,y0,z0,x1,y1,z1,x2,y2,z2,a01,a12,a20,s,h;
float x3,y3,z3,u3,x4,y4,z4,u4,x5,y5,z5,u5;
short int ISGN=1;
float *hnn;
hnn=(float *) malloc((NL-2)*2*(NL-2)*2*4);
if (hnn==NULL) {
cout<<"Out of memory !\n";
exit(1);
}
for(ni=0;ni<(NL-2)*2*(NL-2)*2;ni++) {
*(hnn+ni)=(float)0;
}
ni=0;
for(nn=0;nn<(NL-2)*2;nn++) {
// ---- measurement location -------
n0=*(tnd[0]+nn)-1; /*<Comment by ALF> triangle node array */
n1=*(tnd[1]+nn)-1;
n2=*(tnd[2]+nn)-1;
ax=(*(r[0]+n0)+*(r[0]+n1)+*(r[0]+n2))/3; /*<Comment by ALF> distance from center of triangle to view point */
ay=(*(r[1]+n0)+*(r[1]+n1)+*(r[1]+n2))/3;
az=(*(r[2]+n0)+*(r[2]+n1)+*(r[2]+n2))/3;
for(i=0;i<(NL-2)*2;i++) {
if (i==nn) {
*(hnn+ni)=0.5;
ni++;
continue;
}
/*<Comment by ALF> xn,yn,zn is the co-ordinate by set the center of triangle as the the origin*/
n0=*(tnd[0]+i)-1;
x0=*(r[0]+n0)-ax;
y0=*(r[1]+n0)-ay;
z0=*(r[2]+n0)-az;
n1=*(tnd[1]+i)-1;
x1=*(r[0]+n1)-ax;
y1=*(r[1]+n1)-ay;
z1=*(r[2]+n1)-az;
n2=*(tnd[2]+i)-1;
x2=*(r[0]+n2)-ax;
y2=*(r[1]+n2)-ay;
z2=*(r[2]+n2)-az;
a01=acos((x0*x1+y0*y1+z0*z1)/sqrt(x0*x0+y0*y0+z0*z0)/sqrt(x1*x1+y1*y1+z1*z1));
a12=acos((x1*x2+y1*y2+z1*z2)/sqrt(x1*x1+y1*y1+z1*z1)/sqrt(x2*x2+y2*y2+z2*z2));
a20=acos((x2*x0+y2*y0+z2*z0)/sqrt(x2*x2+y2*y2+z2*z2)/sqrt(x0*x0+y0*y0+z0*z0));
s=(a01+a12+a20)/2;
h=tan(s/2)*tan((s-a01)/2)*tan((s-a12)/2)*tan((s-a20)/2);
if (h<0) h=-h;
s=sqrt(h);
h=atan(s)/pi;
*(hnn+ni)=-h;
ni++;
}
}
float *endoHnnB,*endoHnnC;
endoHnnB=(float *) malloc(NENDO*ND3*(NL-2)*2*4);
endoHnnC=(float *) malloc(NENDO*ND3*(NL-2)*2*4);
if ((endoHnnB==NULL)||(endoHnnC==NULL)) {
cout<<"Out of memory !\n";
exit(1);
}
for(ni=0;ni<NENDO*ND3*(NL-2)*2;ni++) {
*(endoHnnB+ni)=(float)0;
*(endoHnnC+ni)=(float)0;
}
for(nn=0;nn<NendoB;nn++) {
// measurement location 计算位置
ax=HRTx0+endoBx[nn]*tmswf[0][0]+endoBy[nn]*tmswf[0][1]+endoBz[nn]*tmswf[0][2];
ay=HRTy0+endoBx[nn]*tmswf[1][0]+endoBy[nn]*tmswf[1][1]+endoBz[nn]*tmswf[1][2];
az=HRTz0+endoBx[nn]*tmswf[2][0]+endoBy[nn]*tmswf[2][1]+endoBz[nn]*tmswf[2][2];
for(i=0;i<(NL-2)*2;i++) {
n0=*(tnd[0]+i)-1;
x0=*(r[0]+n0)-ax;
y0=*(r[1]+n0)-ay;
z0=*(r[2]+n0)-az;
n1=*(tnd[1]+i)-1;
x1=*(r[0]+n1)-ax;
y1=*(r[1]+n1)-ay;
z1=*(r[2]+n1)-az;
n2=*(tnd[2]+i)-1;
x2=*(r[0]+n2)-ax;
y2=*(r[1]+n2)-ay;
z2=*(r[2]+n2)-az;
x3=(z2*x0-z0*x2)*(x0*y1-x1*y0)-(x2*y0-x0*y2)*(z0*x1-z1*x0);
y3=(x2*y0-x0*y2)*(y0*z1-y1*z0)-(y2*z0-y0*z2)*(x0*y1-x1*y0);
z3=(y2*z0-y0*z2)*(z0*x1-z1*x0)-(z2*x0-z0*x2)*(y0*z1-y1*z0);
u3=(y2*z0-y0*z2)*(y0*z1-y1*z0)+(z2*x0-z0*x2)*(z0*x1-z1*x0)+(x2*y0-x0*y2)*(x0*y1-x1*y0);
a01=atan(-sqrt(x3*x3+y3*y3+z3*z3)/u3);
x4=(z0*x1-z1*x0)*(x1*y2-x2*y1)-(x0*y1-x1*y0)*(z1*x2-z2*x1);
y4=(x0*y1-x1*y0)*(y1*z2-y2*z1)-(y0*z1-y1*z0)*(x1*y2-x2*y1);
z4=(y0*z1-y1*z0)*(z1*x2-z2*x1)-(z0*x1-z1*x0)*(y1*z2-y2*z1);
u4=(y0*z1-y1*z0)*(y1*z2-y2*z1)+(z0*x1-z1*x0)*(z1*x2-z2*x1)+(x0*y1-x1*y0)*(x1*y2-x2*y1);
a12=atan(-sqrt(x4*x4+y4*y4+z4*z4)/u4);
x5=(z1*x2-z2*x1)*(x2*y0-x0*y2)-(x1*y2-x2*y1)*(z2*x0-z0*x2);
y5=(x1*y2-x2*y1)*(y2*z0-y0*z2)-(y1*z2-y2*z1)*(x2*y0-x0*y2);
z5=(y1*z2-y2*z1)*(z2*x0-z0*x2)-(z1*x2-z2*x1)*(y2*z0-y0*z2);
u5=(y1*z2-y2*z1)*(y2*z0-y0*z2)+(z1*x2-z2*x1)*(z2*x0-z0*x2)+(x1*y2-x2*y1)*(x2*y0-x0*y2);
a20=atan(-sqrt(x5*x5+y5*y5+z5*z5)/u5);
s=(a01+a12+a20-pi)*ISGN; // ISGN=1 only; since ISGN=-1 is impossible in our case
h=tan(s/2)*tan((s-a01)/2)*tan((s-a12)/2)*tan((s-a20)/2);
if (h<0) h=-h;
s=sqrt(h);
h=atan(s)/pi;
*(endoHnnB+nn)=h;
}
}
for(nn=0;nn<NendoC;nn++) {
// measurement location 计算位置
ax=HRTx0+endoCx[nn]*tmswf[0][0]+endoCy[nn]*tmswf[0][1]+endoCz[nn]*tmswf[0][2];
ay=HRTy0+endoCx[nn]*tmswf[1][0]+endoCy[nn]*tmswf[1][1]+endoCz[nn]*tmswf[1][2];
az=HRTz0+endoCx[nn]*tmswf[2][0]+endoCy[nn]*tmswf[2][1]+endoCz[nn]*tmswf[2][2];
for(i=0;i<(NL-2)*2;i++) {
n0=*(tnd[0]+i)-1;
x0=*(r[0]+n0)-ax;
y0=*(r[1]+n0)-ay;
z0=*(r[2]+n0)-az;
n1=*(tnd[1]+i)-1;
x1=*(r[0]+n1)-ax;
y1=*(r[1]+n1)-ay;
z1=*(r[2]+n1)-az;
n2=*(tnd[2]+i)-1;
x2=*(r[0]+n2)-ax;
y2=*(r[1]+n2)-ay;
z2=*(r[2]+n2)-az;
x3=(z2*x0-z0*x2)*(x0*y1-x1*y0)-(x2*y0-x0*y2)*(z0*x1-z1*x0);
y3=(x2*y0-x0*y2)*(y0*z1-y1*z0)-(y2*z0-y0*z2)*(x0*y1-x1*y0);
z3=(y2*z0-y0*z2)*(z0*x1-z1*x0)-(z2*x0-z0*x2)*(y0*z1-y1*z0);
u3=(y2*z0-y0*z2)*(y0*z1-y1*z0)+(z2*x0-z0*x2)*(z0*x1-z1*x0)+(x2*y0-x0*y2)*(x0*y1-x1*y0);
a01=atan(-sqrt(x3*x3+y3*y3+z3*z3)/u3);
x4=(z0*x1-z1*x0)*(x1*y2-x2*y1)-(x0*y1-x1*y0)*(z1*x2-z2*x1);
y4=(x0*y1-x1*y0)*(y1*z2-y2*z1)-(y0*z1-y1*z0)*(x1*y2-x2*y1);
z4=(y0*z1-y1*z0)*(z1*x2-z2*x1)-(z0*x1-z1*x0)*(y1*z2-y2*z1);
u4=(y0*z1-y1*z0)*(y1*z2-y2*z1)+(z0*x1-z1*x0)*(z1*x2-z2*x1)+(x0*y1-x1*y0)*(x1*y2-x2*y1);
a12=atan(-sqrt(x4*x4+y4*y4+z4*z4)/u4);
x5=(z1*x2-z2*x1)*(x2*y0-x0*y2)-(x1*y2-x2*y1)*(z2*x0-z0*x2);
y5=(x1*y2-x2*y1)*(y2*z0-y0*z2)-(y1*z2-y2*z1)*(x2*y0-x0*y2);
z5=(y1*z2-y2*z1)*(z2*x0-z0*x2)-(z1*x2-z2*x1)*(y2*z0-y0*z2);
u5=(y1*z2-y2*z1)*(y2*z0-y0*z2)+(z1*x2-z2*x1)*(z2*x0-z0*x2)+(x1*y2-x2*y1)*(x2*y0-x0*y2);
a20=atan(-sqrt(x5*x5+y5*y5+z5*z5)/u5);
s=(a01+a12+a20-pi)*ISGN; // ISGN=1 only; since ISGN=-1 is impossible in our case
h=tan(s/2)*tan((s-a01)/2)*tan((s-a12)/2)*tan((s-a20)/2);
if (h<0) h=-h;
s=sqrt(h);
h=atan(s)/pi;
*(endoHnnC+nn)=h;
}
}
//TRACE("\nNendoB=%d NendoC=%d",NendoB,NendoC);
printf("\nNendoB=%d NendoC=%d\n",NendoB,NendoC);
bsptime[0]=clock();
printf("before BSPitmm-begin=%f,\n",(bsptime[0]-starttime)/CLK_TCK);
cout<<"Num of CPU: "<<cpunum<<endl;
//-------------------- modified by ALF at 2008-8-19 begin -------------------->
//add: store the solid angle of epicardial triangle 保存心外膜刚体三角形
vector<float> epicHnn;
epicHnn.resize(Nepic*ND3*(NL-2)*2, 0);
for(nn=0;nn<Nepic;nn++) {
// ---- measurement location -------
ax=HRTx0+epicX[nn]*tmswf[0][0]+epicY[nn]*tmswf[0][1]+epicZ[nn]*tmswf[0][2];
ay=HRTy0+epicX[nn]*tmswf[1][0]+epicY[nn]*tmswf[1][1]+epicZ[nn]*tmswf[1][2];
az=HRTz0+epicX[nn]*tmswf[2][0]+epicY[nn]*tmswf[2][1]+epicZ[nn]*tmswf[2][2];
for(i=0;i<(NL-2)*2;i++) {
n0=*(tnd[0]+i)-1;
x0=*(r[0]+n0)-ax;
y0=*(r[1]+n0)-ay;
z0=*(r[2]+n0)-az;
n1=*(tnd[1]+i)-1;
x1=*(r[0]+n1)-ax;
y1=*(r[1]+n1)-ay;
z1=*(r[2]+n1)-az;
n2=*(tnd[2]+i)-1;
x2=*(r[0]+n2)-ax;
y2=*(r[1]+n2)-ay;
z2=*(r[2]+n2)-az;
x3=(z2*x0-z0*x2)*(x0*y1-x1*y0)-(x2*y0-x0*y2)*(z0*x1-z1*x0);
y3=(x2*y0-x0*y2)*(y0*z1-y1*z0)-(y2*z0-y0*z2)*(x0*y1-x1*y0);
z3=(y2*z0-y0*z2)*(z0*x1-z1*x0)-(z2*x0-z0*x2)*(y0*z1-y1*z0);
u3=(y2*z0-y0*z2)*(y0*z1-y1*z0)+(z2*x0-z0*x2)*(z0*x1-z1*x0)+(x2*y0-x0*y2)*(x0*y1-x1*y0);
a01=atan(-sqrt(x3*x3+y3*y3+z3*z3)/u3);
x4=(z0*x1-z1*x0)*(x1*y2-x2*y1)-(x0*y1-x1*y0)*(z1*x2-z2*x1);
y4=(x0*y1-x1*y0)*(y1*z2-y2*z1)-(y0*z1-y1*z0)*(x1*y2-x2*y1);
z4=(y0*z1-y1*z0)*(z1*x2-z2*x1)-(z0*x1-z1*x0)*(y1*z2-y2*z1);
u4=(y0*z1-y1*z0)*(y1*z2-y2*z1)+(z0*x1-z1*x0)*(z1*x2-z2*x1)+(x0*y1-x1*y0)*(x1*y2-x2*y1);
a12=atan(-sqrt(x4*x4+y4*y4+z4*z4)/u4);
x5=(z1*x2-z2*x1)*(x2*y0-x0*y2)-(x1*y2-x2*y1)*(z2*x0-z0*x2);
y5=(x1*y2-x2*y1)*(y2*z0-y0*z2)-(y1*z2-y2*z1)*(x2*y0-x0*y2);
z5=(y1*z2-y2*z1)*(z2*x0-z0*x2)-(z1*x2-z2*x1)*(y2*z0-y0*z2);
u5=(y1*z2-y2*z1)*(y2*z0-y0*z2)+(z1*x2-z2*x1)*(z2*x0-z0*x2)+(x1*y2-x2*y1)*(x2*y0-x0*y2);
a20=atan(-sqrt(x5*x5+y5*y5+z5*z5)/u5);
s=(a01+a12+a20-pi)*ISGN; // ISGN=1 only; since ISGN=-1 is impossible in our case
h=tan(s/2)*tan((s-a01)/2)*tan((s-a12)/2)*tan((s-a20)/2);
if (h<0) h=-h;
s=sqrt(h);
h=atan(s)/pi;
epicHnn[nn] = h;
}
}
//---- body surface potential distribution [time(msec)]体表的电势分布
nsnrt=mBCL;
nTimeStep=0;
nVCG=0;//nVCG_old=0;//by sf 090401
itbuf=0;
bufGRD=(float)0;
for(short int n=0;n<2;n++) {
for(short int m=0;m<3;m++) {
bufVCG[n][m]=(float)0;
}
}
mTime=maxXctStep*3;
iTime=3*ND;
//printf("\nmTime1=%d",(mTime/3+2));
gatheralldpl = (float**)malloc((mTime/3+1)*sizeof(float)); //for 1--mTime/3
gatherallijk = (int**)malloc((mTime/3+1)*sizeof(int));
countallijk = (int*)malloc((mTime/3+1)*sizeof(int));
countallijk_reduce = (int*)malloc((mTime/3+1)*sizeof(int));
for(i=0;i<=mTime;i=i+3)
{
*(countallijk+i/3)=0; //by sf 090621
*(countallijk_reduce+i/3)=0;
}
// isumloops记录每个iTime循环次数的总和
// iloops[3]0行: [0,0]存dipole总数,其余存dipole数量
// 1行: 对dipole数量排序
// 2行:排序后dipole对应的iTime序号
// itask[2]0行:存各MPI进程的任务分配的进程号
// 1行:每个进程分别各自存分配到的iTime号,其中0元素存分到的任务个数
for(i=0;i<2;i=i+1)
{
itask[i] = (int *)malloc((mTime/3+1)*sizeof(int));
for(j=0;j<=mTime;j=j+3)
{
*(itask[i]+j/3)=777;
}
}
for(i=0;i<3;i=i+1)
{
iloops[i] = (int *)malloc((mTime/3+1)*sizeof(int));
for(j=0;j<=mTime;j=j+3)
{
*(iloops[i]+j/3)=-8;
}
}
for(j=0;j<=mTime;j=j+3)
{
*(iloops[2]+j/3)=j;
}
//printf("before BSPitmm-begin=%f,\n",(bsptime[0]-starttime)/CLK_TCK);
iTimebegin=1,iTimeend=mTime/3;
int loopnum=0,loop=0;
bsptime[0] =clock();
printf("mTime:%d iTimebegin:%d iTimeend:%d\n", mTime, iTimebegin, iTimeend);
#pragma omp parallel for // OpenMP--begin //by sf 090621计算每个iTime的dipole数量
for(i=iTime;i<=mTime;i=i+3)
{
int tid=omp_get_thread_num(),tnum=omp_get_num_threads();
corenum=tnum;
*(iloops[0]+i/3)=BSPitmmcount(i);
*(iloops[1]+i/3)=*(iloops[0]+i/3);//为了后面dipole排序
//isumdipoles=isumdipoles+*(iloops[0]+i/3);//myid=0计算的dipole和其他节点不一样,why?god save me;查名原因:多线程共享,累加出错
*(itask[0]+i/3)=i;
if (*(iloops[0]+i/3)>0)
{
//printf("malloc j=%d,myid=%d,*(countallijk+i/3)=%d,*(countallijk_reduce+j/3)=%d\n",j,myid,*(countallijk+j/3),*(countallijk_reduce+j/3));
gatherallijk[i/3] = (int*)malloc(*(iloops[0]+i/3)*3*sizeof(int));
gatheralldpl[i/3] = (float*)malloc(*(iloops[0]+i/3)*3*sizeof(float));
for(j=0;j<*(iloops[0]+i/3);j=j+1)
{
*(gatherallijk[i/3]+j)=0;
*(gatheralldpl[i/3]+j)=float(0.0);
};
}
//printf("%d iloops\n",*(iloops[0]+i/3));//printf("iTime=%d,loopcount=%d\n",loop,*(iloops+loop/3));
}
/*
for(i=1;i<=iTimeend;i++)
{
//printf("i=%d,iloops[0]=%d,itask[0]=%d,itask[1]=%d,myid=%d\n",i,*(iloops[0]+i),*(itask[0]+i),*(itask[1]+i),myid);
isumdipoles=isumdipoles+*(iloops[0]+i);
}
*/
//ofstream out("E:\\out.txt");
//iTimebegin=1,iTimeend=mTime/3;
//dipole排序 *(iloops[1]+0)存放暂存数据
for(loop=1;loop<=iTimeend;loop++)
{
loopnum=loop;
for(i=loop+1;i<=iTimeend;i++){
if (*(iloops[1]+loopnum)<*(iloops[1]+i)){
loopnum=i;
}
}
*(iloops[1]+0)=*(iloops[1]+loopnum);
*(iloops[2]+0)=*(iloops[2]+loopnum);
*(iloops[1]+loopnum)=*(iloops[1]+loop);
*(iloops[2]+loopnum)=*(iloops[2]+loop);
*(iloops[1]+loop)=*(iloops[1]+0);
*(iloops[2]+loop)=*(iloops[2]+0);
//cout<<"dipole数量:"<<*(iloops[1]+loop)<<", dipole对应iTime号:"<<*(iloops[2]+loop)<<endl;
//cout<<"dipole数量:"<<*(iloops[1]+loop)<<", dipole对应iTime号:"<<*(iloops[2]+loop)<<endl;
//out<<""<<*(iloops[1]+loop)<<","<<endl;
};
//out.close();
//091212--100211 sumdipole=dipole总数,dipolep=平均dipole数,dipole0=MPI进程0 dipole数,
//iteration0=MPI进程0 iteration数,turn=0,mTimeby0=[1,mTimeby0]--其他进程;(mTimeby0,iTimeEND]--进程0,count=1,head=计数
int sumdipole=0,dipolep=0,dipole0=0,iteration0=0,turn=0,mTimeby0=0,count=1,head=1;
// GPU、CPU计算的diploe数
int dipole_gpu = 0, dipole_cpu = 0;
int tail,tailbegin,tailend,exi,ldipole,count1; //[1,gpuend) [gpuend,cpuend](cpuend,tail)[tail,*(itask[1]+0)]
//算dipole总数
for(loop=1;loop<=iTimeend;loop++)
{
sumdipole=sumdipole+*(iloops[0]+loop);
};
//平均dipole数
dipolep=sumdipole/cpunum;
cout<<"dipolep = "<<dipolep<<" sumdiploep = "<<sumdipole<<endl;
// 调用GPU计算的数量和调用CPU计算的数量
int gpu_op_num, cpu_op_num;
if(cpunum % 2 == 0){
cpu_op_num = gpu_op_num = cpunum / 2;
}else{
// GPU多分点
gpu_op_num = cpunum / 2 + 1;
cpu_op_num = cpunum / 2;
}
// isumloops记录每个iTime循环次数的总和
// iloops[3]0行: [0,0]存dipole总数,其余存dipole数量
// 1行: 对dipole数量排序
// 2行:排序后dipole对应的iTime序号
// itask[2]0行:存各MPI进程的任务分配的进程号
// 1行:每个进程分别各自存分配到的iTime号,其中0元素存分到的任务个数
/*
dipole0=0;
for(loop=iTimeend;loop>=1;loop--)//给进程0分配尾部数量<=平均dipole的iteration
{
dipole0=dipole0+*(iloops[1]+loop);
if (dipole0 > dipolep)
{
break;
};
*(itask[0]+loop)=0;
};
*/
//mTimeby0=loop;//未分配的任务包含mTimeby0 [1,mTimeby0]给其他进程
//mTimeby0=iTimeend;
int msumdipole=0,mdipolep=0,tdipole=0,predictend=0,gpuend=1,cpuend=iTimeend;
//msumdipole=每个MPI进程分配到的dipole数,mdipolep=每个线程的平均dipole数,
//gpuspeed=gpu加速比,tdipole临时值,predictend=预测动态调度中间点(t0和其他线程相遇点);
//[1,gpuend),(cpuend,iTimeend]静态调度;[gpuend,cpuend]动态调度,数量:cpuend-gpuend+1;
cout<<"iTimeend:"<<iTimeend<<endl;
turn=1;
//for(loop=1;loop<=mTimeby0;loop++)
for(loop=1;loop<=iTimeend;loop++)
{
*(itask[0]+loop)=turn;
turn++;
if (turn>cpunum-1) turn=1;
};
// isumloops记录每个iTime循环次数的总和
// iloops[3]0行: [0,0]存dipole总数,其余存dipole数量
// 1行: 对dipole数量排序
// 2行:排序后dipole对应的iTime序号
// itask[2]0行:存各MPI进程的任务分配的进程号
// 1行:每个进程分别各自存分配到的iTime号,其中0元素存分到的任务个数
//int msumdipole=0,mdipolep=0,tdipole=0,predictend=0,gpuend=1,cpuend=iTimeend;
//msumdipole=每个MPI进程分配到的dipole数,mdipolep=每个线程的平均dipole数,
//gpuspeed=gpu加速比,tdipole临时值,predictend=预测动态调度中间点(t0和其他线程相遇点);
//[1,gpuend),(cpuend,iTimeend]静态调度;[gpuend,cpuend]动态调度,数量:cpuend-gpuend+1;
head=1;
for(loop=1;loop<=iTimeend;loop++)
{
//printf("i=%d,iloops[0]=%d,itask[0]=%d,itask[1]=%d,myid=%d\n",loop,*(iloops[0]+loop),*(itask[0]+loop),*(itask[1]+loop),myid);
//if (myid==*(itask[0]+loop))
{
*(itask[1]+head)=*(iloops[2]+loop);
head++;
msumdipole=msumdipole+*(iloops[0]+*(itask[1]+loop)/3);
};
};
//*(itask[1])每个进程保存自己的iTime,*(itask[1]+0)保存最后一个任务的序号,也是任务个数
*(itask[1]+0)=head-1;
cout<<"*(itask[1]+0):"<<*(itask[1]+0)<<endl;
// iloops[3]0行: [0,0]存dipole总数,其余存dipole数量
// 1行: 对dipole数量排序
// 2行:排序后dipole对应的iTime序号
// itask[2]0行:存各MPI进程的任务分配的进程号
// 1行:每个进程分别各自存分配到的iTime号,其中0元素存分到的任务个数
// mdipolep O到P分到的diploe数量
mdipolep=msumdipole*gpuspeed/(gpuspeed+corenum-1);
for(loop=1;loop<=*(itask[1]+0);loop++)
{
// tdipole线程线程数量
tdipole=tdipole+*(iloops[0]+*(itask[1]+loop)/3);
if (tdipole>=mdipolep)
break;
};
// predictend就是论文中的P
predictend=loop-1;
//printf("gpuspeed=%d,corenum=%d,loop=%d,predictend=%d,tdipole=%d,mdipolep=%d\n",gpuspeed,corenum,loop,predictend,tdipole,mdipolep);
int gpuLoop, cpuLoop;
gpuLoop = 1;
cpuLoop = iTimeend;
#pragma omp parallel shared(gpuLoop, cpuLoop) //private(iTime) //预跑+动态 //by sf 090828 OpenMP--begin
{
int tid=omp_get_thread_num();
int tnum=omp_get_num_threads(),myloop,myiTime;
int s=2*tid-1,k=2*(tnum-1);
double tbegin=clock(),tend=0.0;
int dipolesum=0,iterationsum=0;
//printf("11(itask0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,count=%d,iTimebegin=%d,iTimeend=%d\n",*(itask[1]+0),myid,tid,useGPU,GPUnum,count,iTimebegin,iTimeend);
if (useGPU==1 && tid==0 && GPUnum>0)
{
//printf("112(itask0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,count=%d,iTimebegin=%d,iTimeend=%d\n",*(itask[1]+0),myid,tid,useGPU,GPUnum,count,iTimebegin,iTimeend);
gpu_transdata(epicX_old,epicY_old,epicZ_old,tnd,r,rn,endoBx,endoBy,endoBz,endoCx,endoCy,endoCz,tmswf);
//printf("113(itask0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,count=%d,iTimebegin=%d,iTimeend=%d\n",*(itask[1]+0),myid,tid,useGPU,GPUnum,count,iTimebegin,iTimeend);
}
threadnum=tnum;
//printf("13(itask0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,count=%d,iTimebegin=%d,iTimeend=%d\n",*(itask[1]+0),myid,tid,useGPU,GPUnum,count,iTimebegin,iTimeend);
if (tid==0)
{
//myloop=1;//myloop是计数器,是位置,坐标,不乘3
//while (myloop < gpuend)
while(gpuLoop)
{
// 获得偶侄子数量
//dipolesum=dipolesum+*(iloops[0]+*(itask[1]+myloop)/3);
dipolesum=dipolesum+*(iloops[0]+*(itask[1]+gpuLoop)/3);
iterationsum++;
//printf("gsNO:=%d,itask[1]+0=%d,myid=%d,tid=%d,useGPU=%d,Gnum=%d,iTime=%d,dipole=%d\n",myloop,*(itask[1]+0),myid,tid,useGPU,GPUnum,*(itask[1]+myloop),*(iloops[0]+*(itask[1]+myloop)/3));
//BSPitmm(*(itask[1]+myloop), tnd, hnn, endoHnnA, endoHnnB, endoHnnC,endoPOT,VCGs,nsnrt, &epicHnn[0], &epicPOT[*(itask[1]+myloop)/3-1][0]);
BSPitmm(*(itask[1]+gpuLoop), tnd, hnn, endoHnnA, endoHnnB, endoHnnC,endoPOT,VCGs,nsnrt, &epicHnn[0], &epicPOT[*(itask[1]+gpuLoop)/3-1][0]);
//myloop++;
gpuLoop++;
if(gpuLoop >= predictend){
break;
}
};
}
else
{
//myloop=(tail-1)-(tid-1);
//while (myloop > cpuend)
while(cpuLoop)
{
dipolesum=dipolesum+*(iloops[0]+*(itask[1]+cpuLoop)/3);
iterationsum++;
//printf("csNO:=%d,itask[1]+0=%d,myid=%d,tid=%d,useGPU=%d,Gnum=%d,iTime=%d,dipole=%d\n",myloop,*(itask[1]+0),myid,tid,useGPU,GPUnum,*(itask[1]+myloop),*(iloops[0]+*(itask[1]+myloop)/3));
//BSPitmm(*(itask[1]+myloop), tnd, hnn, endoHnnA, endoHnnB, endoHnnC,endoPOT,VCGs,nsnrt, &epicHnn[0], &epicPOT[*(itask[1]+myloop)/3-1][0]);
BSPitmm(*(itask[1]+cpuLoop), tnd, hnn, endoHnnA, endoHnnB, endoHnnC,endoPOT,VCGs,nsnrt, &epicHnn[0], &epicPOT[*(itask[1]+cpuLoop)/3-1][0]);
//s=k-s;myloop=myloop-s;//myloop=myloop-(tnum-1);
cpuLoop--;
if(cpuLoop <= predictend){
break;
}
};
};
}
/*
//for(i=1;i<=iTimeend;i++)
//{
// printf("i=%d,iloops[0]=%d,itask[0]=%d,itask[1]=%d,myid=%d\n",i,*(iloops[0]+i),*(itask[0]+i),*(itask[1]+i),myid);
//}
int gwindow=1,cwindow=3;
tail=*(itask[1]+0);
gpuend=predictend-gwindow;//3和6可以自己定
cpuend=predictend+(corenum-1)*cwindow;
tdipole=0;
// P点diploe数
ldipole=*(iloops[0]+*(itask[1]+predictend)/3)*1.2;
for(loop=*(itask[1]+0);loop>=1;loop--)
{
//printf("e loop=%d,tail=%d,*(itask[1]+loop)=%d,*(itask[1]+tail)=%d,myid=%d,tdipole=%d,ldipole=%d\n",loop,tail,*(itask[1]+loop),*(itask[1]+tail),myid,tdipole,ldipole);
//
if ( *(iloops[0]+*(itask[1]+loop)/3) > 30 )
{
tdipole=tdipole+*(iloops[0]+*(itask[1]+loop)/3);
if (tdipole > ldipole) break;
exi=*(itask[1]+loop);
*(itask[1]+loop)=*(itask[1]+tail);
*(itask[1]+tail)=exi;
//printf("exi loop=%d,tail=%d,*(itask[1]+loop)=%d,*(itask[1]+tail)=%d,myid=%d,tdipole=%d,ldipole=%d\n",loop,tail,*(itask[1]+loop),*(itask[1]+tail),myid,tdipole,ldipole);
tail--;
}
};
if (gpuend<=0) gpuend=1;
if (cpuend>*(itask[1]+0)) cpuend=*(itask[1]+0);
if (tail <= cpuend) tail=cpuend+1;
printf("dipolep=%d,sumdipole=%d,MPI-its=%d,msumdipole=%d,myid=%d,pre-end=%d,gpuend=%d,cpuend=%d\n",dipolep,sumdipole,*(itask[1]+0),msumdipole,myid,predictend,gpuend,cpuend);
if (threadnum>0) omp_set_num_threads(threadnum);
count=gpuend;iTimebegin=gpuend;iTimeend=cpuend;
count1=tail;tailbegin=tail;tailend=*(itask[1]+0);
printf("tail=%d,tailbegin=%d,tailend=%d,ldipole=%d,count1=%d,myid=%d\n",tail,tailbegin,tailend,ldipole,count1,myid);
//for(i=1;i<=mTime/3;i++)
//{
// printf("ii=%d,iloops[0]=%d,itask[0]=%d,itask[1]=%d,myid=%d\n",i,*(iloops[0]+i),*(itask[0]+i),*(itask[1]+i),myid);
//}
double MPItimebegin=clock(),MPItimeend=0.0;
#pragma omp parallel //private(iTime) //预跑+动态 //by sf 090828 OpenMP--begin
{
int tid=omp_get_thread_num(),tnum=omp_get_num_threads(),myloop,myiTime;
int s=2*tid-1,k=2*(tnum-1);
double tbegin=clock(),tend=0.0;
int dipolesum=0,iterationsum=0;
//printf("11(itask0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,count=%d,iTimebegin=%d,iTimeend=%d\n",*(itask[1]+0),myid,tid,useGPU,GPUnum,count,iTimebegin,iTimeend);
if (useGPU==1 && tid==0 && GPUnum>0)
{
//printf("112(itask0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,count=%d,iTimebegin=%d,iTimeend=%d\n",*(itask[1]+0),myid,tid,useGPU,GPUnum,count,iTimebegin,iTimeend);
gpu_transdata(epicX_old,epicY_old,epicZ_old,tnd,r,rn,endoBx,endoBy,endoBz,endoCx,endoCy,endoCz,tmswf);
//printf("113(itask0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,count=%d,iTimebegin=%d,iTimeend=%d\n",*(itask[1]+0),myid,tid,useGPU,GPUnum,count,iTimebegin,iTimeend);
}
threadnum=tnum;
//printf("13(itask0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,count=%d,iTimebegin=%d,iTimeend=%d\n",*(itask[1]+0),myid,tid,useGPU,GPUnum,count,iTimebegin,iTimeend);
if (tid==0)
{
myloop=1;//myloop是计数器,是位置,坐标,不乘3
while (myloop < gpuend)
{
// 获得偶侄子数量
dipolesum=dipolesum+*(iloops[0]+*(itask[1]+myloop)/3);
iterationsum++;
//printf("gsNO:=%d,itask[1]+0=%d,myid=%d,tid=%d,useGPU=%d,Gnum=%d,iTime=%d,dipole=%d\n",myloop,*(itask[1]+0),myid,tid,useGPU,GPUnum,*(itask[1]+myloop),*(iloops[0]+*(itask[1]+myloop)/3));
BSPitmm(*(itask[1]+myloop), tnd, hnn, endoHnnA, endoHnnB, endoHnnC,endoPOT,VCGs,nsnrt, &epicHnn[0], &epicPOT[*(itask[1]+myloop)/3-1][0]);
myloop++;
};
}
else
{
myloop=(tail-1)-(tid-1);
while (myloop > cpuend)
{
dipolesum=dipolesum+*(iloops[0]+*(itask[1]+myloop)/3);iterationsum++;
//printf("csNO:=%d,itask[1]+0=%d,myid=%d,tid=%d,useGPU=%d,Gnum=%d,iTime=%d,dipole=%d\n",myloop,*(itask[1]+0),myid,tid,useGPU,GPUnum,*(itask[1]+myloop),*(iloops[0]+*(itask[1]+myloop)/3));
BSPitmm(*(itask[1]+myloop), tnd, hnn, endoHnnA, endoHnnB, endoHnnC,endoPOT,VCGs,nsnrt, &epicHnn[0], &epicPOT[*(itask[1]+myloop)/3-1][0]);
s=k-s;myloop=myloop-s;//myloop=myloop-(tnum-1);
};
};
fprintf(stdout,"static over,tid=%d,myid=%d,dipolesum=%d,iterationsum=%d\n", tid,myid,dipolesum,iterationsum);
#pragma omp critical
{
myiTime=count;count++;
//printf("3-1myiTime=%d,*(itask[1]+0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d\n",myiTime,*(itask[1]+0),myid,tid,useGPU,GPUnum);
if (useGPU==1 && tid==0)// && GPUnum>0)
{
myloop=iTimebegin;iTimebegin++;
//printf("3-3myiTime=%d,*(itask[1]+0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,iTime=%d\n",myiTime,*(itask[1]+0),myid,tid,useGPU,GPUnum,*(itask[1]+myloop));
}
else
{
myloop=iTimeend;iTimeend--;
//printf("3-4myiTime=%d,*(itask[1]+0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,iTime=%d\n",myiTime,*(itask[1]+0),myid,tid,useGPU,GPUnum,*(itask[1]+myloop));
};
//printf("3-2myiTime=%d,*(itask[1]+0)=%d,myid=%d,tid=%d,useGPU=%d,GPUnum=%d,iTime=%d\n",myiTime,*(itask[1]+0),myid,tid,useGPU,GPUnum,*(itask[1]+myloop));
}
while (myiTime <= cpuend)
{
dipolesum=dipolesum+*(iloops[0]+*(itask[1]+myloop)/3);iterationsum++;
//printf("D:=%d,itask[1]+0=%d,myid=%d,tid=%d,useGPU=%d,Gnum=%d,iTime=%d,dipole=%d\n",myiTime,*(itask[1]+0),myid,tid,useGPU,GPUnum,*(itask[1]+myloop),*(iloops[0]+*(itask[1]+myloop)/3));
BSPitmm(*(itask[1]+myloop), tnd, hnn, endoHnnA, endoHnnB, endoHnnC,endoPOT,VCGs,nsnrt, &epicHnn[0], &epicPOT[*(itask[1]+myloop)/3-1][0]);
#pragma omp critical
{
myiTime=count;count++;
if (useGPU==1 && tid==0 )//&& GPUnum>0)
{
myloop=iTimebegin;iTimebegin++;
}
else
{
myloop=iTimeend;iTimeend--;
};
}
};
//tail
//
//second-level dynamic scheduling
//
#pragma omp critical
{
myiTime=count1;count1++;
if (useGPU==1 && tid==0)// && GPUnum>0)
{
myloop=tailbegin;tailbegin++;
}
else
{
myloop=tailend;tailend--;
};
}
while (myiTime <= *(itask[1]+0))
{
dipolesum=dipolesum+*(iloops[0]+*(itask[1]+myloop)/3);iterationsum++;
//printf("T:=%d,itask[1]+0=%d,myid=%d,tid=%d,useGPU=%d,Gnum=%d,iTime=%d,dipole=%d\n",myiTime,*(itask[1]+0),myid,tid,useGPU,GPUnum,*(itask[1]+myloop),*(iloops[0]+*(itask[1]+myloop)/3));
BSPitmm(*(itask[1]+myloop), tnd, hnn, endoHnnA, endoHnnB, endoHnnC,endoPOT,VCGs,nsnrt, &epicHnn[0], &epicPOT[*(itask[1]+myloop)/3-1][0]);
#pragma omp critical
{
myiTime=count1;count1++;
if (useGPU==1 && tid==0)// && GPUnum>0)
{
myloop=tailbegin;tailbegin++;
}
else
{
myloop=tailend;tailend--;
};
}
};
//
tend=clock();
fprintf(stdout,"!threadtime = %f,tid=%d,myid=%d,dipolesum=%d,iterationsum=%d\n", (tend-tbegin)/CLK_TCK,tid,myid,dipolesum,iterationsum);
#pragma omp barrier
};//by sf 090828 OpenMP--end
*/
//MPItimeend=clock();
// fprintf(stdout,"!!!MPItime = %f,myid=%d,*(itask[1]+0)=%d\n", (MPItimeend-MPItimebegin)/CLK_TCK,myid,*(itask[1]+0));
// iTime=(numprocs-myid)*3;
//#pragma omp parallel //private(iTime) //jingtai //by sf 090403 OpenMP--begin
//{ //by sf 090403 OpenMP--begin
// int tid=omp_get_thread_num(),tnum=omp_get_num_threads(),myiTime;
// if (useGPU==1 && tid==0 && GPUnum>0)
// {
// gpu_transdata(epicX_old,epicY_old,epicZ_old,tnd,r,rn,endoBx,endoBy,endoBz,endoCx,endoCy,endoCz,tmswf);
// }
// threadnum=tnum;
// #pragma omp critical
// {
// myiTime=iTime;iTime=iTime+numprocs*3;
// }
// while (myiTime <= mTime) {
// BSPitmm(myiTime, tnd, hnn, endoHnnA, endoHnnB, endoHnnC,endoPOT,VCGs,nsnrt, &epicHnn[0], &epicPOT[myiTime/3-1][0]); //jintai iTime
// #pragma omp critical
// {
// //*(iStep+myiTime/3)=myiTime/ND;//*(iStep+nTimeStep)=iTime/ND;
// myiTime=iTime;iTime=iTime+numprocs*3;
// }
// }
//#pragma omp barrier
// };//by sf 090403 OpenMP--end
bsptime[1] =clock();
nTimeStep=mTime/3;nVCG=nTimeStep;//printf("*iTime=%d,nVCG=%d,nTimeStep=%d,iTime_old=%d\n",iTime,nVCG,nTimeStep,iTime_old);//by sf 090402-6
for(int iTime=3*ND;iTime<=mTime;iTime=iTime+3) *(iStep+iTime/3)=iTime/ND; //by sf 090621
//if (myid==1)
//{i=27;printf("27beg-1bcasti=%d,myid=%d,ijk=%d,%d,%d\n",i,myid,*(gatherallijk[i/3]),*(gatherallijk[i/3]+1),*(gatherallijk[i/3]+2));
//};
//for(i=0;i<=mTime;i=i+3)
//{
//printf("****iTime=%d,loopcount=%d\n",i,*(countallijk+i/3));
//};
/*
if (numprocs>1) //so that this can also run with a single process
{
for(i=0;i<=mTime;i=i+3)
{
*(countallijk+i/3)=*(iloops[0]+i/3)*3;
};
float VCGssend[3],POTsend[NL];
for(i=1;i<=mTime/3;i=i+1)
{
if (*(itask[0]+i)>0)
if (myid==0)
{
//printf("++endoPOT[%d]=%f,*(itask[0]+i/3)=%d,*(itask[0]+i)=%d,*(iloops[2]+i)=%d,myid=%d\n",*(iloops[2]+i)/3,*(endoPOT[*(iloops[2]+i)/3]+3),*(itask[0]+i),*(itask[0]+i),*(iloops[2]+i),myid);
j=*(iloops[2]+i)/3;
int sendID=*(itask[0]+i);
//*(iloops[2]+i)/3 is the corresponding iTime/3; column 0 of the endoPOT array is used, so it should be iTime/3-1; VCGs is stored column-wise and its column 0 is unused
//POT is stored column-wise
//printf("j=%d,*(iloops[1]+j)*3=%d,myid=%d\n",j,*(iloops[1]+i)*3,myid);
*(VCGs[0]+j)=VCGssend[0];*(VCGs[1]+j)=VCGssend[1];*(VCGs[2]+j)=VCGssend[2];
for(int n=0;n<nPos;n++) {
*(POT[n]+j)=POTsend[n];
}
//printf("--endoPOT[%d]=%f,*(itask[0]+i/3)=%d,*(itask[0]+i)=%d,*(iloops[2]+i)=%d,myid=%d\n",*(iloops[2]+i)/3,*(endoPOT[*(iloops[2]+i)/3]+3),*(itask[0]+i),*(itask[0]+i),*(iloops[2]+i),myid);
}
else
{
if (myid==*(itask[0]+i))
{
j=*(iloops[2]+i)/3;
//printf("j=%d,*(iloops[1]+j)*3=%d,myid=%d\n",j,*(iloops[1]+i)*3,myid);
VCGssend[0]=*(VCGs[0]+j);VCGssend[1]=*(VCGs[1]+j);VCGssend[2]=*(VCGs[2]+j);
for(int n=0;n<nPos;n++) {
POTsend[n]=*(POT[n]+j);
}
}
//printf("##endoPOT[%d]=%f,*(itask[0]+i)=%d,*(itask[0]+i)=%d,*(iloops[2]+i)=%d,myid=%d\n",*(iloops[2]+i)/3,*(endoPOT[*(iloops[2]+i)/3]+3),*(itask[0]+i),*(itask[0]+i),*(iloops[2]+i),myid);
};
};
};
*/
bsptime[2] =clock();
//if (myid==0)
{
CFile f2;
CFileException e2;
if (!f2.Open( dataPath+"tour.dpn ", CFile::modeCreate | CFile::modeWrite, &e2 )) {
#ifdef _DEBUG
afxDump << "File could not be opened " << e2.m_cause << "\n";
#endif
}
int idpl;//printf("mTime=%d\n",mTime);
for(iTime=3;iTime<=mTime;iTime=iTime+3)
{
idpl=*(countallijk+(iTime/3))/3;
f2.Write(&iTime,2);//f2.Write(&iTime0,2);
f2.Write(&idpl,2);//f2.Write(&idpl,2);
//fprintf(fptime,"%d\n",iTime);
//fprintf(fptime,"%d\n",idpl);
};
f2.Close();
//fprintf(stdout,"f2.Close();,myid=%d\n",myid);fflush(stdout);
//}//single-tour.dpn-end
// fclose(fptime);
//printf("f2-over\n");
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'d');
//filepath.SetAt(index+2,'p');
//filepath.SetAt(index+3,'l');
//#pragma omp single
// {
CFile f3;
CFileException e3;
if (!f3.Open(dataPath+"tour.dpl ", CFile::modeCreate | CFile::modeWrite, &e3 )) {
#ifdef _DEBUG
afxDump << "File could not be opened " << e3.m_cause << "\n";
#endif
}
//fptime=fopen(dataPath+"dpl.txt","w") ;
short int ii,jj,kk;
for(iTime=3;iTime<=mTime;iTime=iTime+3)
{
//fprintf(fptime,"%d\n",iTime);
f3.Write(&iTime,2);//f.Write(&iTime0,2);//from line 4070
for(j=0;j<*(countallijk+iTime/3);j=j+3)
{
ii=*(gatherallijk[iTime/3]+j);f3.Write(&ii,2);//f.Write(gatherallijk[iTime/3]+j,sizeof(int));
jj=*(gatherallijk[iTime/3]+j+1);f3.Write(&jj,2);//f.Write(gatherallijk[iTime/3]+j+1,sizeof(int));
kk=*(gatherallijk[iTime/3]+j+2);f3.Write(&kk,2);//f.Write(gatherallijk[iTime/3]+j+2,sizeof(int));
//fprintf(fptime,"%d\n",*(gatherallijk[iTime/3]+j));fprintf(fptime,"%d\n",*(gatherallijk[iTime/3]+j+1));fprintf(fptime,"%d\n",*(gatherallijk[iTime/3]+j+2));
//f.Write(gatherijk+j+1,2);
//f.Write(gatherijk+j+2,2);
f3.Write(gatheralldpl[iTime/3]+j,4*3);//f.Write(gatherdpl+j,4*3);
//fprintf(fptime,"%f\n",*(gatheralldpl[iTime/3]+j));fprintf(fptime,"%f\n",*(gatheralldpl[iTime/3]+j+1));fprintf(fptime,"%f\n",*(gatheralldpl[iTime/3]+j+2));
//f.Write(gatherdpl+j+1,4);
//f.Write(gatherdpl+j+2,4);
};
};
f3.Close();
//printf("f-over\n"); fclose(fptime);
//}//single-tour.dpl-end
//fprintf(stdout,"f3.Close();,myid=%d\n",myid);fflush(stdout);
//by sf 090408 write dpl --end
//#pragma omp single //single tour.ecp begin
//{
// Save endocardial potential data
CFile f4;
CFileException e4;
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'e');
//filepath.SetAt(index+2,'c');
//filepath.SetAt(index+3,'p');
if( !f4.Open( dataPath+"tour.ecp ", CFile::modeCreate | CFile::modeWrite, &e4 ) )
{
#ifdef _DEBUG
afxDump << "File could not be opened " << e4.m_cause << "\n";
#endif
}
//FILE *fptime;//by sf
//
//fptime=fopen(dataPath+"ecp-gpu.txt","w") ;//by sf
//fprintf(fptime,"%d\n",nTimeStep);//by sf
f4.Write(&nTimeStep,2);
for(i=1;i<=nTimeStep;i++) {f4.Write(iStep+i,2);
//fprintf(fptime,"%d\n",*(iStep+i));
}
f4.Write(&NendoB, 2);
f4.Write(&NendoC, 2);
//fprintf(fptime,"%d\n",NendoB);
//fprintf(fptime,"%d\n",NendoC);
for(i=0;i<NendoB;i++) {
f4.Write(&endoBx[i], 2);
f4.Write(&endoBy[i], 2);
f4.Write(&endoBz[i], 2);
//fprintf(fptime,"%d\n",endoBx[i]);
//fprintf(fptime,"%d\n",endoBy[i]);
//fprintf(fptime,"%d\n",endoBz[i]);
}
for(i=0;i<NendoC;i++) {
f4.Write(&endoCx[i], 2);
f4.Write(&endoCy[i], 2);
f4.Write(&endoCz[i], 2);
//fprintf(fptime,"%d\n",endoCx[i]);
//fprintf(fptime,"%d\n",endoCy[i]);
//fprintf(fptime,"%d\n",endoCz[i]);
}
// fclose(fptime);
//fptime=fopen(dataPath+"ecp-endoPOT-gpu.txt","w") ;
//TRACE("\nTotal Time Step: %d, Total Endocardial Points: %d+%d",nTimeStep,NendoB,NendoC);
for(i=0;i<nTimeStep;i++) {
for(j=0;j<(NendoB+NendoC);j++) {
f4.Write(endoPOT[i]+j,4);
//fprintf(fptime,"%f\n",*(endoPOT[i]+j));
}
}
f4.Close();
//fclose(fptime);
//} //single tour.ecp end
//fprintf(stdout,"f4.Close();,myid=%d\n",myid);fflush(stdout);
// Save VCG data
// char* pFileName = "f:/VCG/VCG.6";
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'v');
//filepath.SetAt(index+2,'c');
//filepath.SetAt(index+3,'g');
//#pragma omp single //single tour.vcg begin
//{
CFile f5;
CFileException e5;
if( !f5.Open( dataPath+"tour.vcg ", CFile::modeCreate | CFile::modeWrite, &e5 ) )
{
#ifdef _DEBUG
afxDump << "File could not be opened " << e5.m_cause << "\n";
#endif
}
f5.Write(&nVCG,2);
for(j=1;j<=nVCG;j++) {
f5.Write(iStep+j,2);
for(i=0;i<3;i++)
f5.Write(VCGs[i]+j,4);
}
f5.Close();
//} //single tour.vcg end
// ----- save potential data ------
// ++++ eff is obtained to make max. value of ECG =2.0mv ++++
eff=(float)26.5730;
// pFileName = "f:/BSP/BSP.6";
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'b');
//filepath.SetAt(index+2,'s');
//filepath.SetAt(index+3,'p');
//FILE *fptime;//sf
//fptime=fopen(dataPath+"bsp-gpu.txt","w") ;//sf
//#pragma omp single //single tour.bsp begin
//{
CFile f6;
CFileException e6;
if( !f6.Open( dataPath+"tour.bsp ", CFile::modeCreate | CFile::modeWrite, &e6 ) )
{
#ifdef _DEBUG
afxDump << "File could not be opened " << e6.m_cause << "\n";
#endif
}
f6.Write(&nTimeStep,2);
//fprintf(fptime,"%d\n",nTimeStep);//sf
for(i=1;i<=nTimeStep;i++) {f6.Write(iStep+i,2);
//fprintf(fptime,"%d\n",*(iStep+i));//sf
}
for(i=1;i<=nTimeStep;i++) {
int n = 0;
for(n=0;n<NL;n++) {
BSPm=(short int)(eff*(*(POT[n]+i)));
f6.Write(&BSPm,2);
//fprintf(fptime,"%d\n",BSPm);//sf
}
}
f6.Close();
//fprintf(stdout,"f6.Close();nTimeStep=%d,myid=%d\n",nTimeStep,myid);fflush(stdout);
//} //single tour.bsp end
//fclose(fptime);//sf
//-------------------- modified by ALF at 2008-8-19 begin -------------------->
//add: save epicardial potential as well as position
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'e');
//filepath.SetAt(index+2,'p');
//filepath.SetAt(index+3,'c');
//#pragma omp single //single tour.ecp begin
//{
CFile f7;
CFileException e7;
if( !f7.Open( dataPath+"tour.epc ", CFile::modeCreate | CFile::modeWrite, &e7 ) )
{
#ifdef _DEBUG
afxDump << "File could not be opened " << e7.m_cause << "\n";
#endif
}
//fprintf(stdout,"f7.Write(&nTimeStep,sizeof(nTimeStep));Nepic=%d,nTimeStep=%d,myid=%d\n",Nepic,nTimeStep,myid);fflush(stdout);
f7.Write(&nTimeStep,sizeof(nTimeStep));
for(i=1;i<=nTimeStep;i++)
f7.Write(iStep+i,sizeof(short int));
//fprintf(stdout,"for(i=1;i<=nTimeStep;i++) ;Nepic=%d,nTimeStep=%d,myid=%d\n",Nepic,nTimeStep,myid);fflush(stdout);
f7.Write(&Nepic, sizeof(Nepic));
for(i=0;i<Nepic;i++) {
f7.Write(&epicX[i], sizeof(short int));
f7.Write(&epicY[i], sizeof(short int));
f7.Write(&epicZ[i], sizeof(short int));
}
//fprintf(stdout,"for(i=0;i<Nepic;i++);Nepic=%d,nTimeStep=%d,myid=%d\n",Nepic,nTimeStep,myid);fflush(stdout);
//FILE *fptime;
//fptime=fopen(dataPath+"Nepic1.txt","a") ;
//fprintf(fptime,"**********useGPU=%d****nTimeStep=%d**Nepic=%d*\n",useGPU,nTimeStep,Nepic);
//TRACE("\nTotal Time Step: %d, Total Endocardial Points: %d+%d",nTimeStep,NendoB,NendoC);
for(i=0;i<nTimeStep;i++) {
for(j=0;j<Nepic;j++) {
f7.Write(&epicPOT[i][j],sizeof(float));
//fprintf(fptime,"%f\n",epicPOT[i][j]);
}
}
f7.Close(); //fclose(fptime);
//fprintf(stdout,"f7.Close();,myid=%d\n",myid);fflush(stdout);
//-------------------- modified by ALF at 2008-8-19 end --------------------<
//printf("free1-,mtime=%d\n",mTime);
//} //single tour.epc end
//#pragma omp barrier
//};//by sf 090403 OpenMP--end
}; // if (myid==0){
//fprintf(stdout,"***comunicate--------111--,myid=%d\n",myid);fflush(stdout);
//for(iTime=3;iTime<=mTime;iTime=iTime+3)
//{
// //free(gatheralldpl[iTime/3]);free(gatherallijk[iTime/3]);
//};
// fprintf(stdout,"***comunicate-1111-ok,myid=%d\n",myid);fflush(stdout);
//free(countallijk);//free(countallijk_reduce); //free(iTimetid);
//free(schedulelist);//free(iTimeloops);
//fprintf(stdout,"***comunicate-2222-ok,myid=%d\n",myid);fflush(stdout);
bsptime[3] =clock();
//printf("BSPitmm,begin-end=%f,writefile=%f,useGPU=%d,threadnum=%d\n",(bsptime[1]-bsptime[0])/CLK_TCK,(bsptime[2]-bsptime[1])/CLK_TCK,useGPU,threadnum);
/*if(myid==0)
{
FILE *fptime;
fptime=fopen(dataPath+"gputime.txt","a") ;
fprintf(fptime,"!!!MPItime = %f,myid=%d,*(itask[1]+0)=%d\n", (MPItimeend-MPItimebegin)/CLK_TCK,myid,*(itask[1]+0));
fprintf(stdout,"BSPmitttime=%f,communicate=%f,writefile=%f\n",(bsptime[1]-bsptime[0])/CLK_TCK,(bsptime[2]-bsptime[1])/CLK_TCK,(bsptime[3]-bsptime[2])/CLK_TCK);
fprintf(fptime,"BSPmitttime=%f,communicate=%f,writefile=%f\n",(bsptime[1]-bsptime[0])/CLK_TCK,(bsptime[2]-bsptime[1])/CLK_TCK,(bsptime[3]-bsptime[2])/CLK_TCK);
fclose(fptime);
};*/
for(i=0;i<3;i++) {
free(VCGs[i]);
free(VCGs_reduce[i]);//by sf 090622
free(tnd[i]);
}
for(i=0;i<TSTEP;i++) {
free(endoPOT[i]);
//free(endoPOT_reduce[i]);//by sf 090622
}
//fprintf(stdout,"***comunicate-3333-ok,myid=%d\n",myid);fflush(stdout);
free(hnn);
free(endoHnnA);
free(endoHnnB);
free(endoHnnC);
//fprintf(stdout,"***comunicate-0000-ok,myid=%d\n",myid);fflush(stdout);
//return;
}
//-------------------- modified by ALF at 2008-8-19 begin -------------------->
//modified
void BSPitmm(short int iTime0, short int **tnd,float *hnn, float *endoHnnA, float *endoHnnB, float *endoHnnC,float **endoPOT,float **VCGs,short int nsnrt, float *epicHnn, float *epicPOT) {
//void BSPitmm(short int iTime0, short int **tnd,float *hnn, float *endoHnnA, float *endoHnnB, float *endoHnnC, float *epicHnn, float *epicPOT) {
ASSERT(epicHnn != NULL);
//-------------------- modified by ALF at 2008-8-19 end --------------------<
float aptcalm(short int,short int,short int,short int,short int);
void anfct(short int i, short int j, short int k, float v[3]);
char iCell;
const short int OK_SAV=1;
short int iseqx[12]={ -1, 0, 0, 1, 1, 0, 1, 0, 0,-1,-1, 0 };
short int iseqy[12]={ 0,-1, 0,-1, 0, 1, 0, 1, 0, 1, 0,-1 };
short int iseqz[12]={ 0, 0,-1, 0,-1,-1, 0, 0, 1, 0, 1, 1 };
short int nskip=2;
short int i,j,k,l,ix,iy,iz,icell,l6,jx,jy,jz,jcell;
int nsum,n;
int intvl;
int idpl;
float asd,add,rtmax,gsum,compm,compp,compo,ax,ay,az;
float r1,r3,r5,dr,ds,rv3,bx,by,bz,ECGs;
float der[NL],ders[NL];
double grad[6];
float dpl[3];
float posi, posj, posk;
float r2,GRD;
float tmpdpl;
// endocardial
int n0,n1,n2,ni;
float *surfPOTi,*u1;
short int nhb, eTime;
//long temploc;
int tid=omp_get_thread_num();
int myid, numprocs;
int namelen;
// fprintf(stdout,"BSPitmm !! tid= %d myid= %d numprocs= %d is processor_name= %s,iTime0= %d\n",tid, myid, numprocs, processor_name,iTime0);
//fflush(stdout);
short int countijk=0;//by sf-090329***countijk temporarily records how many ijk triples need to be written, gatherijk[20000],
//float gatherdpl[60000];//by sf-090321***countijk temporarily records how many ijk triples need to be written
float *endoHnnA_old,*POTi_old,VCG_old[3];//by sf-090402-4
endoHnnA_old=(float *) malloc(2*NENDO*ND3*4);//by sf-090402-4
POTi_old=(float *) malloc(NL*4);//by sf-090403-1
float *epicPOTold;
epicPOTold=(float *) malloc(Nepic*4);
for(i=0;i<Nepic;i++) *(epicPOTold+i)=(float)0;
//double bsptimes1[3]={0.0,0.0,0.0};bsptimes1[0] = clock();
surfPOTi=(float *) malloc((NL-2)*2*4);
u1=(float *) malloc((NL-2)*2*4);
if ((surfPOTi==NULL)||(u1==NULL)) {
MessageBox(NULL,"Out of memory !",NULL,MB_OK);
return;// 0;
}
for(ni=0;ni<(NL-2)*2;ni++) {
*(surfPOTi+ni)=(float)0;
*(u1+ni)=(float)0;
}
for(ni=0;ni<(NendoB+NendoC);ni++) {
*(endoHnnA_old+ni)=(float)0;
}
// ------- initialization ---------
for(i=0;i<NL;i++) ders[i]=(float)0;
// Save dipole data
CFile f;
CFileException e;
short int index;
while (1) {
idpl = 0;
asd=(float)0;
add=(float)0;
nsum=0;
rtmax=(float)0;
for(n=0;n<nPos;n++) {
*(POTi_old+n)=(float)0;
der[n]=(float)0;
}
for(n=0;n<3;n++) VCG_old[n]=(float)0;//VCG[n]=(float)0;
//tid=omp_get_thread_num();
//if (useGPU==1 && tid==0) gpu_BSPitmm_Malloc(POTi_old,der,endoHnnA_old,surfPOTi);//Comment by SWF (2009-2-7-15)(For:)//by sf-090402-4
//f.Write(&iTime0,2);//by sf 090329
//add fibre conduction contribution, iCell
if (useGPU==1 && tid==0 && GPUnum>0)
{
gpu_BSPitmm_HostToDevice(POTi_old,der,endoHnnA_old,surfPOTi);
};
for (nhb=0; nhb<nHB; nhb++) {
i=iHB[0][nhb];
j=iHB[1][nhb];
k=iHB[2][nhb];
for (ni=0;ni<mxcycle;ni++) {
eTime=vHB[ni][nhb];
if (eTime==(short int)(iTime0/3)) {
compo=(aptcalm(i,j,k,4,iTime0)+90)/nskip/nskip;
dpl[0]=compo/10;
dpl[1]=compo/10;
dpl[2]=compo;
if (OK_SAV==1) {
//by sf-090329
//f.Write(&i,2);
//f.Write(&j,2);
//f.Write(&k,2);
//for (n=0;n<3;n++) {
// f.Write(&dpl[n],4);
//}
*(gatherallijk[iTime0/3]+countijk)=i;*(gatherallijk[iTime0/3]+countijk+1)=j;*(gatherallijk[iTime0/3]+countijk+2)=k;
*(gatheralldpl[iTime0/3]+countijk)=dpl[0];*(gatheralldpl[iTime0/3]+countijk+1)=dpl[1];*(gatheralldpl[iTime0/3]+countijk+2)=dpl[2];
countijk=countijk+3;
idpl++;
}
posi=HRTx0+i*tmswf[0][0]+j*tmswf[0][1]+k*tmswf[0][2];
posj=HRTy0+i*tmswf[1][0]+j*tmswf[1][1]+k*tmswf[1][2];
posk=HRTz0+i*tmswf[2][0]+j*tmswf[2][1]+k*tmswf[2][2];
// potential distribution generated by
// a single dipole in infinite medium
if (useGPU==1 && tid==0 && GPUnum>0)
{
//gpu_BSPitmm_HostToDevice(POTi,der,endoHnnA,surfPOTi);
gpu_dpl_all(0,posi,posj,posk,nPos,dpl,POTi_old,der,HRTx0,HRTy0,HRTz0,NendoB,NendoC,endoHnnA_old,endoBx,endoBy,endoBz,tmswf,epicPOTold);
//gpu_dpl_nPos(posi,posj,posk,nPos,dpl,POTi_old,der);
//gpu_dpl_Nendo(posi,posj,posk,HRTx0,HRTy0,HRTz0,NendoB,0,dpl,endoHnnA_old,endoBx,endoBy,endoBz,tmswf);
//gpu_dpl_Nendo(posi,posj,posk,HRTx0,HRTy0,HRTz0,NendoC,NendoB,dpl,endoHnnA_old,endoCx,endoCy,endoCz,tmswf);
//gpu_dpl_nPos_2(posi,posj,posk,dpl);
//gpu_BSPitmm_DeviceToHost(POTi,der,endoHnnA,surfPOTi);
}
else
{
///* //sf
for(n=0;n<nPos;n++) {
ax=*(r[0]+n)-posi;
ay=*(r[1]+n)-posj;
az=*(r[2]+n)-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
r5=(float)(r2*r3);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
ds=3*dr/r5;
rv3=1/r3;
bx=dpl[0]*rv3-ax*ds;
by=dpl[1]*rv3-ay*ds;
bz=dpl[2]*rv3-az*ds;
*(POTi_old+n)+=dr*rv3;
*(der+n)+=*(rn[0]+n)*bx+*(rn[1]+n)*by+*(rn[2]+n)*bz;
}
//TRACE("\niCell4 %d %d %d %d %f %f",iTime0,i,j,k,compo,*(POTi+94));
for(n=0;n<NendoB;n++) {
// ---- measurement location -------
ax=HRTx0+endoBx[n]*tmswf[0][0]+endoBy[n]*tmswf[0][1]+endoBz[n]*tmswf[0][2]-posi;
ay=HRTy0+endoBx[n]*tmswf[1][0]+endoBy[n]*tmswf[1][1]+endoBz[n]*tmswf[1][2]-posj;
az=HRTz0+endoBx[n]*tmswf[2][0]+endoBy[n]*tmswf[2][1]+endoBz[n]*tmswf[2][2]-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(endoHnnA_old+n)+=dr*rv3;
}
for(n=0;n<NendoC;n++) {
// ---- measurement location -------
ax=HRTx0+endoCx[n]*tmswf[0][0]+endoCy[n]*tmswf[0][1]+endoCz[n]*tmswf[0][2]-posi;
ay=HRTy0+endoCx[n]*tmswf[1][0]+endoCy[n]*tmswf[1][1]+endoCz[n]*tmswf[1][2]-posj;
az=HRTz0+endoCx[n]*tmswf[2][0]+endoCy[n]*tmswf[2][1]+endoCz[n]*tmswf[2][2]-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(endoHnnA_old+n+NendoB)+=dr*rv3;
}
for(n=0;n<(NL-2)*2;n++) {
// ---- measurement location -------
n0=*(tnd[0]+n)-1;
n1=*(tnd[1]+n)-1;
n2=*(tnd[2]+n)-1;
ax=(*(r[0]+n0)+*(r[0]+n1)+*(r[0]+n2))/3-posi;
ay=(*(r[1]+n0)+*(r[1]+n1)+*(r[1]+n2))/3-posj;
az=(*(r[2]+n0)+*(r[2]+n1)+*(r[2]+n2))/3-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(surfPOTi+n)+=dr*rv3;
//Uinf
}
//*/ //sf
}
}
}
}
/*
for (nhb=0; nhb<nttl; nhb++) {
i=ipttl[0][nhb];
j=ipttl[1][nhb];
k=ipttl[2][nhb];
iCell=*(mapCell[k]+j*NJ+i);
if (iCell<=1) continue;
if (iCell>=15) continue;
temploc=*(locXCT[k]+j*NJ+i);
eTime=*(mapXCTm[iTime0/mBCL]+temploc);
if (eTime==(short int)(iTime0/3)) {
dpl[0]=50;
dpl[1]=50;
dpl[2]=50;
posi=HRTx0+i*tmswf[0][0]+j*tmswf[0][1]+k*tmswf[0][2];
posj=HRTy0+i*tmswf[1][0]+j*tmswf[1][1]+k*tmswf[1][2];
posk=HRTz0+i*tmswf[2][0]+j*tmswf[2][1]+k*tmswf[2][2];
// potential distribution generated by
// a single dipole in infinite medium
for(n=0;n<nPos;n++) {
ax=*(r[0]+n)-posi;
ay=*(r[1]+n)-posj;
az=*(r[2]+n)-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
r5=(float)(r2*r3);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
ds=3*dr/r5;
rv3=1/r3;
bx=dpl[0]*rv3-ax*ds;
by=dpl[1]*rv3-ay*ds;
bz=dpl[2]*rv3-az*ds;
*(POTi+n)+=dr*rv3;
*(der+n)+=*(rn[0]+n)*bx+*(rn[1]+n)*by+*(rn[2]+n)*bz;
}
TRACE("\niCellx %d %d %d %d %f %f",iTime0,i,j,k,compo,*(POTi+94));
for(n=0;n<NendoB;n++) {
// ---- measurement location -------
ax=HRTx0+endoBx[n]*tmswf[0][0]+endoBy[n]*tmswf[0][1]+endoBz[n]*tmswf[0][2]-posi;
ay=HRTy0+endoBx[n]*tmswf[1][0]+endoBy[n]*tmswf[1][1]+endoBz[n]*tmswf[1][2]-posj;
az=HRTz0+endoBx[n]*tmswf[2][0]+endoBy[n]*tmswf[2][1]+endoBz[n]*tmswf[2][2]-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(endoHnnA+n)+=dr*rv3;
}
for(n=0;n<NendoC;n++) {
// ---- measurement location -------
ax=HRTx0+endoCx[n]*tmswf[0][0]+endoCy[n]*tmswf[0][1]+endoCz[n]*tmswf[0][2]-posi;
ay=HRTy0+endoCx[n]*tmswf[1][0]+endoCy[n]*tmswf[1][1]+endoCz[n]*tmswf[1][2]-posj;
az=HRTz0+endoCx[n]*tmswf[2][0]+endoCy[n]*tmswf[2][1]+endoCz[n]*tmswf[2][2]-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(endoHnnA+n+NendoB)+=dr*rv3;
}
for(n=0;n<(NL-2)*2;n++) {
// ---- measurement location -------
n0=*(tnd[0]+n)-1;
n1=*(tnd[1]+n)-1;
n2=*(tnd[2]+n)-1;
ax=(*(r[0]+n0)+*(r[0]+n1)+*(r[0]+n2))/3-posi;
ay=(*(r[1]+n0)+*(r[1]+n1)+*(r[1]+n2))/3-posj;
az=(*(r[2]+n0)+*(r[2]+n1)+*(r[2]+n2))/3-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(surfPOTi+n)+=dr*rv3;
//Uinf
}
}
}
*/
//gpu_BSPitmm_HostToDevice(POTi,der,endoHnnA,surfPOTi);
//printf("Time=%d,\n", iTime0);
// up, bottom, left, right, front, behind
for (k=0;k<=NK;k+=nskip) { // i,j // < --> <= August 10,1996
for (j=0;j<NJ;j+=nskip) {
for (i=NI;i>-1;i-=nskip) {
if (k<*(kmin+NI*j+i) || k>*(kmax+NI*j+i)) {
continue;
}
iCell=*(mapCell[k]+NI*j+i);
// +++++++++ special fiber neglected +++++
if (iCell<=1) continue; /*<Comment by ALF> null or SN*/
if (iCell>=15) continue; /*<Comment by ALF> out of define*/
// include fiber conduction
if((iCell>=3)&&(iCell<=6)) continue; /*<Comment by ALF> not AVN HB BB PKJ*/
compo=aptcalm(i,j,k,iCell,iTime0);
// --------- neighberhood search ---------
gsum=(float)0;
for (l=0;l<6;l++) {
compm=(float)0.0;
compp=(float)0.0;
grad[l]=(double)0.0;
ix=i+iseqx[l];
iy=j+iseqy[l];
iz=k+iseqz[l];
if ((ix>=0)&&(ix<NI)&&(iy>=0)&&(iy<NJ)&&(iz>=0)&&(iz<NK)) {
icell=*(mapCell[iz]+iy*NI+ix);
if ((icell>1)&&(icell<15)&&((icell<3)||(icell>6))) {
compm=aptcalm(ix,iy,iz,icell,iTime0);
//if (iTime0 ==3 && compm != -90.) TRACE("\nB %d %d %d %f",ix,iy,iz, compm);
grad[l]+=compm-compo;
}
}
l6=l+6; /*<Comment by ALF> opposite one*/
jx=i+iseqx[l6];
jy=j+iseqy[l6];
jz=k+iseqz[l6];
if ((jx>=0)&&(jx<NI)&&(jy>=0)&&(jy<NJ)&&(jz>=0)&&(jz<NK)) {
jcell=*(mapCell[jz]+jy*NI+jx);
if ((jcell>1)&&(jcell<15)&&((jcell<3)||(jcell>6))) {
compp=aptcalm(jx,jy,jz,jcell,iTime0);
grad[l]+=compo-compp;
}
}
}
for (l=0;l<6;l++)
gsum+=(float)fabs((double)grad[l]);
if (gsum==0) continue;
// close dpl file
// dipole number --> nsum; position-->ipos
for (n=0;n<3;n++) {
dpl[n]=(float)0;
for (short int m=0;m<6;m++)
dpl[n]+=tmswf[n][m]*grad[m];
// -- take conductivity factor into consideration --
dpl[n]=dpl[n]*(*(iparm+NPARM*(iCell-1)+12))/(100);
// f.Write(&dpl[n],4);
// *(dplm[n]+idpl) = dpl[n];
// >>>>> moved to an independent loop below >>>>
// VCG[n]+=dpl[n];
}
// >>>>>>>>>> aniso >>>>>>>>
tmpdpl=dpl[0];
if (ANISO==1 && icell==7) {
anfct(i,j,k,dpl);
}
// if (tmpdpl-dpl[0]>0.0001 || tmpdpl-dpl[0]<-0.0001)
// TRACE("\ndpl %2d %2d %2d %f %f",i+1,j+1,k+1, tmpdpl, dpl[0]);
if (OK_SAV==1) {
//by sf-090329
//f.Write(&i,2);
//f.Write(&j,2);
//f.Write(&k,2);
//for (n=0;n<3;n++) {
// f.Write(&dpl[n],4);
//}
*(gatherallijk[iTime0/3]+countijk)=i;*(gatherallijk[iTime0/3]+countijk+1)=j;*(gatherallijk[iTime0/3]+countijk+2)=k;
*(gatheralldpl[iTime0/3]+countijk)=dpl[0];*(gatheralldpl[iTime0/3]+countijk+1)=dpl[1];*(gatheralldpl[iTime0/3]+countijk+2)=dpl[2];
countijk=countijk+3;
}
for (n=0;n<3;n++) {
VCG_old[n]+=dpl[n];//VCG[n]+=dpl[n];
}
idpl++;
// <<<<<<<<<< aniso <<<<<<<<
posi=HRTx0+i*tmswf[0][0]+j*tmswf[0][1]+k*tmswf[0][2];
posj=HRTy0+i*tmswf[1][0]+j*tmswf[1][1]+k*tmswf[1][2];
posk=HRTz0+i*tmswf[2][0]+j*tmswf[2][1]+k*tmswf[2][2];
// potential distribution generated by
// a single dipole in infinite medium
//------------ 2009-2-4-16 BY SWF---------
// comment:
//printf("nPos*,itime0=%d", iTime0);
if (useGPU==1 && tid==0 && GPUnum>0)
{
// gpu_freetransdata();
//gpu_transdata(tnd,r,rn,endoBx,endoBy,endoBz,endoCx,endoCy,endoCz,tmswf);
//gpu_BSPitmm_HostToDevice(POTi,der,endoHnnA,surfPOTi);
gpu_dpl_all(1,posi,posj,posk,nPos,dpl,POTi_old,der,HRTx0,HRTy0,HRTz0,NendoB,NendoC,endoHnnA_old,endoBx,endoBy,endoBz,tmswf,epicPOTold);
//gpu_dpl_nPos(posi,posj,posk,nPos,dpl,POTi_old,der);
//gpu_dpl_Nendo(posi,posj,posk,HRTx0,HRTy0,HRTz0,NendoB,0,dpl,endoHnnA_old,endoBx,endoBy,endoBz,tmswf);
//gpu_dpl_Nendo(posi,posj,posk,HRTx0,HRTy0,HRTz0,NendoC,NendoB,dpl,endoHnnA_old,endoCx,endoCy,endoCz,tmswf);
//gpu_dpl_nPos_2(posi,posj,posk,dpl);
//gpu_dpl_Nepic(posi,posj,posk,HRTx0,HRTy0,HRTz0,dpl,tmswf,epicPOTold);
//gpu_BSPitmm_DeviceToHost(POTi,der,endoHnnA,surfPOTi);
///* //printf("$");
}
else
{
for(n=0;n<nPos;n++) {
ax=*(r[0]+n)-posi;
ay=*(r[1]+n)-posj;
az=*(r[2]+n)-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
r5=(float)(r2*r3);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
ds=3*dr/r5;
rv3=1/r3;
bx=dpl[0]*rv3-ax*ds;
by=dpl[1]*rv3-ay*ds;
bz=dpl[2]*rv3-az*ds;
*(POTi_old+n)+=dr*rv3;
*(der+n)+=*(rn[0]+n)*bx+*(rn[1]+n)*by+*(rn[2]+n)*bz;
}
// endocadial potential distribution generated by
// a single dipole in infinite medium
for(n=0;n<NendoB;n++) {
// ---- measurement location -------
ax=HRTx0+endoBx[n]*tmswf[0][0]+endoBy[n]*tmswf[0][1]+endoBz[n]*tmswf[0][2]-posi;
ay=HRTy0+endoBx[n]*tmswf[1][0]+endoBy[n]*tmswf[1][1]+endoBz[n]*tmswf[1][2]-posj;
az=HRTz0+endoBx[n]*tmswf[2][0]+endoBy[n]*tmswf[2][1]+endoBz[n]*tmswf[2][2]-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(endoHnnA_old+n)+=dr*rv3;
}
for(n=0;n<NendoC;n++) {
// ---- measurement location -------
ax=HRTx0+endoCx[n]*tmswf[0][0]+endoCy[n]*tmswf[0][1]+endoCz[n]*tmswf[0][2]-posi;
ay=HRTy0+endoCx[n]*tmswf[1][0]+endoCy[n]*tmswf[1][1]+endoCz[n]*tmswf[1][2]-posj;
az=HRTz0+endoCx[n]*tmswf[2][0]+endoCy[n]*tmswf[2][1]+endoCz[n]*tmswf[2][2]-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(endoHnnA_old+n+NendoB)+=dr*rv3;
}
//-------------------- modified by ALF at 2008-8-19 begin -------------------->
//add: epicardial potential distribution generated by
// a single dipole in infinite medium
for (n=0; n<Nepic; ++n) {
ax=HRTx0+epicX[n]*tmswf[0][0]+epicY[n]*tmswf[0][1]+epicZ[n]*tmswf[0][2]-posi;
ay=HRTy0+epicX[n]*tmswf[1][0]+epicY[n]*tmswf[1][1]+epicZ[n]*tmswf[1][2]-posj;
az=HRTz0+epicX[n]*tmswf[2][0]+epicY[n]*tmswf[2][1]+epicZ[n]*tmswf[2][2]-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(epicPOT+n)+=dr*rv3;
}
//-------------------- modified by ALF at 2008-8-19 end --------------------<
for(n=0;n<(NL-2)*2;n++) {
// ---- measurement location -------
n0=*(tnd[0]+n)-1;
n1=*(tnd[1]+n)-1;
n2=*(tnd[2]+n)-1;
ax=(*(r[0]+n0)+*(r[0]+n1)+*(r[0]+n2))/3-posi;
ay=(*(r[1]+n0)+*(r[1]+n1)+*(r[1]+n2))/3-posj;
az=(*(r[2]+n0)+*(r[2]+n1)+*(r[2]+n2))/3-posk;
r2=ax*ax+ay*ay+az*az;
r1=(float)sqrt(r2);
r3=(float)(r1*r2);
dr=dpl[0]*ax+dpl[1]*ay+dpl[2]*az;
rv3=1/r3;
*(surfPOTi+n)+=dr*rv3;
//Uinf
}
};////test sf
//*/
//------------ 2009-2-4-16 BY SWF---------
}
}
}
if (useGPU==1 && tid==0 && GPUnum>0)
{
gpu_BSPitmm_DeviceToHost(epicPOTold,POTi_old,der,endoHnnA_old,surfPOTi);
for(i=0;i<Nepic;i++) *(epicPOT+i)=*(epicPOTold+i);
}
//bsptimes1[1] = clock();
//if (iTime0<1800 ) //sf
// {float dd=0,pp=0;
// for(int ff=0;ff<NendoB;ff++)
// {
// dd+=*(endoHnnA+ff);
// //pp+=*(POTi+ff);
// }
// FILE *fptime;
// fptime=fopen(dataPath+"data.txt","a") ;
// fprintf(fptime,"iTime0=%d,endoHnnA=%f,,\n",iTime0,dd);
// printf("iTime0=%d,endoHnnA=%f,,\n",iTime0,dd);
// fclose(fptime);
// };
//---- next Step -----
GRD=0; // ? April 29, 1996
for(i=0;i<3;i++)
GRD+=(bufVCG[1][i]-VCG_old[i])*(bufVCG[1][i]-VCG_old[i]);//GRD+=(bufVCG[1][i]-VCG[i])*(bufVCG[1][i]-VCG[i]);
intvl=iTime0-itbuf;
GRD=sqrt(GRD)/intvl;
GRD=100*GRD/939.513;
/*
i=0;
for (ni=0;ni<mxcycle;ni++) {
eTime=vHB[ni][0];
if ((eTime-iTime0/3)<4*ND) {
i=1;
}
}
if (i==1) {
nextStep=3*ND;
break;
}
if (GRD>10*ND) {
if((bufGRD<2)&&(intvl>3)) {
iTime0=itbuf+3;
continue;
}
nextStep=3*ND;
//nextStep=ND;
break;
}
if (GRD>5*ND) {
nextStep=6*ND; // 9 --> 6 August 11, 1996
//nextStep=2*ND; // 9 --> 6 August 11, 1996
break;
}
nextStep=12*ND; // 21 --> 12 August 11, 1996
*/
nextStep=3*ND; // 21 --> 12 August 11, 1996
break;
}
itbuf=iTime0;
bufGRD=(float)GRD;
// ---- the same value with the previous two ? --
for (n=0;n<3;n++) {
if ((VCG_old[n]!=bufVCG[0][n])||(VCG_old[n]!=bufVCG[1][n])) {//if ((VCG[n]!=bufVCG[0][n])||(VCG[n]!=bufVCG[1][n])) {
n=-1;
break;
}
}
//if (n != -1) {
// answer='s';
// answer='d';
// //return;
//}
//else
//{
answer='d';
for (n=0;n<3;n++) {
bufVCG[0][n]=bufVCG[1][n];
bufVCG[1][n]=VCG_old[n];//bufVCG[1][n]=VCG[n];
}
// --- boundary condition into Account-------
ECGs=(float)0;
for(j=0;j<nPos;j++) ECGs+=*(POTi_old+j);
ECGs*=alp;
for(j=0;j<nPos;j++) {
*(ders+j)=(float)0;
for(k=0;k<nPos;k++) *(ders+j)+=*(aw[j]+k)*(*(der+k)); // aw : j,k or k,j ?
*(POTi_old+j)+=-*(ders+j)-*(bw+j)*ECGs;
}
// body surface triangle
float sum, tmp, triarea, sumarea;
for (j=0; j<(NL-2)*2;j++) {
sum=0.0;
for(k=0;k<(nPos-2)*2;k++) {
tmp=*(surfPOTi+k);
sum+=*(hnn+j*(nPos-2)*2+k) * tmp;
}
*(u1+j)=sum;
}
triarea=0.0;
sumarea=0.0;
for(n=0;n<(NL-2)*2;n++) {
// ---- measurement location -------
n0=*(tnd[0]+n)-1;
n1=*(tnd[1]+n)-1;
n2=*(tnd[2]+n)-1;
ax=(*(r[0]+n0)-*(r[0]+n1));
ay=(*(r[1]+n0)-*(r[1]+n1));
az=(*(r[2]+n0)-*(r[2]+n1));
bx=(*(r[0]+n0)-*(r[0]+n2));
by=(*(r[1]+n0)-*(r[1]+n2));
bz=(*(r[2]+n0)-*(r[2]+n2));
tmp=(ax*by-bx*ay)*(ax*by-bx*ay)+(ax*bz-bx*az)*(ax*bz-bx*az)+(az*by-bz*ay)*(az*by-bz*ay);
tmp=0.5*sqrt(tmp);
triarea+=*(u1+n)*tmp;
sumarea+=tmp;
}
for (n=0; n<NendoB;n++) {
sum=0.0;
for(k=0;k<(nPos-2)*2;k++) {
tmp=*(u1+k);
sum +=*(endoHnnB+n*(nPos-2)*2+k) * tmp;
}
*(endoHnnA_old+n)+=sum-triarea/sumarea;
}
for (n=0; n<NendoC;n++) {
sum=0.0;
for(k=0;k<(nPos-2)*2;k++) {
tmp=*(u1+k);
sum +=*(endoHnnC+n*(nPos-2)*2+k) * tmp;
}
*(endoHnnA_old+n+NendoB)+=sum-triarea/sumarea;
}
//}//sf-090402-5 if (n != -1) {
//-------------------- modified by ALF at 2008-8-19 begin -------------------->
//add
for (n=0; n<Nepic;n++) {
sum=0.0;
for(k=0;k<(nPos-2)*2;k++) {
tmp=*(u1+k);
sum +=*(epicHnn+n*(nPos-2)*2+k) * tmp;
}
*(epicPOT+n)+=sum-triarea/sumarea;
}
//-------------------- modified by ALF at 2008-8-19 end --------------------<
//by sf 090329
/*
#pragma omp critical
{//critical--begin
if (OK_SAV==1) {
CFile f2;
CFileException e2;
//short int index2;
//index2=filepath.FindOneOf(".");
//filepath.SetAt(index2+1,'d');
//filepath.SetAt(index2+2,'p');
//filepath.SetAt(index2+3,'n');
if (iTime0 > 3) {
if (!f2.Open(dataPath+"tour.dpn ",CFile::modeReadWrite, &e2 )) {
f2.Open(dataPath+"tour.dpn ",CFile::modeCreate|CFile::modeReadWrite, &e2 );
}
f2.SeekToEnd();
} else {
if (!f2.Open( dataPath+"tour.dpn ", CFile::modeCreate | CFile::modeWrite, &e2 )) {
#ifdef _DEBUG
afxDump << "File could not be opened " << e2.m_cause << "\n";
#endif
}
}
f2.Write(&iTime0,2);
f2.Write(&idpl,2);
f2.Close();
}
if (OK_SAV==1) {
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'d');
//filepath.SetAt(index+2,'p');
//filepath.SetAt(index+3,'l');
if (iTime0 > 3) {
if (!f.Open(dataPath+"tour.dpl ",CFile::modeReadWrite, &e )) {
f.Open(dataPath+"tour.dpl ",CFile::modeCreate|CFile::modeReadWrite, &e );
}
f.SeekToEnd();
} else {
if (!f.Open(dataPath+"tour.dpl ", CFile::modeCreate | CFile::modeWrite, &e )) {
#ifdef _DEBUG
afxDump << "File could not be opened " << e.m_cause << "\n";
#endif
}
}
}
f.Write(&iTime0,2);//from line 4070
//f.Write(gatherijk,2*countijk);
//f.Write(gatherdpl,4*countijk);
for(j=0;j<countijk;j=j+3)
{
f.Write(gatherijk+j,2*3);
//f.Write(gatherijk+j+1,2);
//f.Write(gatherijk+j+2,2);
f.Write(gatherdpl+j,4*3);
//f.Write(gatherdpl+j+1,4);
//f.Write(gatherdpl+j+2,4);
}
if (OK_SAV==1) {
f.Close();
}
}//critical--end
*/
//by sf 090408 for dpl[]
// int tmpiTime=iTime0/3;
// gatherallijk[tmpiTime] = (int *) malloc( countijk*sizeof(int) );
// gatheralldpl[tmpiTime] = (float *) malloc( countijk*sizeof(float) );
// *(countallijk+tmpiTime)=countijk;
// for(j=0;j<countijk;j=j+1)
//{
// *(gatherallijk[tmpiTime]+j)=gatherijk[j];
// *(gatheralldpl[tmpiTime]+j)=gatherdpl[j];
// }
// if(iTime0==27)
//{
// printf("27bcasti=%d,myid=%d,ijk=%d,%d,g=%d,%d\n",iTime0,myid,*(gatherallijk[iTime0/3]),*(gatherallijk[iTime0/3]+1),gatherijk[0],gatherijk[1]);
// printf("27bcasti=%d,myid=%d,dpl=%f,%f,%f,%f\n",iTime0,myid,*(gatheralldpl[iTime0/3]),*(gatheralldpl[iTime0/3]+1),gatherdpl[0],gatherdpl[1]);
//};
// by sf 090401 BSPMcal if begin
short int nTimeStep_old=iTime0/3;//nTimeStep=nTimeStep+1;
if ((answer!='s')||(nTimeStep_old<=1)) {
int n = 0;
for(n=0;n<nPos;n++) {
*(POT[n]+nTimeStep_old)=*(POTi_old+n);
}
// add endocardial potential
//printf("iTime=%d,tid=%d,tnum=%d,nTimeStep_old=%d\n",iTime0,omp_get_thread_num(),omp_get_num_threads(),nTimeStep_old);
for(n=0;n<2*NENDO*ND3;n++) {
*(endoPOT[nTimeStep_old-1]+n)=*(endoHnnA_old+n);
}
if(iTime0<=nsnrt) {
//nVCG_old++;//nVCG=nVCG+1;nVCG-->nTimeStep
for(n=0;n<3;n++) {
*(VCGs[n]+nTimeStep_old)=VCG_old[n]/ND; //*(VCGs[n]+nVCG)=VCG[n]/ND;
}
}
}
//bsptimes1[2] = clock();
//printf("%f,%f,bsptimes1[1-0]-bsptimes1[2-1] tid=%d,iTime0=%d\n",(bsptimes1[1]-bsptimes1[0])/CLK_TCK,(bsptimes1[2]-bsptimes1[1])/CLK_TCK,tid,iTime0);
free(endoHnnA_old);free(POTi_old);// by sf 090402-3
free(epicPOTold);
// by sf 090401 if end
free(u1);
free(surfPOTi);
}
// *********** action potential calculation *******
// aptcalm: return the transmembrane action-potential value (mV) for the cell
// at grid point (i0,j0,k0) of type iCell0 at time slot iTime1 (3 slots per
// simulation step).  Falls back to the cell type's resting potential when the
// point was never excited or the elapsed time exceeds its (rate-adapted)
// action-potential duration.
float aptcalm(short int i0,short int j0,short int k0,short int iCell0,short int iTime1) {
short int istp,irsd,lacl,lacl1,iext;
float ACTval;
// ++++ resting potential +++++
// iparm is a per-cell-type parameter table with NPARM columns; column 6
// holds the resting potential.
ACTval=(float)(*(iparm+NPARM*(iCell0-1)+6));
istp=(short int)(iTime1/3); // each step has 3 time slots
//rdXCTm(istp,i0,j0,k0); //by sf-090401: former subroutine inlined here so that idltt/idltc become locals instead of globals shared (and corrupted) across OpenMP threads // get idltt = istp - ncyc, the current step in current cycle
//idlttold-->idltt idltcold-->idltc
short int ncyc,n1cyc;
short int i00,j00,k00,icc,idlttold,idltcold;
i00=i0;j00=j0;k00=k0;icc=istp;
idlttold=INFTIME; /*<Comment by ALF> period between 2 continuous excitation*/
idltcold=0; /*<Comment by ALF> delta of 2 periods */
short int n;
long locxct;
// locXCT maps a grid point to its row in the excitation-time tables;
// negative means this point has no excitation record at all.
locxct=*(locXCT[k00]+j00*NI+i00);
if(locxct<0)
{//return;
}
else
{
// mapXCTm[n] holds the excitation step of the n-th pacing cycle for this
// point; scan newest-to-oldest for the latest cycle at or before icc.
for(n=NCYCL-1;n>=0;n--) {
ncyc=*(mapXCTm[n]+locxct);
if (icc>=ncyc) {
idlttold=icc-ncyc;
break;
}
}
// Interior cycles only: restitution needs both a previous and a next
// cycle, so the first and last recorded cycles are skipped.
if ((n<=0)||(n>=NCYCL-1))
{//return;
}
else
{
n1cyc=*(mapXCTm[n+1]+locxct);
if (n1cyc==INFTIME)
{
}
else
{
// Change in cycle length: (next - current) - (current - previous).
idltcold=n1cyc-ncyc-ncyc+*(mapXCTm[n-1]+locxct);
};
};
};
//rdXCTm(istp,i0,j0,k0);---end --by sf
// Never excited: stay at the resting potential (e.g. -90 mV).
if (idlttold==INFTIME) { // ACTval=-90 situation
return ACTval;
}
// Convert the step delta to time slots, adding the slot offset within
// the current step.
irsd=iTime1-istp*3;
idlttold=idlttold*3+irsd;
// APD extension: per-point APD map plus a restitution term scaled by the
// cell type's percentage in iparm column 10.
// iext=*(mapACT[k0]+j0*NI+i0)+idltc * 3 * *(iparm+(iCell0-1)*NPARM+10)/100;
iext=*(mapAPD[k0]+j0*NI+i0)+idltcold * 3 * *(iparm+(iCell0-1)*NPARM+10)/100;
// la0123: slots to the end of phase 3; la012: slots to the end of phase 2
// (both per cell type, both stretched by iext).
lacl=la0123[iCell0-1]+iext;
if(idlttold>lacl) return ACTval;
lacl1=la012[iCell0-1]+iext;
//TRACE("\naptcalm %d %d %d %d %d %d",idltt,lacl1,la012[iCell0-1],iext,lacl,*(mapAPD[k0]+j0*NI+i0));
// Phase 3 (repolarization): index the waveform table past the stretch.
if(idlttold>lacl1) {
idlttold-=iext;
ACTval=*(ydata[iCell0-1]+idlttold);
return ACTval;
}
// Phases 0-2: clamp to the plateau end and read the waveform table.
if(idlttold>la012[iCell0-1]) idlttold=la012[iCell0-1];
ACTval=*(ydata[iCell0-1]+idlttold);
return ACTval;
// --- atrial REPolarization ignored ----
// if(iCell0==2) {
// ACTval=(float)(*(iparm+1*NPARM+7));
// return ACTval;
// }
// +++ la012, time to the end of phase 2; +++++
// +++ la0123,time to the end of phase 3 +++++
}
// ECG calculation
// ECG calculation
// ECGcal: derive the 12-lead ECG from the stored body-surface potentials
// (POT, indexed 1..nTimeStep per electrode) and write it as 16-bit samples
// to "tour.ecg " in dataPath: first nTimeStep, then the step indices, then
// twelve leads per time step.
void ECGcal(void) {
    float lead[12];          // the 12 derived lead voltages for one step
    short int iLead[12];     // quantized output samples
    short int t, ch;
    // Scaling factor applied before truncation to 16-bit integers.
    float scale=(float)26.5730/ND;
    // --- open the output file (name and flags match the tool chain) ---
    CFile f;
    CFileException e;
    if (!f.Open( dataPath+"tour.ecg ", CFile::modeCreate | CFile::modeWrite, &e )) {
#ifdef _DEBUG
        afxDump << "File could not be opened " << e.m_cause << "\n";
#endif
    }
    // Header: number of time steps followed by each 1-based step index.
    f.Write(&nTimeStep,2);
    for (t=1;t<=nTimeStep;t++) {
        f.Write(&iStep[t],2);
    }
    // One set of 12 leads per time step.
    for (t=1;t<=nTimeStep;t++) {
        // Limb electrode potentials; nv[] selects the electrode indices.
        float vR=POT[nv[0]][t];
        float vL=POT[nv[1]][t];
        float vF=POT[nv[2]][t];
        // Wilson central terminal (average of the three limb electrodes).
        float wct=(vR+vL+vF)/3;
        // Chest leads referenced to the Wilson terminal.  The electrode
        // indices 94..141 are fixed by the torso mesh — presumably the
        // precordial V1..V6 positions; confirm against the electrode map.
        lead[0]=POT[94][t]-wct;
        lead[1]=POT[96][t]-wct;
        lead[2]=POT[117][t]-wct;
        lead[3]=POT[138][t]-wct;
        lead[4]=(POT[139][t]/2+POT[140][t]/2)-wct;  // midpoint of two nodes
        lead[5]=POT[141][t]-wct;
        // Bipolar limb leads I, II, III.
        lead[6]=vL-vR;
        lead[7]=vF-vR;
        lead[8]=vF-vL;
        // Augmented leads aVR, aVL, aVF.
        lead[9]=(vR-wct)*3/2;
        lead[10]=(vL-wct)*3/2;
        lead[11]=(vF-wct)*3/2;
        // Quantize and write each channel as a 2-byte sample.
        for (ch=0;ch<12;ch++) {
            iLead[ch]=(short int)(scale*lead[ch]);
            f.Write(&iLead[ch],2);
        }
    }
    f.Close();
}
//-------------------- modified by sf at 2008-4-27 begin -------------------->
//modified
// BSPitmmcount: count the dipole sources that will be active at time slot
// iTime0 — His-bundle excitations firing at this step plus every myocardial
// grid point (on the nskip-subsampled lattice) whose action potential has a
// nonzero spatial gradient.  Used to size buffers / distribute work before
// the actual BSPitmm dipole summation.
int BSPitmmcount(short int iTime0) {
//void BSPitmm(short int iTime0, short int **tnd,float *hnn, float *endoHnnA, float *endoHnnB, float *endoHnnC, float *epicHnn, float *epicPOT) {
//ASSERT(epicHnn != NULL);
//-------------------- modified by ALF at 2008-8-19 end --------------------<
float aptcalm(short int,short int,short int,short int,short int);
void anfct(short int i, short int j, short int k, float v[3]);
int loopcount=0;
char iCell;
const short int OK_SAV=1;
// 12 neighbor offsets: entries l and l+6 are opposite directions, paired
// below to form central differences along 6 axes.
short int iseqx[12]={ -1, 0, 0, 1, 1, 0, 1, 0, 0,-1,-1, 0 };
short int iseqy[12]={ 0,-1, 0,-1, 0, 1, 0, 1, 0, 1, 0,-1 };
short int iseqz[12]={ 0, 0,-1, 0,-1,-1, 0, 0, 1, 0, 1, 1 };
short int nskip=2;
short int i,j,k,l,ix,iy,iz,icell,l6,jx,jy,jz,jcell;
int nsum,n;
int intvl;
int idpl;
// NOTE(review): many of the locals below (asd, add, rtmax, r1..bz, ECGs,
// posi.., GRD, tmpdpl, index, ...) are unused in the counting pass — kept
// from the full BSPitmm body this was derived from.
float asd,add,rtmax,gsum,compm,compp,compo,ax,ay,az;
float r1,r3,r5,dr,ds,rv3,bx,by,bz,ECGs;
//float der[NL],ders[NL];
double grad[6];
//float dpl[3];
float posi, posj, posk;
float r2,GRD;
float tmpdpl;
// endocardial
int n0,n1,n2,ni;
//float *surfPOTi,*u1;
short int nhb, eTime;
short int index;
// --- His-bundle sources: one count per bundle point whose recorded
// excitation step equals the current step (iTime0/3). ---
for (nhb=0; nhb<nHB; nhb++) {
i=iHB[0][nhb];
j=iHB[1][nhb];
k=iHB[2][nhb];
for (ni=0;ni<mxcycle;ni++) {
eTime=vHB[ni][nhb];
if (eTime==(short int)(iTime0/3)) {
loopcount++;
}
}
}
// --- myocardial sources: scan the subsampled lattice. ---
for (k=0;k<=NK;k+=nskip) { // i,j // < --> <= August 10,1996
for (j=0;j<NJ;j+=nskip) {
// NOTE(review): the i loop starts at i=NI, so *(kmin+NI*j+NI) reads one
// element past row j (i.e. row j+1, column 0) — confirm whether the kmin/
// kmax/mapCell maps are padded or this is an off-by-one kept for parity
// with the legacy BSPitmm pass.
for (i=NI;i>-1;i-=nskip) {
// Skip points outside this column's tissue extent.
if (k<*(kmin+NI*j+i) || k>*(kmax+NI*j+i)) {
continue;
}
iCell=*(mapCell[k]+NI*j+i);
// +++++++++ special fiber neglected +++++
if (iCell<=1) continue; /*<Comment by ALF> null or SN*/
if (iCell>=15) continue; /*<Comment by ALF> out of define*/
// include fiber conduction
if((iCell>=3)&&(iCell<=6)) continue; /*<Comment by ALF> not AVN HB BB PKJ*/
// Action potential at the center point.
compo=aptcalm(i,j,k,iCell,iTime0);
// --------- neighberhood search ---------
// Central-difference gradient of the action potential along the 6
// paired directions; gsum accumulates its L1 magnitude.
gsum=(float)0;
for (l=0;l<6;l++) {
compm=(float)0.0;
compp=(float)0.0;
grad[l]=(double)0.0;
ix=i+iseqx[l];
iy=j+iseqy[l];
iz=k+iseqz[l];
if ((ix>=0)&&(ix<NI)&&(iy>=0)&&(iy<NJ)&&(iz>=0)&&(iz<NK)) {
icell=*(mapCell[iz]+iy*NI+ix);
if ((icell>1)&&(icell<15)&&((icell<3)||(icell>6))) {
compm=aptcalm(ix,iy,iz,icell,iTime0);
//if (iTime0 ==3 && compm != -90.) TRACE("\nB %d %d %d %f",ix,iy,iz, compm);
grad[l]+=compm-compo;
}
}
l6=l+6; /*<Comment by ALF> opposite one*/
jx=i+iseqx[l6];
jy=j+iseqy[l6];
jz=k+iseqz[l6];
if ((jx>=0)&&(jx<NI)&&(jy>=0)&&(jy<NJ)&&(jz>=0)&&(jz<NK)) {
jcell=*(mapCell[jz]+jy*NI+jx);
if ((jcell>1)&&(jcell<15)&&((jcell<3)||(jcell>6))) {
compp=aptcalm(jx,jy,jz,jcell,iTime0);
grad[l]+=compo-compp;
}
}
}
for (l=0;l<6;l++)
gsum+=(float)fabs((double)grad[l]);
// Zero gradient means no dipole at this point.
if (gsum==0) continue;
loopcount++;
}
}
}
return loopcount;
}
#if __DEVICE_EMULATION__
// Device-emulation builds have no physical GPU to probe.
bool InitCUDA(void){return true;}
#else
/*
 * InitCUDA: find the first CUDA device with compute capability >= 1.x,
 * bind it to this host thread with cudaSetDevice, and report success.
 * Returns false (with a message on stderr) when no usable device exists
 * or any runtime call fails.
 */
bool InitCUDA(void)
{
    int count = 0;
    int i = 0;
    // cudaGetDeviceCount itself can fail (e.g. no driver installed);
    // treat any error the same as "no device" instead of reading a
    // potentially stale count.
    if (cudaGetDeviceCount(&count) != cudaSuccess) {
        count = 0;
    }
    if(count == 0) {
        fprintf(stderr, "There is no device.\n");
        return false;
    }
    for(i = 0; i < count; i++) {
        cudaDeviceProp prop;
        if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
            if(prop.major >= 1) {
                break;  // first usable device wins
            }
        }
    }
    if(i == count) {
        fprintf(stderr, "There is no device supporting CUDA.\n");
        return false;
    }
    // Make the selected device current and verify the call succeeded.
    if (cudaSetDevice(i) != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed.\n");
        return false;
    }
    printf("CUDA initialized.\n");
    return true;
}
#endif
/************************************************************************/
/* Example */
/************************************************************************/
/*
 * k_dpl_Nepic: accumulate one current dipole's 1/r^3 potential contribution
 * at each of k_Nepic epicardial nodes (one thread per node; grid must cover
 * at least k_Nepic threads — excess tail threads exit at the guard).
 * Each node's grid coordinates (k_epicX/Y/Z) are rotated by the 3x6 matrix
 * k_tm (row stride 6, columns 0-2 used) and offset by the heart origin
 * (k_HRTx0/y0/z0) before subtracting the dipole position (k_posi/j/k).
 * The term-by-term accumulation order matches the reference version so the
 * floating-point result is unchanged.
 */
__global__ static void k_dpl_Nepic(short int *k_epicX,short int *k_epicY,short int *k_epicZ,float k_posi,float k_posj,float k_posk,
float k_HRTx0,float k_HRTy0,float k_HRTz0,float *k_dpl,float *k_epicPOTold,
float *k_tm,short int k_Nepic)
{
    int node = blockDim.x * blockIdx.x + threadIdx.x;
    if (node >= k_Nepic) return;   // grid-tail guard

    // World-space vector from the dipole position to this epicardial node.
    float vx = k_HRTx0;
    vx += k_epicX[node] * k_tm[0];
    vx += k_epicY[node] * k_tm[1];
    vx += k_epicZ[node] * k_tm[2];
    vx -= k_posi;

    float vy = k_HRTy0;
    vy += k_epicX[node] * k_tm[1*6];
    vy += k_epicY[node] * k_tm[1*6+1];
    vy += k_epicZ[node] * k_tm[1*6+2];
    vy -= k_posj;

    float vz = k_HRTz0;
    vz += k_epicX[node] * k_tm[2*6];
    vz += k_epicY[node] * k_tm[2*6+1];
    vz += k_epicZ[node] * k_tm[2*6+2];
    vz -= k_posk;

    // Dipole kernel: phi += (d . v) / |v|^3.
    float rr    = vx*vx + vy*vy + vz*vz;
    float rlen  = (float)sqrt(rr);
    float rcube = (float)(rlen*rr);
    float ddotv = k_dpl[0]*vx;
    ddotv += k_dpl[1]*vy;
    ddotv += k_dpl[2]*vz;
    float inv3 = 1/rcube;
    k_epicPOTold[node] += ddotv*inv3;
}
/*
 * k_dpl_Nendo: accumulate one current dipole's 1/r^3 potential contribution
 * at k_NendoB endocardial nodes, writing into k_endoHnnA starting at
 * k_offset (the caller passes offset 0 for the B node set and NendoB for
 * the C set, with the matching coordinate arrays).  One thread per node;
 * tail threads beyond k_NendoB exit at the guard.  Node grid coordinates
 * are rotated by the 3x6 matrix k_tm (row stride 6, columns 0-2 used) and
 * offset by the heart origin before subtracting the dipole position.
 * The term-by-term accumulation order matches the reference version.
 */
__global__ static void k_dpl_Nendo(float k_posi,float k_posj,float k_posk,
float k_HRTx0,float k_HRTy0,float k_HRTz0,int k_NendoB,int k_offset,float *k_dpl,
float *k_endoHnnA,short int *k_endoBx,short int *k_endoBy,short int *k_endoBz,
float *k_tm)
{
    int node = blockDim.x * blockIdx.x + threadIdx.x;
    if (node >= k_NendoB) return;   // grid-tail guard

    // World-space vector from the dipole position to this endocardial node.
    float vx = k_HRTx0;
    vx += k_endoBx[node] * k_tm[0];
    vx += k_endoBy[node] * k_tm[1];
    vx += k_endoBz[node] * k_tm[2];
    vx -= k_posi;

    float vy = k_HRTy0;
    vy += k_endoBx[node] * k_tm[1*6];
    vy += k_endoBy[node] * k_tm[1*6+1];
    vy += k_endoBz[node] * k_tm[1*6+2];
    vy -= k_posj;

    float vz = k_HRTz0;
    vz += k_endoBx[node] * k_tm[2*6];
    vz += k_endoBy[node] * k_tm[2*6+1];
    vz += k_endoBz[node] * k_tm[2*6+2];
    vz -= k_posk;

    // Dipole kernel: phi += (d . v) / |v|^3.
    float rr    = vx*vx + vy*vy + vz*vz;
    float rlen  = (float)sqrt(rr);
    float rcube = (float)(rlen*rr);
    float ddotv = k_dpl[0]*vx;
    ddotv += k_dpl[1]*vy;
    ddotv += k_dpl[2]*vz;
    float inv3 = 1/rcube;
    k_endoHnnA[k_offset + node] += ddotv*inv3;
}
/*
 * k_dpl_nPos_2: add one dipole's 1/r^3 potential contribution at the
 * centroid of every body-surface triangle ((NL-2)*2 triangles).
 * k_r holds the NL node coordinates as three planes x[NL], y[NL], z[NL];
 * d_tnd holds the three 1-based node indices per triangle, one plane of
 * (NL-2)*2 entries per vertex slot.  One thread per triangle.
 */
__global__ static void k_dpl_nPos_2(float k_posi,float k_posj,float k_posk,float *k_dpl,float *k_r,float *d_surfPOTi,
short int *d_tnd)
{
    float ax,ay,az,r1,r2,r3,dr,rv3;
    int n0,n1,n2;
    int n=blockDim.x * blockIdx.x + threadIdx.x;
    // Bounds guard (was commented out): the kernel is launched with
    // gridDim*blockDim >= (NL-2)*2, so any excess tail threads must not
    // index past the triangle tables or the output array.
    if (n >= (NL-2)*2) return;
    // 1-based vertex indices of triangle n.
    n0=d_tnd[n]-1;
    n1=d_tnd[(NL-2)*2+n]-1;
    n2=d_tnd[(NL-2)*2*2+n]-1;
    // Vector from the dipole position to the triangle centroid.
    ax=(k_r[n0]+k_r[n1]+k_r[n2])/3-k_posi;
    ay=(k_r[NL+n0]+k_r[NL+n1]+k_r[NL+n2])/3-k_posj;
    az=(k_r[2*NL+n0]+k_r[2*NL+n1]+k_r[2*NL+n2])/3-k_posk;
    // Dipole kernel: phi += (d . v) / |v|^3.
    r2=ax*ax+ay*ay+az*az;
    r1=(float)sqrt(r2);
    r3=(float)(r1*r2);
    dr=k_dpl[0]*ax;
    dr+=k_dpl[1]*ay;
    dr+=k_dpl[2]*az;
    rv3=1/r3;
    d_surfPOTi[n]+=dr*rv3;
}
/*
 * k_dpl_nPos: add one dipole's potential (k_POTi) and surface-normal
 * derivative (k_der) contribution at each of the k_nPos electrode nodes.
 * One thread per node; written for a single-block launch <<<1, k_nPos>>>
 * (only threadIdx.x is used).  k_r / k_rn hold node coordinates / normals
 * as three planes of NL floats each (x, y, z).
 */
__global__ void k_dpl_nPos(float k_posi,float k_posj,float k_posk,int k_nPos,float *k_dpl,
float *k_POTi,float *k_der,float *k_r ,float *k_rn )
{
    float ax,ay,az,r1,r2,r3,r5,dr,ds,rv3,bx,by,bz,ret_der,ret_POTi;
    int n=threadIdx.x;
    // Tail guard: k_nPos was previously passed but never used, so any
    // launch with more than k_nPos threads would have read and written
    // past the node arrays.
    if (n >= k_nPos) return;
    // Vector from the dipole position to node n.
    ax=k_r[n];
    ay=k_r[NL+n];
    az=k_r[2*NL+n];
    ax = ax - k_posi;
    ay = ay - k_posj;
    az = az - k_posk;
    r2=ax*ax+ay*ay+az*az;
    r1=(float)sqrt(r2);
    r3=(float)(r1*r2);
    r5=(float)(r2*r3);
    // Potential: (d . v) / |v|^3; derivative uses grad of the dipole field.
    dr=k_dpl[0]*ax+k_dpl[1]*ay+k_dpl[2]*az;
    ds=3*dr/r5;
    rv3=1/r3;
    bx=k_dpl[0]*rv3-ax*ds;
    by=k_dpl[1]*rv3-ay*ds;
    bz=k_dpl[2]*rv3-az*ds;
    // Accumulate the normal derivative: der[n] += rn . b.
    ret_der = k_der[n];
    ret_der += k_rn[n]*bx;
    ret_der += k_rn[NL+n]*by;
    ret_der += k_rn[2*NL+n]*bz;
    k_der[n] = ret_der;
    // Accumulate the potential: POTi[n] += (d . v) / |v|^3.
    ret_POTi = k_POTi[n];
    ret_POTi += dr*rv3;
    k_POTi[n] = ret_POTi;
}
/*
 * gpu_freetransdata: release the read-only device tables uploaded by
 * gpu_transdata (transform matrix, endocardial B/C node coordinates,
 * surface node coordinates/normals and triangle index table).
 * Return codes of cudaFree are ignored; the bare outer parentheses are
 * left over from stripped CUDA_SAFE_CALL wrappers.
 * NOTE(review): the buffers also allocated in gpu_transdata — d_epicX/Y/Z,
 * d_epicPOTold, d_POTi, d_der, d_endoHnnA, d_surfPOTi — are NOT freed
 * here; the commented-out code in cudamain suggests they were meant to be
 * released elsewhere.  Confirm, or accept the one-shot leak at shutdown.
 */
extern "C" void gpu_freetransdata()
{
(cudaFree(d_tm));
(cudaFree(d_endoBx));(cudaFree(d_endoBy));(cudaFree(d_endoBz));
(cudaFree(d_endoCx));(cudaFree(d_endoCy));(cudaFree(d_endoCz));
(cudaFree(d_r));
(cudaFree(d_rn));
(cudaFree(d_tnd));
}
//int main(int argc, char** argv)
/*
 * cudamain: initialize the CUDA runtime (selecting a usable device via
 * InitCUDA) and return the number of CUDA devices found, or 0 when
 * initialization fails.  argc/argv are accepted for interface
 * compatibility with the caller but are not used.
 */
extern "C" short int cudamain(int argc, char** argv)
{
    fprintf(stdout, "before \n");
    fflush(stdout);
    if(!InitCUDA()) {
        return 0;
    }
    fflush(stdout);
    int count = 0;
    short int GPUnumber;
    // Re-query the device count; treat an API failure as "no devices"
    // rather than reading the stale initial value.
    if (cudaGetDeviceCount(&count) != cudaSuccess) {
        count = 0;
    }
    GPUnumber=(short int)count;
    printf("CUDA is OK=%d\n",GPUnumber);
    return GPUnumber;
}
/*
 * gpu_transdata: upload the read-only geometry tables to the GPU and
 * allocate the per-step working buffers, once per process.
 * - surface node coordinates g_r and normals g_rn (3 pointer-planes of NL
 *   floats) are flattened to contiguous x|y|z layout before the copy;
 * - triangle vertex indices g_tnd are flattened the same way;
 * - the 3x6 transform matrix g_tm is flattened row-major;
 * - epicardial and endocardial (B and C set) node coordinates are copied
 *   as-is.
 * Destination pointers (d_r, d_rn, d_tnd, d_tm, d_epic*, d_endo*, and the
 * working buffers d_epicPOTold, d_POTi, d_der, d_endoHnnA, d_surfPOTi) are
 * file-scope device globals.  Return codes of cudaMalloc/cudaMemcpy are
 * ignored; the bare outer parentheses are left over from stripped
 * CUDA_SAFE_CALL wrappers.  Freeing is split between gpu_freetransdata and
 * (commented-out) shutdown code.
 */
extern "C" void gpu_transdata(short int g_epicX[Nepic],short int g_epicY[Nepic],short int g_epicZ[Nepic],short int *g_tnd[3],float *g_r[3],float *g_rn[3],short int g_endoBx[NENDO*ND3],short int g_endoBy[NENDO*ND3],short int g_endoBz[NENDO*ND3],short int g_endoCx[NENDO*ND3],short int g_endoCy[NENDO*ND3],short int g_endoCz[NENDO*ND3],float g_tm[3][6])
{ // (translated) allocate device space for the read-only tables and upload them; also allocate the working buffers used during computation
int i,j;
//float *d_r[3],*d_rn[3],*d_tm;
// Host staging arrays: flatten the 3 coordinate planes into one block so a
// single cudaMemcpy suffices.
float cg_r[NL*3],cg_rn[NL*3];
//if(!InitCUDA()) {
//printf("CUDA error");
// //return 0;
//}
for(i=0;i<3;i++)
for(j=0;j<NL;j++)
{
cg_r[i*NL+j]=*(g_r[i]+j);
cg_rn[i*NL+j]=*(g_rn[i]+j);
}
( cudaMalloc((void**) &d_r, sizeof(float) * NL*3));
( cudaMemcpy(d_r, cg_r, sizeof(float) * NL*3, cudaMemcpyHostToDevice));
( cudaMalloc((void**) &d_rn, sizeof(float) * NL*3));
( cudaMemcpy(d_rn, cg_rn, sizeof(float) * NL*3, cudaMemcpyHostToDevice));
// Triangle index table, flattened the same way (one plane per vertex slot).
short int cg_tnd[(NL-2)*2*3];
for(i=0;i<3;i++)
for(j=0;j<(NL-2)*2;j++)
{
cg_tnd[i*(NL-2)*2+j]=*(g_tnd[i]+j);
}
( cudaMalloc((void**) &d_tnd, sizeof(short int) * (NL-2)*2*3));
( cudaMemcpy(d_tnd, cg_tnd, sizeof(short int) * (NL-2)*2*3, cudaMemcpyHostToDevice));
//for(i=0;i<3;i++)
//{
// //( cudaMalloc((void**) &d_r[i], sizeof(float) * NL));
// //( cudaMemcpy((d_r[i]), (g_r[i]), sizeof(float) * NL, cudaMemcpyHostToDevice));
// //( cudaMalloc((void**) &d_rn[i], sizeof(float) * NL));
// //( cudaMemcpy((d_rn[i]), (g_rn[i]), sizeof(float) * NL, cudaMemcpyHostToDevice));
// ( cudaMalloc((void**) &d_tnd[i], sizeof(short int) * (NL-2)*2));
// ( cudaMemcpy((d_tnd[i]), (g_tnd[i]), sizeof(short int) * (NL-2)*2, cudaMemcpyHostToDevice));
//};
// 3x6 transform matrix, flattened row-major (kernels index it with
// row stride 6).
float cg_tm[3*6];
for(i=0;i<3;i++)
for(j=0;j<6;j++)
{
cg_tm[i*6+j]=*(g_tm[i]+j);
}
( cudaMalloc((void**) &d_tm, sizeof(float) * 3 * 6));
( cudaMemcpy(d_tm, cg_tm, (sizeof(float) * 3 * 6), cudaMemcpyHostToDevice));
// Epicardial node coordinates.
( cudaMalloc((void**) &d_epicX, sizeof(short int) * Nepic));
( cudaMalloc((void**) &d_epicY, sizeof(short int) * Nepic));
( cudaMalloc((void**) &d_epicZ, sizeof(short int) * Nepic));
( cudaMemcpy((d_epicX),(g_epicX) , (sizeof(short int) * Nepic), cudaMemcpyHostToDevice));
( cudaMemcpy(d_epicY,g_epicY , sizeof(short int) * Nepic, cudaMemcpyHostToDevice));
( cudaMemcpy(d_epicZ, g_epicZ, sizeof(short int) * Nepic, cudaMemcpyHostToDevice));
// Endocardial node coordinates, B and C sets.
( cudaMalloc((void**) &d_endoBx, sizeof(short int) * NENDO*ND3));
( cudaMalloc((void**) &d_endoBy, sizeof(short int) * NENDO*ND3));
( cudaMalloc((void**) &d_endoBz, sizeof(short int) * NENDO*ND3));
( cudaMalloc((void**) &d_endoCx, sizeof(short int) * NENDO*ND3));
( cudaMalloc((void**) &d_endoCy, sizeof(short int) * NENDO*ND3));
( cudaMalloc((void**) &d_endoCz, sizeof(short int) * NENDO*ND3));
( cudaMemcpy((d_endoBx),(g_endoBx) , (sizeof(short int) * NENDO*ND3), cudaMemcpyHostToDevice));
( cudaMemcpy(d_endoBy,g_endoBy , sizeof(short int) * NENDO*ND3, cudaMemcpyHostToDevice));
( cudaMemcpy(d_endoBz, g_endoBz, sizeof(short int) * NENDO*ND3, cudaMemcpyHostToDevice));
( cudaMemcpy(d_endoCx,g_endoCx , sizeof(short int) * NENDO*ND3, cudaMemcpyHostToDevice));
( cudaMemcpy(d_endoCy,g_endoCy , sizeof(short int) * NENDO*ND3, cudaMemcpyHostToDevice));
( cudaMemcpy(d_endoCz,g_endoCz , sizeof(short int) * NENDO*ND3, cudaMemcpyHostToDevice));
// (translated) working buffers are allocated once here so later time steps
// need no further cudaMalloc calls.
( cudaMalloc((void**) &d_epicPOTold, sizeof(float) * Nepic));
( cudaMalloc((void**) &d_POTi, sizeof(float) * NL));
( cudaMalloc((void**) &d_der, sizeof(float) * NL));
( cudaMalloc((void**) &d_endoHnnA, sizeof(float) * 2*NENDO*ND3));
( cudaMalloc((void**) &d_surfPOTi, sizeof(float) * (NL-2)*2));
}
//extern "C" void gpu_BSPitmm_Malloc(float *g_POTi,float g_der[NL],float *g_endoHnnA,float *g_surfPOTi)
//{
// ( cudaMalloc((void**) &d_epicPOTold, sizeof(float) * Nepic));
// ( cudaMalloc((void**) &d_POTi, sizeof(float) * NL));
// ( cudaMalloc((void**) &d_der, sizeof(float) * NL));
// ( cudaMalloc((void**) &d_endoHnnA, sizeof(float) * 2*NENDO*ND3));
// ( cudaMalloc((void**) &d_surfPOTi, sizeof(float) * (NL-2)*2));
//}
/*
 * gpu_BSPitmm_HostToDevice: zero the per-time-step device accumulators
 * before a new BSPitmm pass.  cudaMemset is byte-wise, but value 0 also
 * yields float 0.0f, so this is a valid float clear.
 * The host-pointer parameters are unused: the original upload path was
 * replaced by device-side zeroing (see the commented copies below); the
 * signature is kept for the existing callers.
 */
extern "C" void gpu_BSPitmm_HostToDevice(float *g_POTi,float g_der[NL],float *g_endoHnnA,float *g_surfPOTi)
{
cudaMemset(d_epicPOTold, 0, sizeof(float) * Nepic);
cudaMemset(d_POTi, 0, sizeof(float) * NL);
cudaMemset(d_der, 0, sizeof(float) * NL);
cudaMemset(d_endoHnnA, 0, sizeof(float) * 2*NENDO*ND3);
cudaMemset(d_surfPOTi, 0, sizeof(float) * (NL-2)*2);
//( cudaMemcpy((d_POTi), (g_POTi), sizeof(float) * NL, cudaMemcpyHostToDevice));
//( cudaMemcpy((d_der), (g_der), sizeof(float) * NL, cudaMemcpyHostToDevice));
//( cudaMemcpy((d_endoHnnA), (g_endoHnnA), sizeof(float) * 2*NENDO*ND3, cudaMemcpyHostToDevice));
//( cudaMemcpy((d_surfPOTi), (g_surfPOTi), sizeof(float) * (NL-2)*2, cudaMemcpyHostToDevice));
}
/*
 * gpu_BSPitmm_DeviceToHost: copy the accumulated results of the k_dpl_*
 * kernels (epicardial potentials, node potentials and derivatives,
 * endocardial accumulators, surface-triangle potentials) back to the host.
 * The synchronous cudaMemcpy calls also act as the synchronization point
 * for all preceding default-stream kernel launches.  Return codes are
 * ignored (stripped CUDA_SAFE_CALL wrappers).
 */
extern "C" void gpu_BSPitmm_DeviceToHost(float *g_epicPOTold,float *g_POTi,float g_der[NL],float *g_endoHnnA,float *g_surfPOTi)
{
( cudaMemcpy((g_epicPOTold), (d_epicPOTold), sizeof(float) * Nepic, cudaMemcpyDeviceToHost));
( cudaMemcpy((g_POTi), (d_POTi), sizeof(float) * NL, cudaMemcpyDeviceToHost));
( cudaMemcpy((g_der), (d_der), sizeof(float) * NL, cudaMemcpyDeviceToHost));
( cudaMemcpy((g_endoHnnA),(d_endoHnnA) , sizeof(float) * 2*NENDO*ND3, cudaMemcpyDeviceToHost));
( cudaMemcpy((g_surfPOTi),(d_surfPOTi) , sizeof(float) * (NL-2)*2, cudaMemcpyDeviceToHost));
}
/*
 * gpu_dpl_all: upload one dipole (g_dpl, 3 floats) and accumulate its
 * contribution into all device-side result buffers: torso-node potentials/
 * derivatives (k_dpl_nPos), endocardial B then C node sets (k_dpl_Nendo),
 * surface-triangle potentials (k_dpl_nPos_2) and — when do_epicPOT==1 —
 * epicardial potentials (k_dpl_Nepic).  All launches use the default
 * stream, so they serialize with each other and with the host copy.
 * Host pointers g_POTi/g_der/g_endoHnnA/g_epicPOTold and the matrix g_tm
 * are unused here — kernels read the device-resident copies; the signature
 * is kept for the existing callers.
 */
extern "C" void gpu_dpl_all(short int do_epicPOT,float g_posi,float g_posj,float g_posk,short int g_nPos,float g_dpl[3],float *g_POTi,float g_der[NL],
float g_HRTx0,float g_HRTy0,float g_HRTz0,int g_NendoB,int g_NendoC,
float *g_endoHnnA,short int *g_endoBx,short int *g_endoBy,short int *g_endoBz,float g_tm[3][6],float *g_epicPOTold)
{
    // Reuse one 3-float device dipole buffer across calls: this function
    // runs once per dipole per time step, and a cudaMalloc/cudaFree pair
    // per call is needlessly expensive.  The 12 bytes are intentionally
    // never freed.  The synchronous cudaMemcpy below serializes with any
    // still-running default-stream kernels from the previous call, so the
    // buffer is never overwritten while in use.
    static float *d_dpl = 0;
    if (d_dpl == 0) {
        cudaMalloc((void**) &d_dpl, sizeof(float) * 3);
    }
    cudaMemcpy(d_dpl, g_dpl, sizeof(float) * 3, cudaMemcpyHostToDevice);
    // Torso electrode nodes (single block, one thread per node).
    k_dpl_nPos<<<1, g_nPos>>>(g_posi,g_posj,g_posk,g_nPos,d_dpl,d_POTi,d_der,d_r ,d_rn);
    // Endocardial B set (offset 0) then C set (offset g_NendoB).
    k_dpl_Nendo<<<6, 512>>>(g_posi,g_posj,g_posk,g_HRTx0,g_HRTy0,g_HRTz0,g_NendoB,0,d_dpl,d_endoHnnA,d_endoBx,d_endoBy,d_endoBz,d_tm);
    k_dpl_Nendo<<<6, 512>>>(g_posi,g_posj,g_posk,g_HRTx0,g_HRTy0,g_HRTz0,g_NendoC,g_NendoB,d_dpl,d_endoHnnA,d_endoCx,d_endoCy,d_endoCz,d_tm);
    // Body-surface triangle centroids.
    k_dpl_nPos_2<<<2, 342>>>(g_posi,g_posj,g_posk,d_dpl,d_r,d_surfPOTi,d_tnd);
    // Epicardial nodes (only when requested by the caller).
    if (do_epicPOT==1) k_dpl_Nepic<<<Nepic/512+1, 512>>>(d_epicX,d_epicY,d_epicZ,g_posi,g_posj,g_posk,g_HRTx0,g_HRTy0,g_HRTz0,d_dpl,d_epicPOTold,d_tm,Nepic);
}
/*
 * gpu_dpl_Nepic: upload one dipole and accumulate its contribution into
 * the epicardial potentials (d_epicPOTold) only.  g_tm and g_epicPOTold
 * are unused — the kernel reads the device-resident copies; the signature
 * is kept for the existing callers.
 */
extern "C" void gpu_dpl_Nepic(float g_posi,float g_posj,float g_posk,float g_HRTx0,float g_HRTy0,float g_HRTz0,
float g_dpl[3],float g_tm[3][6],float *g_epicPOTold)
{
    // Cache the 3-float device dipole buffer across calls instead of a
    // cudaMalloc/cudaFree pair per invocation (this runs once per dipole).
    // The synchronous cudaMemcpy serializes with prior default-stream
    // kernels, so the buffer cannot be overwritten while in use.
    static float *d_dpl = 0;
    if (d_dpl == 0) {
        cudaMalloc((void**) &d_dpl, sizeof(float) * 3);
    }
    cudaMemcpy(d_dpl, g_dpl, sizeof(float) * 3, cudaMemcpyHostToDevice);
    k_dpl_Nepic<<<Nepic/512+1, 512>>>(d_epicX,d_epicY,d_epicZ,g_posi,g_posj,g_posk,g_HRTx0,g_HRTy0,g_HRTz0,d_dpl,d_epicPOTold,d_tm,Nepic);
}
/*
 * gpu_dpl_Nendo: upload one dipole and accumulate its contribution into
 * the endocardial accumulator d_endoHnnA for one node set of g_NendoBC
 * nodes, starting at g_offset.  g_offset < 100 selects the B coordinate
 * set; anything larger selects the C set (which is stored after the B
 * entries in d_endoHnnA).
 * NOTE(review): the g_endoBx/By/Bz and g_tm arguments are ignored — the
 * kernel always reads the device-resident d_endoB*/d_endoC*/d_tm tables;
 * the signature is kept for the existing callers.
 */
extern "C" void gpu_dpl_Nendo(float g_posi,float g_posj,float g_posk,float g_HRTx0,float g_HRTy0,float g_HRTz0,
int g_NendoBC,int g_offset,float g_dpl[3],float *g_endoHnnA,
short int *g_endoBx,short int *g_endoBy,short int *g_endoBz,float g_tm[3][6])
{
    // Cache the 3-float device dipole buffer across calls instead of a
    // cudaMalloc/cudaFree pair per invocation (this runs once per dipole).
    // The synchronous cudaMemcpy serializes with prior default-stream
    // kernels, so the buffer cannot be overwritten while in use.
    static float *d_dpl = 0;
    if (d_dpl == 0) {
        cudaMalloc((void**) &d_dpl, sizeof(float) * 3);
    }
    cudaMemcpy(d_dpl, g_dpl, sizeof(float) * 3, cudaMemcpyHostToDevice);
    if (g_offset<100)
    {
        k_dpl_Nendo<<<6, 512>>>(g_posi,g_posj,g_posk,g_HRTx0,g_HRTy0,g_HRTz0,g_NendoBC,g_offset,d_dpl,d_endoHnnA,d_endoBx,d_endoBy,d_endoBz,d_tm);
    }
    else
    {
        k_dpl_Nendo<<<6, 512>>>(g_posi,g_posj,g_posk,g_HRTx0,g_HRTy0,g_HRTz0,g_NendoBC,g_offset,d_dpl,d_endoHnnA,d_endoCx,d_endoCy,d_endoCz,d_tm);
    };
}
/*
 * gpu_dpl_nPos_2: upload one dipole and accumulate its contribution into
 * the body-surface triangle potentials (d_surfPOTi) only.
 */
extern "C" void gpu_dpl_nPos_2(float g_posi,float g_posj,float g_posk,float g_dpl[3])
{
    // Cache the 3-float device dipole buffer across calls instead of a
    // cudaMalloc/cudaFree pair per invocation (this runs once per dipole).
    // The synchronous cudaMemcpy serializes with prior default-stream
    // kernels, so the buffer cannot be overwritten while in use.
    static float *d_dpl = 0;
    if (d_dpl == 0) {
        cudaMalloc((void**) &d_dpl, sizeof(float) * 3);
    }
    cudaMemcpy(d_dpl, g_dpl, sizeof(float) * 3, cudaMemcpyHostToDevice);
    k_dpl_nPos_2<<<2, 342>>>(g_posi,g_posj,g_posk,d_dpl,d_r,d_surfPOTi,d_tnd);
}
/*
 * gpu_dpl_nPos: upload one dipole and accumulate its contribution into the
 * torso-node potentials (d_POTi) and normal derivatives (d_der) only.
 * NOTE(review): g_POTi and g_der are unused — results stay on the GPU
 * until gpu_BSPitmm_DeviceToHost copies them back; the signature is kept
 * for the existing callers.
 */
extern "C" void gpu_dpl_nPos(float g_posi,float g_posj,float g_posk,short int g_nPos,float g_dpl[3],float *g_POTi,float g_der[NL])
{
    // Cache the 3-float device dipole buffer across calls instead of a
    // cudaMalloc/cudaFree pair per invocation (this runs once per dipole).
    // The synchronous cudaMemcpy serializes with prior default-stream
    // kernels, so the buffer cannot be overwritten while in use.
    static float *d_dpl = 0;
    if (d_dpl == 0) {
        cudaMalloc((void**) &d_dpl, sizeof(float) * 3);
    }
    cudaMemcpy(d_dpl, g_dpl, sizeof(float) * 3, cudaMemcpyHostToDevice);
    // Single block, one thread per electrode node.
    k_dpl_nPos<<<1, g_nPos>>>(g_posi,g_posj,g_posk,g_nPos,d_dpl,d_POTi,d_der,d_r ,d_rn);
}
void XCTcalm(void) {
// FILE *fp;
void wtXCTm(short int,short int,short int,short int);
void bbDLYm(short int,short int,short int);
void rdXCTm(short int,short int,short int,short int);
short int itmp, tmp;
short int iStm,ires,irp,irel,ist,kBB;
float phsft,mxDLY,mACCl,icross,delt;
char mCell,iCell,kCell;
short int *iACTv[4];
short int *iACTvOld[4];
short int *jACTv[4];
short int *kACTv[4];
short int *iXCT[NK];
short int *iXCTapd[NK];
short int *iXCTOld[NK];
short int iseqx[12]={-1,-1, 0, 0, 1, 0, 1, 1, 0, 0,-1, 0};
short int iseqy[12]={ 0, 1, 1, 0, 0, 1, 0,-1,-1, 0, 0,-1};
short int iseqz[12]={ 0, 0, 0,-1,-1,-1, 0, 0, 0, 1, 1, 1};
short int ix,iy,iz,jx,jy,jz,iv,l;
short int jdist,jx0,jy0,jz0,is,ICL,ivel;
short int iSTOP, iS1S2, dS1S2Old, iCell5Ex;
long i,j,k,nACTv,mACTv,nACTvOld;
long nblck,nStep,nbrch;
// >>>>>>> aniso >>>>>>
float xani,yani,zani,dani,elp;
float dxani,dyani,dzani;
short int itms1=0;
// ---- for vtr aniso use
// storing the ellipsoid propagation times ---
//--------- maximum excitation time Step: maxXctStep -------------
for(i=0;i<4;i++) {
iACTv[i] = (short int *) malloc(50000*ND3*2);
iACTvOld[i] = (short int *) malloc(50000*ND3*2);
jACTv[i] = (short int *) malloc(50000*ND3*2);
kACTv[i] = (short int *) malloc(50000*ND3*2);
if((iACTv[i]==NULL)||(iACTvOld[i]==NULL)||
(jACTv[i]==NULL)||(kACTv[i]==NULL)) {
MessageBox(NULL,"Out of memory !",NULL,MB_OK);
return;
}
}
for(i=0;i<NK;i++) {
iXCT[i] = (short int *) malloc(NI*NJ*2);
iXCTapd[i] = (short int *) malloc(NI*NJ*2);
iXCTOld[i] = (short int *) malloc(NI*NJ*2);
if((iXCT[i]==NULL)||(iXCTOld[i]==NULL)) {
MessageBox(NULL,"Out of memory !",NULL,MB_OK);
return;
}
}
for(i=0;i<4;i++) {
for(j=0;j<50000*ND3;j++) {
*(iACTv[i]+j)=0;
*(iACTvOld[i]+j)=0;
*(jACTv[i]+j)=0;
*(kACTv[i]+j)=0;
}
}
// --- file mapXCT is initialized with INFTIME ----
for(i=0;i<NCYCL;i++) {
for(j=0;j<50000*ND3;j++) {
*(mapXCTm[i]+j)=INFTIME;
}
}
for(k=0;k<NK;k++) {
for(j=0;j<NJ;j++) {
for(i=0;i<NI;i++) {
*(iXCT[k]+j*NI+i)=INFTIME;
*(iXCTapd[k]+j*NI+i)=0;
*(iXCTOld[k]+j*NI+i)=INFTIME;
}
}
}
mxcycle=0;
short int tested[NCELL];
for(i=0;i<NCELL;i++)
tested[i]=0;
for(i=0;i<nttl;i++) {
jx=ipttl[0][i]; /*<Comment by ALF> pos of ith cell*/
jy=ipttl[1][i];
jz=ipttl[2][i];
iCell=*(mapCell[jz]+jy*NI+jx); /*<Comment by ALF> cell type index */
if(tested[iCell-1]==0)
{*(iparm+(iCell-1)*NPARM+18)+=ipttl[3][i];tested[iCell-1]=1;
if (iCell!=1) {*(iparm+(1-1)*NPARM+18)+=ipttl[3][i];//maxXctStep+=ipttl[3][i];
}
}
//TRACE("\nNTTL (%3d %3d %3d) %2d",jx,jy,jz,iCell);
// set pacemake time of no. 5 cells
if (iCell==5) {
ipstm[0][i]=100*ND/(ipttl[3][i]+1);
if((ipstm[0][i]*ipttl[3][i])<100*ND) ipstm[0][i]+=1;
//ipstm[0][i]=100/(ipttl[3][i]+1);
//if((ipstm[0][i]*ipttl[3][i])<100) ipstm[0][i]+=1;
//TRACE("\nCell 5, (%d %d %d) %d %d",jx,jy,jz, ipttl[3][i],ipstm[0][i]);
continue;
}
// iparm(n,18) = BCL basic cycle length (ms) of pacing
// iparm(n,20) = inc increament of BCL(ms/cycle)
ipstm[0][i]=*(iparm+(iCell-1)*NPARM+17);
ipstm[1][i]=*(iparm+(iCell-1)*NPARM+19);
ipstm[2][i]=0;
}
nblck=0;
ic=0;
nACTv=0;
iS1S2=0;
iCell5Ex=0;
// ------ stimulus: pacemaker spontanous firing -------
while (1) {
// In this loop, ipttl[3][i] is mainly used to
// decide ipstm[0][i] and itself
jx=0;
jy=0;
jz=0;
iStm=0;
excited=0;
for (i=0;i<nttl;i++) {
jx=ipttl[0][i];
jy=ipttl[1][i];
jz=ipttl[2][i];
iStm=ipttl[3][i];
iCell=*(mapCell[jz]+jy*NI+jx);
//TRACE("\nStimulus (%3d %3d %3d)%2d %d %d",jx,jy,jz,iCell,iStm, mxcycle);
//TRACE("\nbreak1 mxcycle=%d NCYCL=%d ic=%d iCell=%d, iStm=%d, mS2BN=%d,ipstm=%d",mxcycle, NCYCL,ic, iCell, iStm,*(iparm+(iCell-1)*NPARM+18),ipstm[0][i]);
if (iCell==5) continue; // ignore BB
if (iStm != ic) continue;
// ic: i-th time Step
// nACTv: number of exitation cells at ic time but cellType != 5 (BB)
// --- end ---
//TRACE("\nbreak1 mxcycle=%d NCYCL=%d ic=%d iCell=%d, iStm=%d",mxcycle, NCYCL,ic, iCell, iStm);
nACTv=nACTv+1;
*(iACTv[0]+nACTv)=jx;
*(iACTv[1]+nACTv)=jy;
*(iACTv[2]+nACTv)=jz;
*(iACTv[3]+nACTv)=*(iparm+(iCell-1)*NPARM+31); /*<Comment by ALF> iparm store each cell's parameters*/
// iparm(n,32): conduction speed
wtXCTm(ic,jx,jy,jz);
//if (jx==101 && jy==77 && jz==6) TRACE("\nA mxcycle=%d at ic=%d, iCell=%d",mxcycle,ic,iCell);
//if (iCell <3) TRACE("\nA %d %d %d %d %d %d",iCell,jx,jy,jz,ic,nACTv);
// write to file
// mxcycle: maximum cycle
if(mxcycle>=NCYCL) {
break;
}
// --- store current time to iXCT and last time to iXCTOld -->
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx); // init is INFTIME
*(iXCT[jz]+jy*NI+jx)=ic;
excited=1;
// Update ipttl[3][i]
// iparm(n,18) = BCL: basic cycle length (ms) of pacing
// Normally, only SN has this parameter > 0
/*if(*(iparm+(iCell-1)*NPARM+17)>0) {
if ((iS1S2==1) && (mS2BN>1)) {
itmp=ipttl[3][i]+mS2CL;
mS2BN--;
} else {
itmp=ipttl[3][i]+ipstm[0][i];
}
dS1S2Old=ipstm[2][i];
ipstm[0][i] = ipstm[0][i] + ipstm[1][i];
iCell5Ex=0;
// ipstm[1][i] is the step
// iparm(n,19) = pBN: beat number
// judge by ipttl[3][i]
if(itmp>*(iparm+(iCell-1)*NPARM+18)) continue;
if ((mS2ST/3 > ipttl[3][i]) &&(mS2ST/3 < itmp)) {
ipttl[3][i]=(short int)(mS2ST/3);
iS1S2=1;
} else {
ipttl[3][i]=itmp;
}
ipstm[2][i]=itmp-ipttl[3][i];
//TRACE("\nTime=%d, %d, %d, %d, %d %d",ic,itmp,ipttl[3][i],dS1S2Old, ipstm[0][i],ipstm[1][i]);
continue;
}*/
if(*(iparm+(iCell-1)*NPARM+17)>0) {
if (iCell==1) {
itmp=ipttl[3][i]+ipstm[0][i];
dS1S2Old=ipstm[2][i];
ipstm[0][i] = ipstm[0][i] + ipstm[1][i];
iCell5Ex=0;
if(itmp>*(iparm+(iCell-1)*NPARM+18)) continue;
ipttl[3][i]=itmp; continue;}
else
{
itmp=ipttl[3][i]+ipstm[0][i];
dS1S2Old=ipstm[2][i];
ipstm[0][i] = ipstm[0][i] + ipstm[1][i];
iCell5Ex=0;
if(itmp>*(iparm+(iCell-1)*NPARM+18)-ipstm[0][i]+3) continue;
ipttl[3][i]=itmp;
}
continue;
}
// iparm(n,24) = ICL: intrinsic cycle length(ms)
ipttl[3][i] = ipttl[3][i] + *(iparm+(iCell-1)*NPARM+23);
}
// ---- display the excitation number ----
// go to next Step
nblck = nblck + nACTv;
//TRACE("\nmxcycle =%d Step=%3d, number=%ld nblck=%ld ",mxcycle,ic,nACTv, nblck);
ic = ic + 1;
//TRACE("\nbreak2 ic=%d maxXctStep=%d ",ic, maxXctStep);
if (ic>=maxXctStep) break;
if (nACTv == 0) continue;
/**
* very important
*/
// --------- propagation (2000)------------>
nACTvOld=0;
// nACTv: at moment t, the number of excited cells
for (i=1;i<=nACTv;i++) {
excited=1;
ix=*(iACTv[0]+i);
iy=*(iACTv[1]+i);
iz=*(iACTv[2]+i);
iv=*(iACTv[3]+i);
iCell=*(mapCell[iz]+iy*NI+ix);
//if (ix == 64 && iy == 50 && iz == 64) TRACE("\nB AVN %d",iCell);
//----------- low conduction speed part ----------->
// iparm(n,32): conduction speed
if (*(iparm+(iCell-1)*NPARM+31)<=0) continue;
if (iCell==5) iCell5Ex=1;
//if (iCell==8) TRACE("\nCell=8 %d, %d, %d, ic=%d, %d %d",ix,iy,iz,ic,iv,mBCL);
// 100 = Conduction Speed of ATR?
if (iv<100) {
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=ix;
*(iACTvOld[1]+nACTvOld)=iy;
*(iACTvOld[2]+nACTvOld)=iz;
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31)+*(mapSpeed[iz]+iy*NI+ix); //added by zhu
if (iCell==5) {
/*ibbDLY=0;
bbDLYm(ix,iy,iz);
if (ibbDLY>0)
*(iACTvOld[3]+nACTvOld)=iv+ibbDLY;
TRACE("\nBB, %d",*(iACTvOld[3]+nACTvOld));
*/
ibbDLY=0;
// Add for BB interval by hui wang
ibbSTEP=0;
bbDLYm(ix,iy,iz);
// End of add for BB interval by hui wang, modified by zhu
if (ibbDLY>0) {ibbSTEP+=nbbSTEP;ibbDLY=100*ND/(ibbSTEP+1);}
if(ibbDLY>0 && (ibbDLY*ibbSTEP)<100*ND) ibbDLY+=1;
if (ibbDLY>0)
*(iACTvOld[3]+nACTvOld)=iv+ibbDLY;
else
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31);
continue;
}
/*if (iCell==3 || iCell==6) {
if (*(iXCTOld[iz]+iy*NI+ix)==INFTIME)
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31);
else {
irel = *(iXCT[iz]+iy*NI+ix)-*(iXCTOld[iz]+iy*NI+ix)-(*(iparm+NPARM*(iCell-1)+4)+*(mapAPD[iz]+iy*NI+ix))/3;
//irel = *(iXCT[iz]+iy*NI+ix)-*(iXCTOld[iz]+iy*NI+ix)-(*(iparm+NPARM*(iCell-1)+4))/3;
irel = 3*irel;
if (irel<*(iparm+NPARM*(iCell-1)+5)) {
tmp=100+*(iparm+NPARM*(iCell-1)+32)
-irel*(*(iparm+NPARM*(iCell-1)+32))/(*(iparm+NPARM*(iCell-1)+5));
if (tmp!=0) {
ivel = 100*(*(iparm+NPARM*(iCell-1)+31))/tmp;
} else {
ivel=*(iparm+NPARM*(iCell-1)+31);
}
} else {
// <--- time of RRP stored in iparm(6) ---
ivel=*(iparm+NPARM*(iCell-1)+31);
}
*(iACTvOld[3]+nACTvOld)=iv+ivel;}
}*/
/*else if (iCell==3) {
if (iCell5Ex==0) {
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31)- dS1S2Old/20;
TRACE("\nCell=3 E dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
} else {
if (mBCL<600&&dS1S2Old<140/3) {
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31)- (dS1S2Old+67)/33;
TRACE("\nCell=3 A dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
} else if (mBCL<600&&dS1S2Old>=140/3) {
*(iACTvOld[3]+nACTvOld)=iv;
TRACE("\nCell=3 B dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
} else if (mBCL>=600&&dS1S2Old<=210/3) {
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31);
TRACE("\nCell=3 C dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
} else {
*(iACTvOld[3]+nACTvOld)=iv+*(iparm+NPARM*(iCell-1)+31)- dS1S2Old/12;
TRACE("\nCell=3 D dS1S2Old %d, %d, ic=%d, %d %d",dS1S2Old,*(iACTvOld[3]+nACTvOld),ic,iv,mBCL);
}
}
}*/
continue;
}
// ------- neighbourhood search (2100) -------->
// go to iv > 100 situation and set ires = the part of iv < 100
ires=iv-100*(int)(iv/100);
for (l=0;l<12;l++) {
jx=ix+iseqx[l];
if ((jx<=-1)||(jx>(NI-1))) continue; // >=0 <NI
jy=iy+iseqy[l];
if ((jy<=-1)||(jy>(NJ-1))) continue; // >=0 <NJ
jz=iz+iseqz[l];
if ((jz<=-1)||(jz>(NK-1))) continue; // >=0 <NK
// >>>>> aniso: within the ellpisoid ? >>>>>>>>>>>>
if (ANISO==1 && iCell==7) {
dani=local(ix,iy,iz);
//TRACE("\nx,y,z,dani, %2d %2d %2d %f",ix,iy,iz,dani);
// -- if can't solve local coordinates, treat as isotropic -->
if (dani > 0.0001) {
//lctran(iseqx[l],iseqy[l],iseqz[l],dani,xani,yani,zani);
xani=iseqx[l]*tmswf[0][0]+iseqy[l]*tmswf[0][1]+iseqz[l]*tmswf[0][2];
yani=iseqx[l]*tmswf[1][0]+iseqy[l]*tmswf[1][1]+iseqz[l]*tmswf[1][2];
zani=iseqx[l]*tmswf[2][0]+iseqy[l]*tmswf[2][1]+iseqz[l]*tmswf[2][2];
dxani=xani*yaxis[1]*zaxis[2]+yani*yaxis[2]*zaxis[0]
+zani*yaxis[0]*zaxis[1]-zani*yaxis[1]*zaxis[0]
-xani*yaxis[2]*zaxis[1]-yani*yaxis[0]*zaxis[2];
dyani=xaxis[0]*yani*zaxis[2]+xaxis[1]*zani*zaxis[0]
+xaxis[2]*xani*zaxis[1]-xaxis[2]*yani*zaxis[0]
-xaxis[0]*zani*zaxis[1]-xaxis[1]*xani*zaxis[2];
dzani=xaxis[0]*yaxis[1]*zani+xaxis[1]*yaxis[2]*xani
+xaxis[2]*yaxis[0]*yani-xaxis[2]*yaxis[1]*xani
-xaxis[0]*yaxis[2]*yani-xaxis[1]*yaxis[0]*zani;
xani=dxani/dani;
yani=dyani/dani;
zani=dzani/dani;
// itms=maps(ix,iy,iz)+1
//TRACE("\nd %f %f %f %f",dxani,xani,yaxis[0],zaxis[0]);
itms1=*(iXCTapd[iz]+iy*NI+ix);
elp=xani*xani/vt2[itms1]+
yani*yani/vt2[itms1]+
zani*zani/vl2[itms1];
// write(0,*) x,y,z,elp
// TRACE("\n %d %f",itms1,elp);
if (elp > 1.0) continue;
}
}
// <<<<<<<<<<<<<<<<<<<< aniso <<<<<<<<<<<<<<<<<<<
mCell=*(mapCell[jz]+jy*NI+jx);
if ((iCell<=7)&&(mCell<=7)&&(((iCell-mCell)>1)||
((iCell-mCell)<-1))) continue;
if ((*(iparm+NPARM*(mCell-1)+33)>0)&&( (mCell>7 && iCell>7 && mCell!=iCell) || (mCell<=7 && iCell>mCell) || (mCell>7 && !(iCell==mCell || iCell==2)))) continue;
//if ((*(iparm+NPARM*(mCell-1)+33)>0)&&( (mCell<=7 && iCell>mCell) || (mCell>7 && !(iCell==mCell || iCell==2)))) continue;
//if ((*(iparm+NPARM*(mCell-1)+33)>0)&&(iCell>mCell)) continue;
//if ((*(iparm+NPARM*(mCell-1)+33)<0)&&(iCell<mCell)) continue;
//if ((*(iparm+NPARM*(mCell-1)+33)<0)&&( (mCell<=7 && iCell<mCell) || (mCell>7 && !(iCell==mCell || iCell==7)))) continue;
if ((*(iparm+NPARM*(mCell-1)+33)<0)&&( (mCell>7 && iCell>7 && mCell!=iCell) || (mCell<=7 && iCell<mCell) || (mCell>7 && !(iCell==mCell || iCell==7)))) continue;
//if (jx == 64 && jy == 50 && jz == 64) TRACE("\nC AVN %d",mCell);
if (mCell<=0) continue; // continue;
if (mCell>=15) continue; // continue;
if (mCell==5) iCell5Ex=1;
//if (iCell==8) TRACE("\nCell=8 %d, %d, %d, ic=%d, %d %d",jx,jy,jz,ic);;
// --- coupling interval ------>
idltt=ic-*(iXCT[jz]+jy*NI+jx);
if (*(iXCT[jz]+jy*NI+jx)==INFTIME) idltt=INFTIME;
// --- change in cycle length ------>
idltc=idltt+*(iXCTOld[jz]+jy*NI+jx)-*(iXCT[jz]+jy*NI+jx);
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME) idltc=0;
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
*(iparm+(1-1)*NPARM+23)>0)
idltc = ic-*(iXCT[jz]+jy*NI+jx)-*(iparm+(1-1)*NPARM+23);
// rdXCT(ic,jx,jy,jz);
// irp = time in phase 2 + mapACT/3 +plateau of potential in phase 3 *idltc/100
// --- absolute refractory period ------>
//irp=(*(iparm+NPARM*(mCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3
// +*(iparm+(mCell-1)*NPARM+10)*idltc/100;
irp=(*(iparm+NPARM*(mCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3;
//if (mCell==6)
// irp=(*(iparm+NPARM*(mCell-1)+4))/3;
irel=idltt-irp;
// ++++++++ in absolute refractory period ? +++++++
if (irel<=0) continue;
//if (*(mapAPD[jz]+jy*NI+jx)>20) idltc = 2*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>20 && idltc<0) idltc = 2*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>30 && idltc<0) idltc = 3*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>30 && idltc>0) idltc = 2*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>40 && idltc>0) idltc = 2*idltc;
//if (*(mapAPD[jz]+jy*NI+jx)>40 && idltc>0) idltc = 2*idltc;
//if (mCell==3) TRACE("\nCell=3 %d, %d, %d,%d,%d,%d",irp,ic,*(iXCT[jz]+jy*NI+jx),*(mapAPD[jz]+jy*NI+jx),*(iparm+(mCell-1)*NPARM+10),idltc);
*(mapAPD[jz]+jy*NI+jx) += *(iparm+(mCell-1)*NPARM+10)*idltc*3/100; //added by Zhu
//if (mCell==3) TRACE("\nCell=3 %d, %d, %d",*(mapAPD[jz]+jy*NI+jx)/3,idltc,irp);
if (*(iXCT[jz]+jy*NI+jx)==INFTIME && mCell==3) {irel=INFTIME;*(mapAPD[jz]+jy*NI+jx)=0;}
// --- find automaticity in stimul data ----
// iparm(n,24), ICL: intrinsic cycle length (ms)
iSTOP =0;
if (*(iparm+NPARM*(mCell-1)+23)>0) { // !=0 August 10, 1996
// <--- next touch time should be beyound ARP of the cell --
for (is=0;is<nttl;is++) {
if (jx!=ipttl[0][is]) continue;
if (jy!=ipttl[1][is]) continue;
if (jz!=ipttl[2][is]) continue;
// --- iparm(23) used for adjusting intermediate change
// of EP intrinsic cycle length --->
ICL = *(iparm+NPARM*(mCell-1)+23);
ist = ic-*(iXCT[jz]+jy*NI+jx);
// PRT: protection indicator
// --- no protection ---->
if (*(iparm+NPARM*(mCell-1)+24)==0) {
if (ist<=irp) continue; //{iSTOP=1;break;}
//if (iSTOP==1)
ipttl[3][is]=ic+ICL; // ICL/3
/******************/
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=jx;
*(iACTvOld[1]+nACTvOld)=jy;
*(iACTvOld[2]+nACTvOld)=jz;
*(iACTvOld[3]+nACTvOld)=*(iparm+NPARM*(mCell-1)+31)+ires;
wtXCTm(ic,jx,jy,jz);
if (mxcycle>=NCYCL) {iSTOP=1;break;}
//if (ic==*(iXCT[jz]+jy*NI+jx)) continue;
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;//added to by zhu
irel=0;
excited=1;
TRACE("\n %d, %d %d %d %d %d",*(iXCTOld[jz]+jy*NI+jx),*(iXCT[jz]+jy*NI+jx),ic,jx,jy,jz);
/******************/
//iSTOP=1;
continue; //break; // rewrite condition
}
if (idltt==INFTIME) continue;
//ist = ic-*(iXCT[jz]+jy*NI+jx);
// if (ist<=irp) goto loop21; // August 10, 1996
if (ist<=irp) continue; //{iSTOP=1;break;}
phsft =(float)100.*(idltt/ICL);
mxDLY =(float)*(iparm+NPARM*(mCell-1)+25);
mACCl =(float)*(iparm+NPARM*(mCell-1)+26);
if (mxDLY == 0 && mACCl == 0) continue;
icross=(float)*(iparm+NPARM*(mCell-1)+27);
if (icross == 0 || icross == 100) continue;
if (phsft<=icross)
delt=phsft*mxDLY/icross;
else
delt=mACCl-(phsft-icross)*mACCl/(100-icross);
// -- store touch time --->
// -- modify next stimulating time --->
ipttl[3][is]=ipttl[3][is]+(int)(ICL*delt/100);
//TRACE("\ntime=%4d,ixt=%4d,idltt=%4d,icl=%4d,phsft=%4d,intermediate=%4d",
// ic, *(iXCT[jz]+jy*NI+jx),idltt,ICL, (int)phsft, ipttl[3][is]);
// change value after each touch time
// avoiding from successive modification by surrounding cells
if (ic==*(iXCT[jz]+jy*NI+jx)) continue;
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
irel=0;
excited=1;
//iSTOP=1;
continue; //break; // rewrite condition
}
}
if (iSTOP==1) continue;
if (irel==0) continue;
// +++++ special processing for BB +++++
if (mCell==5) {
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
// Add for BB interval by hui wang modified by Zhu
// variable ibbSTEP, nbbSTEP are added to store steps by first BB
// ibbSTEP is a function in bbDLYm(i,j,k)
nbbSTEP=0;
ibbDLY=0;
ibbSTEP=0;
bbDLYm(jx,jy,jz);
nbbSTEP=ibbSTEP;
// end of add for BB interval by hui wang
//ic+=10; // add by hw, BB interval
//TRACE("\n nHB = %d, ic= %d",nHB,ic);
for(kBB=0;kBB<nBB;kBB++) {
jx=iBB[0][kBB];
jy=iBB[1][kBB];
jz=iBB[2][kBB];
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=jx;
*(iACTvOld[1]+nACTvOld)=jy;
*(iACTvOld[2]+nACTvOld)=jz;
//*(iACTvOld[3]+nACTvOld)=100;
ibbDLY=0;
// Add for BB interval by hui wang,modified by zhu
ibbSTEP=0;
bbDLYm(jx,jy,jz);
ibbSTEP+=nbbSTEP;
ibbDLY=100*ND/(ibbSTEP+1);
if((ibbDLY*ibbSTEP)<100*ND) ibbDLY+=1;
// End of add for BB interval by hui wang
*(iACTvOld[3]+nACTvOld)=ibbDLY;
wtXCTm(ic,jx,jy,jz);
//if (jx==101 && jy==77 && jz==6) TRACE("\nB mxcycle=%d at ic=%d, iCell=%d",mxcycle,ic,mCell);
//if (mCell >2 && mCell <6) TRACE("\nB %d %d %d %d %d %d",mCell,jx,jy,jz,ic,nACTvOld);
if (mxcycle>=NCYCL) break;
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
excited=1;
}
continue;
}
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=jx;
*(iACTvOld[1]+nACTvOld)=jy;
*(iACTvOld[2]+nACTvOld)=jz;
wtXCTm(ic,jx,jy,jz);
//TRACE("\nbreak3 mxcycle=%d NCYCL=%d ",mxcycle, NCYCL);
if (mxcycle>=NCYCL) break;
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
irel = 3*irel;
//if (*(iXCTOld[jz]+jy*NI+jx)!=INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
// irel>*(iparm+NPARM*(mCell-1)+5))
// *(mapAPD[jz]+jy*NI+jx)=0;
// time of RRP stored in iparm(6)
if ((irel)<*(iparm+NPARM*(mCell-1)+5)) {
tmp=100+*(iparm+NPARM*(mCell-1)+32)
-irel*(*(iparm+NPARM*(mCell-1)+32))/(*(iparm+NPARM*(mCell-1)+5));
if (tmp!=0) {
ivel = 100*(*(iparm+NPARM*(mCell-1)+31))/tmp;
} else {
ivel=*(iparm+NPARM*(mCell-1)+31);
}
} else {
// <--- time of RRP stored in iparm(6) ---
ivel=*(iparm+NPARM*(mCell-1)+31);
}
*(mapSpeed[jz]+jy*NI+jx)=ivel-*(iparm+NPARM*(mCell-1)+31);//added by Zhu
// test results
//TRACE("\nmcell=%4d,ic=%4d,idltt=%4d,idltc=%4d,ivel=%4d",mCell,ic,idltt,idltc,ivel);
if (iCell!=mCell) {
if (mCell == 5) {
bbDLYm(jx,jy,jz);
*(iACTvOld[3]+nACTvOld)=ibbDLY;
//TRACE("\n BB2=%d, %d %d (%d %d %d) ic=%d ",*(iACTvOld[3]+nACTvOld),iv,ibbDLY,ix,iy,iz, ic);
continue;
}
*(iACTvOld[3]+nACTvOld)=ivel;
continue;
}
*(iACTvOld[3]+nACTvOld)=ivel+ires;
}
// <------- END of neighbourhood search (2100) -----
// >>>>>>>> anisotropy >>>>>
if (ANISO==1 && iCell == 7) {
// ltrat==2;
if (*(iXCTapd[iz]+iy*NI+ix) < 2) {
nACTvOld=nACTvOld+1;
*(iACTvOld[0]+nACTvOld)=ix;
*(iACTvOld[1]+nACTvOld)=iy;
*(iACTvOld[2]+nACTvOld)=iz;
//*(iACTvOld[3]+nACTvOld)=ires+
// *(iparm+NPARM*(iCell-1)+31);
*(iACTvOld[3]+nACTvOld)=ires+
*(iparm+NPARM*(iCell-1)+31)+*(mapSpeed[iz]+iy*NI+ix);
*(iXCTapd[iz]+iy*NI+ix)+=1;
} else {
*(iXCTapd[iz]+iy*NI+ix)=0;
}
}
// <<<<<<<<<<<
}
// <------- END of propagation (2000) -----
// +++++++++++ for high speed ++++++++
mACTv=nACTvOld;
// ------- propagation (1000) -------->
for(i=1;i<=nACTvOld;i++) {
idist=(int)(*(iACTvOld[3]+i)/100);
if (idist<2) continue;
*(jACTv[0]+1)=*(iACTvOld[0]+i);
*(jACTv[1]+1)=*(iACTvOld[1]+i);
*(jACTv[2]+1)=*(iACTvOld[2]+i);
ires=*(iACTvOld[3]+i)-idist*100;
nStep=0;
nbrch=1;
jdist=1;
while (1) {
for (j=1;j<=nbrch;j++) {
jx0=*(jACTv[0]+j);
jy0=*(jACTv[1]+j);
jz0=*(jACTv[2]+j);
mCell=*(mapCell[jz0]+jy0*NI+jx0);
if (mCell==5) iCell5Ex=1;
for (l=0;l<12;l++) {
jx=jx0+iseqx[l];
if ((jx<=-1)||(jx>(NI-1))) continue; // <0 or >=NI
jy=jy0+iseqy[l];
if ((jy<=-1)||(jy>(NJ-1))) continue; // <0 or >=NJ
jz=jz0+iseqz[l];
if ((jz<=-1)||(jz>(NK-1))) continue; // <0 or >=NK
kCell = *(mapCell[jz]+jy*NI+jx);
//if (jx == 64 && jy == 50 && jz == 64) TRACE("\nE AVN %d",kCell);
if (kCell != mCell) continue;
//++++++++ in effective refractory period ? +++++++
// >>>>> aniso: within the ellpisoid ? >>>>>>>>>>>>
if (ANISO==1 && mCell==7) {
dani=local(jx0,jy0,jz0);
//TRACE("\nx,y,z,dani, %2d %2d %2d %f",ix,iy,iz,dani);
// -- if can't solve local coordinates, treat as isotropic -->
if (dani > 0.0001) {
//lctran(iseqx[l],iseqy[l],iseqz[l],dani,xani,yani,zani);
xani=iseqx[l]*tmswf[0][0]+iseqy[l]*tmswf[0][1]+iseqz[l]*tmswf[0][2];
yani=iseqx[l]*tmswf[1][0]+iseqy[l]*tmswf[1][1]+iseqz[l]*tmswf[1][2];
zani=iseqx[l]*tmswf[2][0]+iseqy[l]*tmswf[2][1]+iseqz[l]*tmswf[2][2];
dxani=xani*yaxis[1]*zaxis[2]+yani*yaxis[2]*zaxis[0]
+zani*yaxis[0]*zaxis[1]-zani*yaxis[1]*zaxis[0]
-xani*yaxis[2]*zaxis[1]-yani*yaxis[0]*zaxis[2];
dyani=xaxis[0]*yani*zaxis[2]+xaxis[1]*zani*zaxis[0]
+xaxis[2]*xani*zaxis[1]-xaxis[2]*yani*zaxis[0]
-xaxis[0]*zani*zaxis[1]-xaxis[1]*xani*zaxis[2];
dzani=xaxis[0]*yaxis[1]*zani+xaxis[1]*yaxis[2]*xani
+xaxis[2]*yaxis[0]*yani-xaxis[2]*yaxis[1]*xani
-xaxis[0]*yaxis[2]*yani-xaxis[1]*yaxis[0]*zani;
xani=dxani/dani;
yani=dyani/dani;
zani=dzani/dani;
// itms=maps(ix,iy,iz)+1
//TRACE("\nd %f %f %f %f",dxani,xani,yaxis[0],zaxis[0]);
itms1=*(iXCTapd[jz0]+jy0*NI+jx0);
elp=xani*xani/vt2[itms1]+
yani*yani/vt2[itms1]+
zani*zani/vl2[itms1];
// write(0,*) x,y,z,elp
// TRACE("\n %d %f",itms1,elp);
if (elp > 1.0) continue;
}
}
idltt=ic-*(iXCT[jz]+jy*NI+jx);
if (*(iXCT[jz]+jy*NI+jx)==INFTIME) idltt=INFTIME;
// --- change in cycle length ------>
idltc=idltt+*(iXCTOld[jz]+jy*NI+jx)-*(iXCT[jz]+jy*NI+jx);
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME) idltc=0;
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
*(iparm+(1-1)*NPARM+23)>0)
idltc = ic-*(iXCT[jz]+jy*NI+jx)-*(iparm+(1-1)*NPARM+23);
// rdXCT(ic,jx,jy,jz);
// irp = time in phase 2 + mapACT/3 +plateau of potential in phase 3 *idltc/100
// --- absolute refractory period ------>
//irp=(*(iparm+NPARM*(mCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3
// +*(iparm+(mCell-1)*NPARM+10)*idltc/100;
irp=(*(iparm+NPARM*(mCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3;
//if (mCell==6)
// irp=(*(iparm+NPARM*(mCell-1)+4))/3;
irel=idltt-irp;
if (irel<=0) continue; // continue;
if (*(iXCT[jz]+jy*NI+jx)==INFTIME && mCell==3) {irel=INFTIME;*(mapAPD[jz]+jy*NI+jx)=0;}
*(mapAPD[jz]+jy*NI+jx) += *(iparm+(mCell-1)*NPARM+10)*idltc*3/100; //added by Zhu
/*
idltt=ic-*(iXCT[jz]+jy*NI+jx);
if (*(iXCT[jz]+jy*NI+jx)==INFTIME) idltt=INFTIME;
idltc=idltt+*(iXCTOld[jz]+jy*NI+jx)-*(iXCT[jz]+jy*NI+jx);
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME) idltc=0;
if (*(iXCTOld[jz]+jy*NI+jx)==INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
*(iparm+(1-1)*NPARM+23)>0)
idltc = ic-*(iXCT[jz]+jy*NI+jx)-*(iparm+(1-1)*NPARM+23);
// rdXCT(ic,jx,jy,jz);
// irp=(*(iparm+NPARM*(kCell-1)+4)+*(mapACT[jz]+jy*NI+jx))/3+
// *(iparm+(kCell-1)*NPARM+10)*idltc/100;
//irp=(*(iparm+NPARM*(kCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3+
// *(iparm+(kCell-1)*NPARM+10)*idltc/100;
irp=(*(iparm+NPARM*(kCell-1)+4)+*(mapAPD[jz]+jy*NI+jx))/3;
*(mapAPD[jz]+jy*NI+jx) += *(iparm+(kCell-1)*NPARM+10)*idltc*3/100; //added by Zhu
irel=idltt-irp;
if (*(iXCT[jz]+jy*NI+jx)==INFTIME) irel=INFTIME;
*/
irel = 3*irel;
//if (*(iXCTOld[jz]+jy*NI+jx)!=INFTIME && *(iXCT[jz]+jy*NI+jx)!=INFTIME &&
// irel>=*(iparm+NPARM*(kCell-1)+5))
// *(mapAPD[jz]+jy*NI+jx)=0;
if ((irel)<*(iparm+NPARM*(kCell-1)+5)) {
tmp=100+*(iparm+NPARM*(mCell-1)+32)
-irel*(*(iparm+NPARM*(mCell-1)+32))/(*(iparm+NPARM*(mCell-1)+5));
if (tmp!=0) {
ivel = 100*(*(iparm+NPARM*(mCell-1)+31))/tmp;
} else {
ivel=*(iparm+NPARM*(mCell-1)+31);
}
} else {
ivel=*(iparm+NPARM*(kCell-1)+31);
}
*(mapSpeed[jz]+jy*NI+jx)=ivel-*(iparm+NPARM*(kCell-1)+31);//added by Zhu
nStep=nStep+1;
*(kACTv[0]+nStep)=jx;
*(kACTv[1]+nStep)=jy;
*(kACTv[2]+nStep)=jz;
// nStep++;
mACTv=mACTv+1;
*(iACTvOld[0]+mACTv)=jx;
*(iACTvOld[1]+mACTv)=jy;
*(iACTvOld[2]+mACTv)=jz;
*(iACTvOld[3]+mACTv)=ivel+ires;
// mACTv++;
// TRACE(" D%d,",mACTv);
wtXCTm(ic,jx,jy,jz);
//if (jx==101 && jy==77 && jz==6) TRACE("\nD mxcycle=%d at ic=%d, iCell=%d",mxcycle,ic,kCell);
//if (kCell >2 && kCell <6) TRACE("\nD %d %d %d %d %d %d",kCell,jx,jy,jz,ic,mACTv);
//TRACE("\nbreak4 mxcycle=%d NCYCL=%d ",mxcycle, NCYCL);
if (mxcycle>=NCYCL) {
//TRACE("\nbreak5 iSTOP=%d mxcycle=%d,NCYCL=%d",iSTOP, mxcycle, NCYCL);
iSTOP =1;
break;
}
*(iXCTOld[jz]+jy*NI+jx)=*(iXCT[jz]+jy*NI+jx);
*(iXCT[jz]+jy*NI+jx)=ic;
excited=1;
}
/*// >>>>>>>> anisotropy >>>>>
if (ANISO==1 && mCell == 7) {
// ltrat==3;
if (*(iXCTapd[jz0]+jy0*NI+jx0) < 3) {
mACTv=mACTv+1;
*(iACTvOld[0]+mACTv)=jx0;
*(iACTvOld[1]+mACTv)=jy0;
*(iACTvOld[2]+mACTv)=jz0;
*(iACTvOld[3]+mACTv)=ivel+ires;
//*(iACTvOld[3]+nACTvOld)=ires+
// *(iparm+NPARM*(iCell-1)+31);
*(iACTvOld[3]+nACTvOld)=ires+
*(iparm+NPARM*(mCell-1)+31)+*(mapSpeed[jz0]+jy0*NI+jx0);
*(iXCTapd[jz0]+jy0*NI+jx0)+=1;
} else {
*(iXCTapd[jz0]+jy0*NI+jx0)=0;
}
}
// <<<<<<<<<<<*/
if (iSTOP ==1) break;
}
if (iSTOP ==1) break;
if (nStep==0) break; // continue;
jdist=jdist+1;
if (jdist>=idist) break; // continue;
for(k=1;k<=nStep;k++) {
*(jACTv[0]+k)=*(kACTv[0]+k);
*(jACTv[1]+k)=*(kACTv[1]+k);
*(jACTv[2]+k)=*(kACTv[2]+k);
}
nbrch=nStep;
nStep=0;
}
}
//TRACE("\nbreak5 iSTOP=%d ",iSTOP);
if (iSTOP ==1) break;
// <------- END of propagation (1000) -------------
if (excited == 0) break;
nACTv=mACTv;
// nblck=nblck+nACTv;
for(i=1;i<=nACTv;i++) {
for(j=0;j<4;j++) {
*(iACTv[j]+i)=*(iACTvOld[j]+i);
}
}
} // END of whole while loop
TRACE("\nmxcycle=%d",mxcycle);
mxcycle++; // hui
// add HB info
for (itmp=0; itmp<50*ND; itmp++) {
for (tmp=0;tmp<NCYCL;tmp++) {
vHB[tmp][itmp]=0;
}
}
for (itmp=0; itmp<nHB; itmp++) {
l=iHB[0][itmp];
j=iHB[1][itmp];
k=iHB[2][itmp];
if (itmp==0) i=*(locXCT[k]+j*NJ+l); // Consider only the point near AV Node
for (tmp=0;tmp<mxcycle;tmp++) {
vHB[tmp][itmp]=*(mapXCTm[tmp]+i);
}
}
// Save
CFile f;
CFileException e;
//short int index;
//index=filepath.FindOneOf(".");
//filepath.SetAt(index+1,'x');
//filepath.SetAt(index+2,'c');
//filepath.SetAt(index+3,'t');
if (!f.Open( dataPath+"tour.xct ", CFile::modeCreate | CFile::modeWrite, &e )) {
#ifdef _DEBUG
afxDump << "File could not be opened " << e.m_cause << "\n";
#endif
}
//f.Write(&mxcycle,2);
f.Write(&miBN,2);
f.Write(&ic,2);
f.Write(&totalCell,4);
for(j=0;j<mxcycle;j++) {
for(i=0;i<totalCell;i++) f.Write(mapXCTm[j]+i,2);
}
f.Close();
/*
FILE * iow;
iow=fopen("fpMapXCTm.txt","wt");
if (iow == NULL) {
fprintf(stderr, "Open .txt for write failed! \n");
return;
}
long temploc;
temploc=*(locXCT[45]+22*NJ+33);
fprintf(iow,"33 22 45 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[40]+30*NJ+32);
fprintf(iow,"32 30 40 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[48]+20*NJ+30);
fprintf(iow,"30 20 48 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[56]+8*NJ+26);
fprintf(iow,"26 8 56 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[62]+10*NJ+21);
fprintf(iow,"21 10 62 %3d\n",*(mapXCTm[0]+temploc));
temploc=*(locXCT[62]+30*NJ+13);
fprintf(iow,"13 30 62 %3d\n",*(mapXCTm[0]+temploc));
for(l=0;l<mxcycle;l++) {
fprintf(iow,"l=%d\n",l);
for(k=0;k<NK;k++) {
for(j=0;j<NJ;j++) {
for(i=NI-1;i>-1;i--) {
temploc = *(locXCT[k]+j*NJ+i);
if (temploc < 0) fprintf(iow," ");
else fprintf(iow,"%3d ",*(mapXCTm[l]+temploc));
}
fprintf(iow,"j=%d\n",j);
}
fprintf(iow,"k=%d\n",k);
}
}
fclose(iow);
*/
for(i=0;i<4;i++) {
free(iACTv[i]);
free(iACTvOld[i]);
free(jACTv[i]);
free(kACTv[i]);
}
for(i=0;i<NK;i++) {
free(iXCT[i]);
free(iXCTapd[i]);
free(iXCTOld[i]);
}
} |
b4f44d92fb6d159acf58c033f684dc6526013fb9.hip | // !!! This is a file automatically generated by hipify!!!
#include "jacketSDK.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "hip/device_functions.h"
#include <iostream>
const int TPB=128;
// First Rusanov stage: form the half-step face value between each point
// and its right neighbor, with periodic wrap-around in x.
// One thread per element; launch with at least N threads total.
__global__ void wave1D_rusanov1(double * f_nm, double * f_in,
			       double nu,int N){
  const int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= N) return;                          // guard the grid tail
  const int right = (idx + 1 == N) ? 0 : idx + 1; // periodic neighbor
  const double here  = f_in[idx];
  const double ahead = f_in[right];
  // centered average minus a dissipative correction scaled by nu/3
  f_nm[idx] = 0.5 * (ahead + here) - (nu / 3.) * (ahead - here);
}
// Second Rusanov stage: predictor update built from the stage-1 face
// values f_nm, periodic in x. One thread per element.
__global__ void wave1D_rusanov2(double * f_tmp,double * f_nm,
				double * f_in, double nu, int N){
  const int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= N) return;
  const int left = (idx == 0) ? N - 1 : idx - 1;  // periodic neighbor
  f_tmp[idx] = f_in[idx] - (2. * nu / 3.) * (f_nm[idx] - f_nm[left]);
}
// Third Rusanov stage (corrector): combines a five-point difference of
// the current solution f_in, a centered difference of the stage-2
// predictor f_tmp, and an omega-weighted fourth-difference dissipation
// term. Periodic in x; each wrap adds/subtracts N once, so this assumes
// N >= 3 — TODO confirm callers never pass smaller grids.
// One thread per element.
__global__ void wave1D_rusanov3(double * f_next,double * f_tmp,
				double * f_in, double nu, 
				double omega, int N){
  int tid=threadIdx.x+blockIdx.x*blockDim.x;
  if(tid<N){
    // periodic stencil indices: two cells either side of tid
    int x_2m=tid-2;
    if(x_2m<0) x_2m+=N;
    int x_m = tid-1;
    if(x_m<0) x_m+=N;
    int x_p = tid+1;
    if(x_p>(N-1)) x_p-=N;
    int x_2p = tid+2;
    if(x_2p>(N-1)) x_2p-=N;
    // load the five stencil values of the current solution
    double f_2m = f_in[x_2m];
    double f_m = f_in[x_m];
    double f = f_in[tid];
    double f_p = f_in[x_p];
    double f_2p = f_in[x_2p];
    f_next[tid]=f-(nu/24.)*(-2.*f_2p+7.*f_p - 7.*f_m+2.*f_2m)
      -(3.*nu/8.)*(f_tmp[x_p]-f_tmp[x_m])
      -(omega/24.)*(f_2p - 4.*f_p + 6.*f - 4.*f_m + f_2m);
  }
}
/*
 * Jacket MEX entry point: f_next = wave1D_rusanov(f_in, nu, omega).
 * Runs one full Rusanov time step (three kernel stages) over every
 * element of the M-by-N input array f_in, treated as periodic.
 * Returns errNone on success, or an err(...) result on bad arity.
 */
err_t jktFunction(int nlhs,mxArray * plhs[], int nrhs, mxArray * prhs[]){

  if(nrhs!=3)
    return err("Usage: f_next = wave1D_rusanov(f_in,nu,omega)");

  mxArray * m_f_in = prhs[0];
  double nu = mxGetScalar(prhs[1]);
  double omega = mxGetScalar(prhs[2]);

  mxClassID cls = jkt_class(m_f_in);
  const mwSize * dims;
  int status = jkt_dims(m_f_in,&dims);
  (void)status; // presumably a status code — TODO confirm jkt_dims contract
  int M = dims[0];
  int N = dims[1];

  // output plus two scratch buffers for the intermediate stages
  mxArray * m_f_next = plhs[0]=jkt_new(M,N,cls,false);
  mxArray * m_ftmp1 = jkt_new(M,N,cls,false);
  mxArray * m_fnm = jkt_new(M,N,cls,false);

  double * f_in;
  double * f_next;
  double * f_tmp1;
  double * f_nm;
  jkt_mem((void**)&f_in, m_f_in);
  jkt_mem((void**)&f_next,m_f_next);
  jkt_mem((void**)&f_tmp1,m_ftmp1);
  jkt_mem((void**)&f_nm,m_fnm);

  // BUG FIX: the kernels operate on all N*M elements, but the grid was
  // previously sized as (N+TPB-1)/TPB, so with M > 1 most of the array
  // was never touched. Size the grid for the full element count
  // (identical launch when M == 1, so existing behavior is preserved).
  int total = N*M;
  dim3 BLOCKS(TPB,1,1);
  dim3 GRIDS((total+TPB-1)/TPB,1,1);

  hipLaunchKernelGGL(( wave1D_rusanov1), dim3(GRIDS),dim3(BLOCKS), 0, 0, f_nm,f_in,nu,total);
  hipLaunchKernelGGL(( wave1D_rusanov2), dim3(GRIDS),dim3(BLOCKS), 0, 0, f_tmp1,f_nm,f_in,nu,total);
  hipLaunchKernelGGL(( wave1D_rusanov3), dim3(GRIDS),dim3(BLOCKS), 0, 0, f_next,f_tmp1,f_in,nu,omega,total);

  return errNone;
}
| b4f44d92fb6d159acf58c033f684dc6526013fb9.cu | #include "jacketSDK.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_functions.h"
#include <iostream>
const int TPB=128;
// First Rusanov stage: form the half-step face value between each point
// and its right neighbor, with periodic wrap-around in x.
// One thread per element; launch with at least N threads total.
__global__ void wave1D_rusanov1(double * f_nm, double * f_in,
			       double nu,int N){
  const int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= N) return;                          // guard the grid tail
  const int right = (idx + 1 == N) ? 0 : idx + 1; // periodic neighbor
  const double here  = f_in[idx];
  const double ahead = f_in[right];
  // centered average minus a dissipative correction scaled by nu/3
  f_nm[idx] = 0.5 * (ahead + here) - (nu / 3.) * (ahead - here);
}
// Second Rusanov stage: predictor update built from the stage-1 face
// values f_nm, periodic in x. One thread per element.
__global__ void wave1D_rusanov2(double * f_tmp,double * f_nm,
				double * f_in, double nu, int N){
  const int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= N) return;
  const int left = (idx == 0) ? N - 1 : idx - 1;  // periodic neighbor
  f_tmp[idx] = f_in[idx] - (2. * nu / 3.) * (f_nm[idx] - f_nm[left]);
}
// Third Rusanov stage (corrector): combines a five-point difference of
// the current solution f_in, a centered difference of the stage-2
// predictor f_tmp, and an omega-weighted fourth-difference dissipation
// term. Periodic in x; each wrap adds/subtracts N once, so this assumes
// N >= 3 — TODO confirm callers never pass smaller grids.
// One thread per element.
__global__ void wave1D_rusanov3(double * f_next,double * f_tmp,
				double * f_in, double nu, 
				double omega, int N){
  int tid=threadIdx.x+blockIdx.x*blockDim.x;
  if(tid<N){
    // periodic stencil indices: two cells either side of tid
    int x_2m=tid-2;
    if(x_2m<0) x_2m+=N;
    int x_m = tid-1;
    if(x_m<0) x_m+=N;
    int x_p = tid+1;
    if(x_p>(N-1)) x_p-=N;
    int x_2p = tid+2;
    if(x_2p>(N-1)) x_2p-=N;
    // load the five stencil values of the current solution
    double f_2m = f_in[x_2m];
    double f_m = f_in[x_m];
    double f = f_in[tid];
    double f_p = f_in[x_p];
    double f_2p = f_in[x_2p];
    f_next[tid]=f-(nu/24.)*(-2.*f_2p+7.*f_p - 7.*f_m+2.*f_2m)
      -(3.*nu/8.)*(f_tmp[x_p]-f_tmp[x_m])
      -(omega/24.)*(f_2p - 4.*f_p + 6.*f - 4.*f_m + f_2m);
  }
}
/*
 * Jacket MEX entry point: f_next = wave1D_rusanov(f_in, nu, omega).
 * Runs one full Rusanov time step (three kernel stages) over every
 * element of the M-by-N input array f_in, treated as periodic.
 * Returns errNone on success, or an err(...) result on bad arity.
 */
err_t jktFunction(int nlhs,mxArray * plhs[], int nrhs, mxArray * prhs[]){

  if(nrhs!=3)
    return err("Usage: f_next = wave1D_rusanov(f_in,nu,omega)");

  mxArray * m_f_in = prhs[0];
  double nu = mxGetScalar(prhs[1]);
  double omega = mxGetScalar(prhs[2]);

  mxClassID cls = jkt_class(m_f_in);
  const mwSize * dims;
  int status = jkt_dims(m_f_in,&dims);
  (void)status; // presumably a status code — TODO confirm jkt_dims contract
  int M = dims[0];
  int N = dims[1];

  // output plus two scratch buffers for the intermediate stages
  mxArray * m_f_next = plhs[0]=jkt_new(M,N,cls,false);
  mxArray * m_ftmp1 = jkt_new(M,N,cls,false);
  mxArray * m_fnm = jkt_new(M,N,cls,false);

  double * f_in;
  double * f_next;
  double * f_tmp1;
  double * f_nm;
  jkt_mem((void**)&f_in, m_f_in);
  jkt_mem((void**)&f_next,m_f_next);
  jkt_mem((void**)&f_tmp1,m_ftmp1);
  jkt_mem((void**)&f_nm,m_fnm);

  // BUG FIX: the kernels operate on all N*M elements, but the grid was
  // previously sized as (N+TPB-1)/TPB, so with M > 1 most of the array
  // was never touched. Size the grid for the full element count
  // (identical launch when M == 1, so existing behavior is preserved).
  int total = N*M;
  dim3 BLOCKS(TPB,1,1);
  dim3 GRIDS((total+TPB-1)/TPB,1,1);

  wave1D_rusanov1<<<GRIDS,BLOCKS>>>(f_nm,f_in,nu,total);
  wave1D_rusanov2<<<GRIDS,BLOCKS>>>(f_tmp1,f_nm,f_in,nu,total);
  wave1D_rusanov3<<<GRIDS,BLOCKS>>>(f_next,f_tmp1,f_in,nu,omega,total);

  return errNone;
}
|
b41cc1d7e560f9160c6188504b76711e9e0a7300.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* graph.cu
*
* Functions for the graph_t type are implemented here.
*/
#include <stdio.h>
#include "graph.h"
#include "node.h"
// Kernel: put a device-resident graph into its initial empty state
// (zero nodes, all parallel arrays unallocated).
// Launch with a single thread; g must point to device memory.
__global__ void graph_init(graph_t* g) {
	g->node_count = 0;
	g->nodes = NULL;
	g->visited_nodes = NULL;
	g->distances = NULL;
	g->parents = NULL;
}
// Kernel: append node n to graph g's parallel bookkeeping arrays.
// CUDA device code has no realloc(), so every insertion allocates new
// arrays one slot larger, copies the old contents, appends n with
// "unvisited" defaults, and frees the old storage.
// Launch with a single thread.
__global__ void graph_add(graph_t* g, node_t* n) {
	// empty graph: the first insertion allocates from scratch
	if (g->node_count == 0) {
		__graph_add_empty(g, n);
		return;
	}

	int new_count = g->node_count + 1;
	node_t** new_nodes = (node_t**) malloc(sizeof(node_t*) * new_count);
	node_t** new_visited_nodes = (node_t**) malloc(sizeof(node_t*) * new_count);
	// BUG FIX: distances is a float array but was sized with sizeof(int).
	// The two happen to be 4 bytes on current targets, so this worked by
	// accident; size by the element type for correctness/portability.
	float* new_distances = (float*) malloc(sizeof(float) * new_count);
	node_t** new_parents = (node_t**) malloc(sizeof(node_t*) * new_count);

	// copy existing entries into the enlarged arrays
	for (int x = 0; x < g->node_count; x++) {
		new_nodes[x] = g->nodes[x];
		new_visited_nodes[x] = g->visited_nodes[x];
		new_distances[x] = g->distances[x];
		new_parents[x] = g->parents[x];
	}

	// append n with "not yet visited" defaults
	new_nodes[g->node_count] = n;
	new_visited_nodes[g->node_count] = NULL;
	new_distances[g->node_count] = INFINITY;
	new_parents[g->node_count] = NULL;
	g->node_count = new_count;

	// release the old arrays and swap in the new ones
	free(g->nodes);
	free(g->visited_nodes);
	free(g->distances);
	free(g->parents);
	g->nodes = new_nodes;
	g->visited_nodes = new_visited_nodes;
	g->distances = new_distances;
	g->parents = new_parents;
}
// Device helper: first insertion into an empty graph — allocate each
// parallel array with a single slot and seed it with n plus the
// "unvisited" defaults (no visit, infinite distance, no parent).
__device__ static void __graph_add_empty(graph_t* g, node_t* n) {
	g->nodes = (node_t**) malloc(sizeof(node_t*));
	g->visited_nodes = (node_t**) malloc(sizeof(node_t*));
	// BUG FIX: distances holds floats but was allocated with sizeof(int);
	// identical on current 4-byte-int targets, but size by element type.
	g->distances = (float*) malloc(sizeof(float));
	g->parents = (node_t**) malloc(sizeof(node_t*));

	g->nodes[0] = n;
	g->visited_nodes[0] = NULL;
	g->distances[0] = INFINITY;
	g->parents[0] = NULL;

	g->node_count += 1;
}
// Kernel: release all of a graph's device-heap arrays and the graph
// struct itself (allocations made by graph_add/__graph_add_empty and
// whoever allocated g). Launch with a single thread; g dangles after.
__global__ void graph_free(graph_t* g) {
	free(g->nodes);
	free(g->visited_nodes);
	free(g->distances);
	free(g->parents);
	free(g);
}
// Kernel: dump the graph's bookkeeping arrays via device printf.
// When verbose, also prints the raw pointer value of each array.
// Launch with a single thread so the output stays ordered.
__global__ void graph_print(graph_t* g, bool verbose) {
	if (verbose) {
		printf("graph		%p \n", g);
		printf("node_count	%d \n", g->node_count);
		printf("nodes[]		%p \n", g->nodes);
		printf("visited_nodes[]	%p \n", g->visited_nodes);
		printf("distances[]	%p \n", g->distances);
		printf("parents[]	%p \n", g->parents);
	}
	__graph_print_arr_nodes(g);
	__graph_print_arr_visited_nodes(g);
	__graph_print_arr_distances(g);
	__graph_print_arr_parents(g);
	printf("\n");
}
// The four helpers below each dump one of the graph's parallel arrays.
// A missing (NULL) array prints as a single "-" line; otherwise one line
// is printed per node slot, with "-" for empty slots.

__device__ static void __graph_print_arr_nodes(graph_t* g) {
	if (g->nodes == NULL) {
		printf("nodes		-\n");
		return;
	}
	printf("nodes		\n");
	for (int i = 0; i < g->node_count; i++) {
		node_t* entry = g->nodes[i];
		if (entry == NULL)
			printf("	-\n");
		else
			printf("	%c (%p) \n", entry->id, entry);
	}
}

__device__ static void __graph_print_arr_visited_nodes(graph_t* g) {
	if (g->visited_nodes == NULL) {
		printf("visited_nodes	-\n");
		return;
	}
	printf("visited_nodes	\n");
	for (int i = 0; i < g->node_count; i++) {
		node_t* entry = g->visited_nodes[i];
		if (entry == NULL)
			printf("	-\n");
		else
			printf("	%c (%p) \n", entry->id, entry);
	}
}

__device__ static void __graph_print_arr_distances(graph_t* g) {
	if (g->distances == NULL) {
		printf("distances	-\n");
		return;
	}
	printf("distances	\n");
	for (int i = 0; i < g->node_count; i++) {
		float d = g->distances[i];
		if (d == INFINITY)
			printf("	-\n");
		else
			printf("	%.3f \n", d);
	}
}

__device__ static void __graph_print_arr_parents(graph_t* g) {
	if (g->parents == NULL) {
		printf("parents	-\n");
		return;
	}
	printf("parents	\n");
	for (int i = 0; i < g->node_count; i++) {
		node_t* entry = g->parents[i];
		if (entry == NULL)
			printf("	-\n");
		else
			printf("	%c (%p) \n", entry->id, entry);
	}
}
| b41cc1d7e560f9160c6188504b76711e9e0a7300.cu | /*
* graph.cu
*
* Functions for the graph_t type are implemented here.
*/
#include <stdio.h>
#include "graph.h"
#include "node.h"
// Kernel: put a device-resident graph into its initial empty state
// (zero nodes, all parallel arrays unallocated).
// Launch with a single thread; g must point to device memory.
__global__ void graph_init(graph_t* g) {
	g->node_count = 0;
	g->nodes = NULL;
	g->visited_nodes = NULL;
	g->distances = NULL;
	g->parents = NULL;
}
// Kernel: append node n to graph g's parallel bookkeeping arrays.
// CUDA device code has no realloc(), so every insertion allocates new
// arrays one slot larger, copies the old contents, appends n with
// "unvisited" defaults, and frees the old storage.
// Launch with a single thread.
__global__ void graph_add(graph_t* g, node_t* n) {
	// empty graph: the first insertion allocates from scratch
	if (g->node_count == 0) {
		__graph_add_empty(g, n);
		return;
	}

	int new_count = g->node_count + 1;
	node_t** new_nodes = (node_t**) malloc(sizeof(node_t*) * new_count);
	node_t** new_visited_nodes = (node_t**) malloc(sizeof(node_t*) * new_count);
	// BUG FIX: distances is a float array but was sized with sizeof(int).
	// The two happen to be 4 bytes on current targets, so this worked by
	// accident; size by the element type for correctness/portability.
	float* new_distances = (float*) malloc(sizeof(float) * new_count);
	node_t** new_parents = (node_t**) malloc(sizeof(node_t*) * new_count);

	// copy existing entries into the enlarged arrays
	for (int x = 0; x < g->node_count; x++) {
		new_nodes[x] = g->nodes[x];
		new_visited_nodes[x] = g->visited_nodes[x];
		new_distances[x] = g->distances[x];
		new_parents[x] = g->parents[x];
	}

	// append n with "not yet visited" defaults
	new_nodes[g->node_count] = n;
	new_visited_nodes[g->node_count] = NULL;
	new_distances[g->node_count] = INFINITY;
	new_parents[g->node_count] = NULL;
	g->node_count = new_count;

	// release the old arrays and swap in the new ones
	free(g->nodes);
	free(g->visited_nodes);
	free(g->distances);
	free(g->parents);
	g->nodes = new_nodes;
	g->visited_nodes = new_visited_nodes;
	g->distances = new_distances;
	g->parents = new_parents;
}
// Device helper: first insertion into an empty graph — allocate each
// parallel array with a single slot and seed it with n plus the
// "unvisited" defaults (no visit, infinite distance, no parent).
__device__ static void __graph_add_empty(graph_t* g, node_t* n) {
	g->nodes = (node_t**) malloc(sizeof(node_t*));
	g->visited_nodes = (node_t**) malloc(sizeof(node_t*));
	// BUG FIX: distances holds floats but was allocated with sizeof(int);
	// identical on current 4-byte-int targets, but size by element type.
	g->distances = (float*) malloc(sizeof(float));
	g->parents = (node_t**) malloc(sizeof(node_t*));

	g->nodes[0] = n;
	g->visited_nodes[0] = NULL;
	g->distances[0] = INFINITY;
	g->parents[0] = NULL;

	g->node_count += 1;
}
// Kernel: release all of a graph's device-heap arrays and the graph
// struct itself (allocations made by graph_add/__graph_add_empty and
// whoever allocated g). Launch with a single thread; g dangles after.
__global__ void graph_free(graph_t* g) {
	free(g->nodes);
	free(g->visited_nodes);
	free(g->distances);
	free(g->parents);
	free(g);
}
// Kernel: dump the graph's bookkeeping arrays via device printf.
// When verbose, also prints the raw pointer value of each array.
// Launch with a single thread so the output stays ordered.
__global__ void graph_print(graph_t* g, bool verbose) {
	if (verbose) {
		printf("graph		%p \n", g);
		printf("node_count	%d \n", g->node_count);
		printf("nodes[]		%p \n", g->nodes);
		printf("visited_nodes[]	%p \n", g->visited_nodes);
		printf("distances[]	%p \n", g->distances);
		printf("parents[]	%p \n", g->parents);
	}
	__graph_print_arr_nodes(g);
	__graph_print_arr_visited_nodes(g);
	__graph_print_arr_distances(g);
	__graph_print_arr_parents(g);
	printf("\n");
}
// The four helpers below each dump one of the graph's parallel arrays.
// A missing (NULL) array prints as a single "-" line; otherwise one line
// is printed per node slot, with "-" for empty slots.

__device__ static void __graph_print_arr_nodes(graph_t* g) {
	if (g->nodes == NULL) {
		printf("nodes		-\n");
		return;
	}
	printf("nodes		\n");
	for (int i = 0; i < g->node_count; i++) {
		node_t* entry = g->nodes[i];
		if (entry == NULL)
			printf("	-\n");
		else
			printf("	%c (%p) \n", entry->id, entry);
	}
}

__device__ static void __graph_print_arr_visited_nodes(graph_t* g) {
	if (g->visited_nodes == NULL) {
		printf("visited_nodes	-\n");
		return;
	}
	printf("visited_nodes	\n");
	for (int i = 0; i < g->node_count; i++) {
		node_t* entry = g->visited_nodes[i];
		if (entry == NULL)
			printf("	-\n");
		else
			printf("	%c (%p) \n", entry->id, entry);
	}
}

__device__ static void __graph_print_arr_distances(graph_t* g) {
	if (g->distances == NULL) {
		printf("distances	-\n");
		return;
	}
	printf("distances	\n");
	for (int i = 0; i < g->node_count; i++) {
		float d = g->distances[i];
		if (d == INFINITY)
			printf("	-\n");
		else
			printf("	%.3f \n", d);
	}
}

__device__ static void __graph_print_arr_parents(graph_t* g) {
	if (g->parents == NULL) {
		printf("parents	-\n");
		return;
	}
	printf("parents	\n");
	for (int i = 0; i < g->node_count; i++) {
		node_t* entry = g->parents[i];
		if (entry == NULL)
			printf("	-\n");
		else
			printf("	%c (%p) \n", entry->id, entry);
	}
}
|
72aa467dc8c47db33a5030e5d78c95ca8d079c89.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/types.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <hip/hip_runtime_api.h>
#include "../cudaErr.h"
#include "const.h"
#include "Pluser.hpp"
#include "timer.h"
//global variables to be initialized
int shmid;
TYPE *ptr;
/* Attach this process to the System-V shared-memory segment identified by
 * FILENAME/FILEID.  Initializes the globals `shmid` and `ptr`; exits the
 * process with -1 on any IPC failure. */
void init ()
{
	// FILE to key
	key_t key = ftok (FILENAME, FILEID);
	if (key == -1)
	{
		printf ("ftok failed, errno = %s\n", strerror(errno));
		exit (-1);
	}
	// getting SHM id
	shmid = shmget (key, sizeof (TYPE), 0666|IPC_CREAT);
	if (shmid == -1)
	{
		printf ("shmget failed, errno = %s\n", strerror(errno));
		exit (-1);
	}
	// attach the SHM to this process
	ptr = (TYPE *) shmat (shmid, (void *)0, 0);
	// fix: shmat reports failure as (void *)-1, not NULL -- previously this
	// went unchecked and a failed attach would crash later on dereference
	if (ptr == (TYPE *) -1)
	{
		printf ("shmat failed, errno = %s\n", strerror(errno));
		exit (-1);
	}
}
// Debug kernel: print the first element of the shared device buffer.
// No thread index is used, so every launched thread prints the same line;
// launch with <<<1,1>>> for a single message.
// NOTE(review): "%.4f" assumes GPU_DATA_TYPE is a floating-point type -- confirm.
__global__ void print_first (GPU_DATA_TYPE *data)
{
	printf ("before main loop, first element is %.4f\n", data[0]);
}
/* SIGINT handler: stop the GPU profiler before terminating so that the
 * collected profiling data is flushed even when the user hits Ctrl-C. */
void sigInt_handler (int sig)
{
	if (sig != SIGINT)
		return;
	printf ("pluser received SIGINT, calling hipProfilerStop before exiting \n");
	gpuErrchk (hipProfilerStop ());
	exit (0);
}
/* Consumer entry point: attaches to the producer's shared-memory segment
 * and device buffer (via HIP IPC), then runs the update/wait/process/notify
 * cycle, printing each iteration's wall time in nanoseconds. */
int main()
{
	// a previous process should have allocated the SHM, we attach to it
	init ();
	printf ("ptr is %p\n", (void *)ptr);
	gpuErrchk (hipProfilerStart ());
	Pluser *pluser = new Pluser (shmid, ptr);
	pluser->set_GPUIPC_handle (&(ptr->memHandle));
	printf ("init pluser: setting d_data\n");
	// map the producer's device allocation into this process via IPC
	GPU_DATA_TYPE *addr_d_data;
	gpuErrchk (hipIpcOpenMemHandle ((void **)&addr_d_data, *(pluser->get_GPUIPC_handle()), hipIpcMemLazyEnablePeerAccess));
	pluser->set_d_data (addr_d_data);
	// set the signal handling function
	if (signal (SIGINT, sigInt_handler) == SIG_ERR)
	{
		printf ("cannot handle SIGINT\n");
		exit(-1);
	}
	struct timespec stamp, previous_stamp;
	clock_gettime (CLOCK_MONOTONIC, &previous_stamp);
	double wtime;	// per-iteration wall time in nanoseconds
	// main loop
	for (int i = 0; i < 1000000; i++)
	{
		pluser->update ();
		pluser->wait ();
		pluser->process ();
		// timer stops at iteration i+1
		clock_gettime (CLOCK_MONOTONIC, &stamp);
		wtime = (stamp.tv_sec - previous_stamp.tv_sec) * 1000000000 + (stamp.tv_nsec - previous_stamp.tv_nsec);
		printf ("%.4f\n", wtime);
		// timer starts at iteration i
		clock_gettime (CLOCK_MONOTONIC, &previous_stamp);
		pluser->notify ();
		// usleep (20);
	}
	// fix: these resources were previously leaked on normal exit -- unmap
	// the IPC allocation, stop the profiler so its data is flushed, free
	// the Pluser object, and detach the shared-memory segment
	gpuErrchk (hipIpcCloseMemHandle (addr_d_data));
	gpuErrchk (hipProfilerStop ());
	delete pluser;
	shmdt (ptr);
	return 0;
}
| 72aa467dc8c47db33a5030e5d78c95ca8d079c89.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/types.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <cuda_profiler_api.h>
#include "../cudaErr.h"
#include "const.h"
#include "Pluser.hpp"
#include "timer.h"
//global variables to be initialized
int shmid;
TYPE *ptr;
/* Attach this process to the System-V shared-memory segment identified by
 * FILENAME/FILEID.  Initializes the globals `shmid` and `ptr`; exits the
 * process with -1 on any IPC failure. */
void init ()
{
	// FILE to key
	key_t key = ftok (FILENAME, FILEID);
	if (key == -1)
	{
		printf ("ftok failed, errno = %s\n", strerror(errno));
		exit (-1);
	}
	// getting SHM id
	shmid = shmget (key, sizeof (TYPE), 0666|IPC_CREAT);
	if (shmid == -1)
	{
		printf ("shmget failed, errno = %s\n", strerror(errno));
		exit (-1);
	}
	// attach the SHM to this process
	ptr = (TYPE *) shmat (shmid, (void *)0, 0);
	// fix: shmat reports failure as (void *)-1, not NULL -- previously this
	// went unchecked and a failed attach would crash later on dereference
	if (ptr == (TYPE *) -1)
	{
		printf ("shmat failed, errno = %s\n", strerror(errno));
		exit (-1);
	}
}
// Debug kernel: print the first element of the shared device buffer.
// No thread index is used, so every launched thread prints the same line;
// launch with <<<1,1>>> for a single message.
// NOTE(review): "%.4f" assumes GPU_DATA_TYPE is a floating-point type -- confirm.
__global__ void print_first (GPU_DATA_TYPE *data)
{
	printf ("before main loop, first element is %.4f\n", data[0]);
}
/* SIGINT handler: stop the GPU profiler before terminating so that the
 * collected profiling data is flushed even when the user hits Ctrl-C. */
void sigInt_handler (int sig)
{
	if (sig != SIGINT)
		return;
	printf ("pluser received SIGINT, calling cudaProfilerStop before exiting \n");
	gpuErrchk (cudaProfilerStop ());
	exit (0);
}
/* Consumer entry point: attaches to the producer's shared-memory segment
 * and device buffer (via CUDA IPC), then runs the update/wait/process/notify
 * cycle, printing each iteration's wall time in nanoseconds. */
int main()
{
	// a previous process should have allocated the SHM, we attach to it
	init ();
	printf ("ptr is %p\n", (void *)ptr);
	gpuErrchk (cudaProfilerStart ());
	Pluser *pluser = new Pluser (shmid, ptr);
	pluser->set_GPUIPC_handle (&(ptr->memHandle));
	printf ("init pluser: setting d_data\n");
	// map the producer's device allocation into this process via IPC
	GPU_DATA_TYPE *addr_d_data;
	gpuErrchk (cudaIpcOpenMemHandle ((void **)&addr_d_data, *(pluser->get_GPUIPC_handle()), cudaIpcMemLazyEnablePeerAccess));
	pluser->set_d_data (addr_d_data);
	// set the signal handling function
	if (signal (SIGINT, sigInt_handler) == SIG_ERR)
	{
		printf ("cannot handle SIGINT\n");
		exit(-1);
	}
	struct timespec stamp, previous_stamp;
	clock_gettime (CLOCK_MONOTONIC, &previous_stamp);
	double wtime;	// per-iteration wall time in nanoseconds
	// main loop
	for (int i = 0; i < 1000000; i++)
	{
		pluser->update ();
		pluser->wait ();
		pluser->process ();
		// timer stops at iteration i+1
		clock_gettime (CLOCK_MONOTONIC, &stamp);
		wtime = (stamp.tv_sec - previous_stamp.tv_sec) * 1000000000 + (stamp.tv_nsec - previous_stamp.tv_nsec);
		printf ("%.4f\n", wtime);
		// timer starts at iteration i
		clock_gettime (CLOCK_MONOTONIC, &previous_stamp);
		pluser->notify ();
		// usleep (20);
	}
	// fix: these resources were previously leaked on normal exit -- unmap
	// the IPC allocation, stop the profiler so its data is flushed, free
	// the Pluser object, and detach the shared-memory segment
	gpuErrchk (cudaIpcCloseMemHandle (addr_d_data));
	gpuErrchk (cudaProfilerStop ());
	delete pluser;
	shmdt (ptr);
	return 0;
}
|
3da082d172379208c3bca87750c9c5354b4b170a.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#if TORCH_HIP_VERSION>=5000
#include <helper_cuda.h>
#define cutilSafeCall checkCudaErrors
#else
#include <cutil_inline.h>
#endif
#include "matrix.h"
#include "utils.h"
__global__ void dot_cuda ( CUVEC vec );
// Host wrapper for the dot-product kernel: launch `blockPerGrid` blocks of
// ThreadPerBlock threads, then block until the device work completes.
// dot_cuda leaves one partial sum per block in vec.v_ret; the final
// reduction of those partials happens elsewhere.
void dot( CUVEC vec, int blockPerGrid )
{
	hipLaunchKernelGGL(( dot_cuda) , dim3(blockPerGrid) , dim3(ThreadPerBlock) , 0, 0, vec);
	hipDeviceSynchronize();
}
__global__ void dot_cuda ( CUVEC vec )
{
__shared__ float chache[ThreadPerBlock] ;
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x ;
unsigned int chacheIndex = threadIdx.x ;
float* V1 = vec.v1;
float* V2 = vec.v2;
float* V3 = vec.v_ret;
int length = vec.length;
float temp=0;
while ( tid < length )
{
temp += V1[tid] * V2[tid] ;
tid += blockDim.x * gridDim.x ;
}
chache[chacheIndex] = temp ;
__syncthreads();
int i = blockDim.x / 2 ;
while ( i!=0 )
{
if ( chacheIndex < i )
{
chache[chacheIndex] += chache [chacheIndex + i] ;
}
__syncthreads();
i/=2 ;
}
if ( chacheIndex == 0 )
{
V3[blockIdx.x] = chache [0] ;
}
} | 3da082d172379208c3bca87750c9c5354b4b170a.cu | #include <iostream>
#include <cuda.h>
#if CUDA_VERSION>=5000
#include <helper_cuda.h>
#define cutilSafeCall checkCudaErrors
#else
#include <cutil_inline.h>
#endif
#include "matrix.h"
#include "utils.h"
__global__ void dot_cuda ( CUVEC vec );
// Host wrapper for the dot-product kernel: launch `blockPerGrid` blocks of
// ThreadPerBlock threads, then block until the device work completes.
// dot_cuda leaves one partial sum per block in vec.v_ret; the final
// reduction of those partials happens elsewhere.
void dot( CUVEC vec, int blockPerGrid )
{
	dot_cuda <<< blockPerGrid , ThreadPerBlock >>> (vec);
	// fix: cudaThreadSynchronize() is deprecated (removed in recent CUDA
	// releases); cudaDeviceSynchronize() has identical semantics here.
	cudaDeviceSynchronize();
}
__global__ void dot_cuda ( CUVEC vec )
{
__shared__ float chache[ThreadPerBlock] ;
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x ;
unsigned int chacheIndex = threadIdx.x ;
float* V1 = vec.v1;
float* V2 = vec.v2;
float* V3 = vec.v_ret;
int length = vec.length;
float temp=0;
while ( tid < length )
{
temp += V1[tid] * V2[tid] ;
tid += blockDim.x * gridDim.x ;
}
chache[chacheIndex] = temp ;
__syncthreads();
int i = blockDim.x / 2 ;
while ( i!=0 )
{
if ( chacheIndex < i )
{
chache[chacheIndex] += chache [chacheIndex + i] ;
}
__syncthreads();
i/=2 ;
}
if ( chacheIndex == 0 )
{
V3[blockIdx.x] = chache [0] ;
}
} |
41d57530d08310efdc2b2dbcf0eaa8b39b68c682.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************//**
* \file
* \author Christopher Minar (minarc@oregonstate.edu)
* \brief functions to invoke the kernels that setup the intermediate velocity solve
*/
#include <solvers/NavierStokes/NavierStokesSolver.h>
#include <solvers/NavierStokes/NavierStokes/kernels/CFL.h>
// Compute the per-cell CFL numbers on the device and track the running
// maximum.  Updates cfl_max and records the (i, j) cell index and time step
// at which the new maximum occurred.
void NavierStokesSolver::CFL()
{
	logger.startTimer("CFL");
	// one thread per cell; the grid formula is a ceil-divide of nx*ny by 256
	const int blocksize = 256;
	dim3 grid( int( (nx*ny-0.5)/blocksize ) +1, 1);
	dim3 block(blocksize, 1);
	hipLaunchKernelGGL(( kernels::calculateCFL), dim3(grid),dim3(block), 0, 0, cfl_r, u_r, dx_r, dy_r, nx, ny, dt);
	// find the largest per-cell CFL number (reduction runs on the device)
	thrust::device_vector<double>::iterator iter = thrust::max_element(cfl.begin(),cfl.end());
	unsigned int position = iter - cfl.begin();
	double max_val = *iter;
	if (max_val > cfl_max)
	{
		// a >20% jump after spin-up usually precedes a solver blow-up
		if (timeStep>100 && max_val > cfl_max*1.2)
			std::cout<<"WARNING: Significant maximum CFL change detected, potential crash imminent.\n";
		//crash();
		cfl_max = max_val;
		// convert the flat index back to 2D cell coordinates
		cfl_I = position%nx;
		cfl_J = int(position/nx);
		cfl_ts = timeStep;
	}
	logger.stopTimer("CFL");
}
| 41d57530d08310efdc2b2dbcf0eaa8b39b68c682.cu | /***************************************************************************//**
* \file
* \author Christopher Minar (minarc@oregonstate.edu)
* \brief functions to invoke the kernels that setup the intermediate velocity solve
*/
#include <solvers/NavierStokes/NavierStokesSolver.h>
#include <solvers/NavierStokes/NavierStokes/kernels/CFL.h>
// Compute the per-cell CFL numbers on the device and track the running
// maximum, recording where and when a new maximum occurred.
void NavierStokesSolver::CFL()
{
	logger.startTimer("CFL");
	// one thread per cell, 256 threads per block
	const int threadsPerBlock = 256;
	dim3 gridDims( int( (nx*ny-0.5)/threadsPerBlock ) +1, 1);
	dim3 blockDims(threadsPerBlock, 1);
	kernels::calculateCFL<<<gridDims,blockDims>>>(cfl_r, u_r, dx_r, dy_r, nx, ny, dt);
	// locate the largest per-cell CFL number (device-side reduction)
	thrust::device_vector<double>::iterator maxIt = thrust::max_element(cfl.begin(), cfl.end());
	const unsigned int flatIdx = maxIt - cfl.begin();
	const double currentMax = *maxIt;
	if (currentMax > cfl_max)
	{
		if (timeStep>100 && currentMax > cfl_max*1.2)
			std::cout<<"WARNING: Significant maximum CFL change detected, potential crash imminent.\n";
		//crash();
		cfl_max = currentMax;
		// convert the flat index back to 2D cell coordinates
		cfl_I = flatIdx%nx;
		cfl_J = int(flatIdx/nx);
		cfl_ts = timeStep;
	}
	logger.stopTimer("CFL");
}
|
99e34d26abe8e83e57e13791e27a34967d3d650c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <scalar.h>
// Element operation for the scalar-multiply transform: multiply the element
// d1 by the scalar d2.  params is unused for this op.
__device__ double op(double d1,double d2,double *params) {
	return d1 * d2;
}
// Scalar-multiply kernel entry point (C linkage so the host loader can look
// it up by name).  Delegates to transform() from scalar.h, which -- presumably
// -- applies op() elementwise over the n elements of dy (stride incx,
// starting offset idx) with scalar dx, writing into result; confirm against
// scalar.h.
extern "C"
__global__ void mul_scalar_double(int n, int idx,double dx,double *dy,int incx,double *params,double *result) {
	transform(n,idx,dx,dy,incx,params,result);
}
| 99e34d26abe8e83e57e13791e27a34967d3d650c.cu | #include <scalar.h>
// Element operation for the scalar-multiply transform: multiply the element
// d1 by the scalar d2.  params is unused for this op.
__device__ double op(double d1,double d2,double *params) {
	return d1 * d2;
}
// Scalar-multiply kernel entry point (C linkage so the host loader can look
// it up by name).  Delegates to transform() from scalar.h, which -- presumably
// -- applies op() elementwise over the n elements of dy (stride incx,
// starting offset idx) with scalar dx, writing into result; confirm against
// scalar.h.
extern "C"
__global__ void mul_scalar_double(int n, int idx,double dx,double *dy,int incx,double *params,double *result) {
	transform(n,idx,dx,dy,incx,params,result);
}
|
3babebb508255f05c99c0021fc2c09b81033f7f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef GPU
#include "output_layer.h"
namespace mlp{
// Output-layer gradient through the sigmoid:
//   g_[i] = (exp[i] - output[i]) * (output[i] * (1 - output[i]))
// where exp is the expected one-hot vector and output the layer activations.
__global__ void dsigmoidKernel(float *g_, float *exp, float *output, int max_index)
{
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= max_index)
		return;
	const float out = output[idx];
	g_[idx] = (exp[idx] - out) * (out * (1 - out));
}
// Forward pass of the output layer on the GPU: build the one-hot expected
// vector from this->exp_y, upload it to exp_y_gpu, copy the layer input back
// to the host to accumulate the squared error into this->err, and alias
// output_gpu to input_gpu (this layer applies no transformation itself).
void OutputLayer::forward_gpu(){
	this->err = 0;
	exp_y_vec.clear();
	/* XOR: use the single push_back below */
	//exp_y_vec.push_back(this -> exp_y);
	/* MNIST: use the one-hot encoding below */
	exp_y_vec.resize(in_depth_);
	for (size_t i = 0; i < exp_y_vec.size(); i++)exp_y_vec[i] = 0;
	exp_y_vec[this->exp_y] = 1;
	checkerror(hipMemcpy(exp_y_gpu, exp_y_vec.data(), sizeof(float)*in_depth_, hipMemcpyHostToDevice),"exp_y h2d");
	// device->host copy of the layer input purely to compute the loss on CPU
	vec_t output_tmp;
	output_tmp.resize(in_depth_);
	checkerror(hipMemcpy(output_tmp.data(), input_gpu, sizeof(float)*in_depth_, hipMemcpyDeviceToHost), "output d2h");
	// accumulate 0.5 * sum of squared differences
	for (size_t i = 0; i < in_depth_; i++){
		err += 0.5 * (exp_y_vec[i] - output_tmp[i]) *
			(exp_y_vec[i] - output_tmp[i]);
	}
	output_gpu = input_gpu;
}
// Backward pass: run dsigmoidKernel over the in_depth_ outputs to fill
// g_gpu, then synchronize so the gradient is complete before it is used.
void OutputLayer::fix_backprop_gpu(){
	// one thread per output unit, 32 threads per block (rounded up)
	const dim3 threadsPerBlock(32);
	const dim3 numBlocks((in_depth_ + 31) / 32);
	dsigmoidKernel << <numBlocks, threadsPerBlock >> >(g_gpu, exp_y_gpu, output_gpu, in_depth_);
	checkerror(hipDeviceSynchronize(), "dx sigmoid kernel");
}
}
#endif | 3babebb508255f05c99c0021fc2c09b81033f7f9.cu | #ifdef GPU
#include "output_layer.h"
namespace mlp{
// Output-layer gradient through the sigmoid:
//   g_[i] = (exp[i] - output[i]) * (output[i] * (1 - output[i]))
// where exp is the expected one-hot vector and output the layer activations.
__global__ void dsigmoidKernel(float *g_, float *exp, float *output, int max_index)
{
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= max_index)
		return;
	const float out = output[idx];
	g_[idx] = (exp[idx] - out) * (out * (1 - out));
}
// Forward pass of the output layer on the GPU: build the one-hot expected
// vector from this->exp_y, upload it to exp_y_gpu, copy the layer input back
// to the host to accumulate the squared error into this->err, and alias
// output_gpu to input_gpu (this layer applies no transformation itself).
void OutputLayer::forward_gpu(){
	this->err = 0;
	exp_y_vec.clear();
	/* XOR: use the single line below */
	//exp_y_vec.push_back(this -> exp_y);
	/* MNIST: use the one-hot encoding below */
	exp_y_vec.resize(in_depth_);
	for (size_t i = 0; i < exp_y_vec.size(); i++)exp_y_vec[i] = 0;
	exp_y_vec[this->exp_y] = 1;
	checkerror(cudaMemcpy(exp_y_gpu, exp_y_vec.data(), sizeof(float)*in_depth_, cudaMemcpyHostToDevice),"exp_y h2d");
	// device->host copy of the layer input purely to compute the loss on CPU
	vec_t output_tmp;
	output_tmp.resize(in_depth_);
	checkerror(cudaMemcpy(output_tmp.data(), input_gpu, sizeof(float)*in_depth_, cudaMemcpyDeviceToHost), "output d2h");
	// accumulate 0.5 * sum of squared differences
	for (size_t i = 0; i < in_depth_; i++){
		err += 0.5 * (exp_y_vec[i] - output_tmp[i]) *
			(exp_y_vec[i] - output_tmp[i]);
	}
	output_gpu = input_gpu;
}
// Backward pass: run dsigmoidKernel over the in_depth_ outputs to fill
// g_gpu, then synchronize so the gradient is complete before it is used.
void OutputLayer::fix_backprop_gpu(){
	// one thread per output unit, 32 threads per block (rounded up)
	dim3 thread_num(32);
	dim3 block_num((in_depth_+31) / 32);
	dsigmoidKernel << <block_num, thread_num >> >(g_gpu, exp_y_gpu, output_gpu, in_depth_);
	checkerror(cudaDeviceSynchronize(), "dx sigmoid kernel");
}
}
#endif |
139cc7af83764a8d0e84b1cfc9f532292ae6172d.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file mpcd/ConfinedStreamingMethodGPU.cu
* \brief Defines GPU functions and kernels used by mpcd::ConfinedStreamingMethodGPU
*
* \warning
* This file needs separable compilation with ExternalFields.cu. Any plugins extending
* the ConfinedStreamingGeometryGPU will also need to do separable compilation with
* ExternalFields.cu.
*/
#include "ConfinedStreamingMethodGPU.cuh"
#include "StreamingGeometry.h"
#include "ExternalField.h"
#include "hoomd/GPUPolymorph.cuh"
namespace mpcd
{
namespace gpu
{
//! Template instantiation of bulk geometry streaming
template hipError_t confined_stream<mpcd::detail::BulkGeometry>
(const stream_args_t& args, const mpcd::detail::BulkGeometry& geom);
//! Template instantiation of slit geometry streaming
template hipError_t confined_stream<mpcd::detail::SlitGeometry>
(const stream_args_t& args, const mpcd::detail::SlitGeometry& geom);
//! Template instantiation of slit geometry streaming
template hipError_t confined_stream<mpcd::detail::SlitPoreGeometry>
(const stream_args_t& args, const mpcd::detail::SlitPoreGeometry& geom);
} // end namespace gpu
} // end namespace mpcd
| 139cc7af83764a8d0e84b1cfc9f532292ae6172d.cu | // Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file mpcd/ConfinedStreamingMethodGPU.cu
* \brief Defines GPU functions and kernels used by mpcd::ConfinedStreamingMethodGPU
*
* \warning
* This file needs separable compilation with ExternalFields.cu. Any plugins extending
* the ConfinedStreamingGeometryGPU will also need to do separable compilation with
* ExternalFields.cu.
*/
#include "ConfinedStreamingMethodGPU.cuh"
#include "StreamingGeometry.h"
#include "ExternalField.h"
#include "hoomd/GPUPolymorph.cuh"
namespace mpcd
{
namespace gpu
{
//! Template instantiation of bulk geometry streaming
template cudaError_t confined_stream<mpcd::detail::BulkGeometry>
(const stream_args_t& args, const mpcd::detail::BulkGeometry& geom);
//! Template instantiation of slit geometry streaming
template cudaError_t confined_stream<mpcd::detail::SlitGeometry>
(const stream_args_t& args, const mpcd::detail::SlitGeometry& geom);
//! Template instantiation of slit geometry streaming
template cudaError_t confined_stream<mpcd::detail::SlitPoreGeometry>
(const stream_args_t& args, const mpcd::detail::SlitPoreGeometry& geom);
} // end namespace gpu
} // end namespace mpcd
|
aa1004b4522bf408b049b19241c3224d6ffb3840.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/hip/Sort.h>
#include <ATen/core/TensorBase.h>
#include <ATen/core/Array.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/cub.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/hip/NumericLimits.cuh>
#include <ATen/native/hip/SortUtils.cuh>
#include <ATen/native/hip/SortingCommon.cuh>
#include <limits>
#include <c10/core/DeviceArray.h>
namespace at::native {
// Returns the smallest grid size (block count) that can still reach maximum
// occupancy for `kernel` when launched with at most `max_block_size` threads
// per block, as reported by the occupancy API.  Used below to avoid shrinking
// the per-block batch when the grid would otherwise be too small.
template <typename T>
static int minimum_grid_for_occupancy(T kernel, int max_block_size) {
  int minGridSize;
  int blockSize;
  C10_HIP_CHECK(hipOccupancyMaxPotentialBlockSize(
      &minGridSize,
      &blockSize,
      kernel,
      /*dynamicSMemSize=*/0,
      max_block_size));
  return minGridSize;
}
// Whether type T has a quiet-NaN representation: standard arithmetic types
// answer via std::numeric_limits; the extended floating-point types
// (complex, BFloat16, Half) that don't specialize numeric_limits here are
// handled explicitly.
template <typename T>
constexpr bool has_nan() {
  if constexpr (std::numeric_limits<T>::is_specialized) {
    return std::numeric_limits<T>::has_quiet_NaN;
  } else if constexpr (
      c10::is_complex<T>::value ||
      std::is_same_v<T, c10::BFloat16> ||
      std::is_same_v<T, c10::Half>) {
    return true;
  }
  // Fix: control previously flowed off the end of this non-void constexpr
  // function for any other non-specialized type (undefined behavior /
  // ill-formed when constant-evaluated).  Such types have no NaN.
  return false;
}
// For very small unstable sorts (n <= 32), use bitonicSortKVInPlace
// which can sort multiple arrays within the same block of threads,
// improving occupancy.
//
// Layout: block_x (= 32/2 = 16) threads cooperate on one slice of up to 32
// elements (2 items per thread); block_y independent slices share a block.
struct SmallBitonicSort {
  template <int A, typename K, typename V, typename IndexType>
  void sort(
      at::cuda::detail::TensorInfo<K, IndexType> keyInfo,
      IndexType keySlices,
      IndexType keySliceSize,
      IndexType keySliceStride,
      at::cuda::detail::TensorInfo<V, IndexType> valueInfo,
      IndexType valueSliceStride,
      bool descending) {
    constexpr int sort_size = 32;
    constexpr int max_block_y = 16;
    constexpr int items_per_thread = 2;
    static_assert(sort_size % items_per_thread == 0, "");
    constexpr int block_x = sort_size / items_per_thread;
    TORCH_INTERNAL_ASSERT(keySliceSize <= sort_size);
    // Scale batch size down if the grid would be too small
    const auto min_grid = minimum_grid_for_occupancy(
        bitonicSortKVInPlace<
            A, -1, block_x, max_block_y,
            K, V, LTOp<K, true>, IndexType>,
        block_x * max_block_y);
    const auto max_batch = ::max(IndexType{1}, keySlices / min_grid);
    const int block_y = ::min(IndexType(max_block_y), max_batch);
    dim3 block(block_x, block_y);
    dim3 grid;
    // one grid tile per group of block_y slices (ceil-divide)
    const int grid_count = (keySlices + block_y - 1) / block_y;
    TORCH_INTERNAL_ASSERT(getGridFromTiles(grid_count, grid),
                          "Too many slices to sort");
    const auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
    // sort direction is selected by the comparator passed to the kernel
    if (descending) {
      hipLaunchKernelGGL(( bitonicSortKVInPlace<A, -1, block_x, max_block_y>)
        , dim3(grid), dim3(block), 0, stream,
          keyInfo,
          keySlices,
          keySliceSize,
          keySliceStride,
          valueInfo,
          valueSliceStride,
          GTOp<K, true>());
      C10_HIP_KERNEL_LAUNCH_CHECK();
    } else {
      hipLaunchKernelGGL(( bitonicSortKVInPlace<A, -1, block_x, max_block_y>)
        , dim3(grid), dim3(block), 0, stream,
          keyInfo,
          keySlices,
          keySliceSize,
          keySliceStride,
          valueInfo,
          valueSliceStride,
          LTOp<K, true>());
      C10_HIP_KERNEL_LAUNCH_CHECK();
    }
  }
};
// For small sorts (n <= 128) we use warpMergeSortKVInPlace which
// sorts one slice per warp and potentially multiple slices in the
// same block for improved occupancy with large batch sizes.
//
// Slices shorter than sort_size are padded with `invalid_key`, chosen to
// sort after every real key for the requested direction.
template <int sort_size>
struct WarpMergeSort {
  template <int A, typename K, typename V, typename IndexType>
  void sort(
      at::cuda::detail::TensorInfo<K, IndexType> keyInfo,
      IndexType keySlices,
      IndexType keySliceSize,
      IndexType keySliceStride,
      at::cuda::detail::TensorInfo<V, IndexType> valueInfo,
      IndexType valueSliceStride,
      bool descending) {
    constexpr int max_block_y = 16;
    const int block_x = at::cuda::warp_size();
    TORCH_INTERNAL_ASSERT(keySliceSize <= sort_size);
    // Scale batch size down if the grid would be too small
    const auto min_grid = minimum_grid_for_occupancy(
        warpMergeSortKVInPlace<
            A, -1, sort_size, max_block_y,
            K, V, LTOp<K, true>, IndexType>,
        block_x * max_block_y);
    const auto max_batch = ::max(IndexType{1}, keySlices / min_grid);
    const int block_y = ::min(IndexType(max_block_y), max_batch);
    dim3 block(block_x, block_y);
    dim3 grid;
    // one grid tile per group of block_y slices (ceil-divide)
    const int grid_count = (keySlices + block_y - 1) / block_y;
    TORCH_INTERNAL_ASSERT(getGridFromTiles(grid_count, grid),
                          "Too many slices to sort");
    const auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
    if (descending) {
      // descending: pad with the smallest key so padding sinks to the end
      const K invalid_key = at::numeric_limits<K>::lower_bound();
      hipLaunchKernelGGL(( warpMergeSortKVInPlace<A, -1, sort_size, max_block_y>)
        , dim3(grid), dim3(block), 0, stream,
          keyInfo,
          keySlices,
          keySliceSize,
          keySliceStride,
          valueInfo,
          valueSliceStride,
          GTOp<K, true>(),
          invalid_key);
      C10_HIP_KERNEL_LAUNCH_CHECK();
    } else {
      const K invalid_key = []{
        // NAN is sorted after inf
        if constexpr(has_nan<K>()) {
          return K(NAN);
        }
        return at::numeric_limits<K>::upper_bound();
      }();
      hipLaunchKernelGGL(( warpMergeSortKVInPlace<A, -1, sort_size, max_block_y>)
        , dim3(grid), dim3(block), 0, stream,
          keyInfo,
          keySlices,
          keySliceSize,
          keySliceStride,
          valueInfo,
          valueSliceStride,
          LTOp<K, true>(),
          invalid_key);
      C10_HIP_KERNEL_LAUNCH_CHECK();
    }
  }
};
// For medium sizes (128 < n <= 4096) use radixSortKVInplace.
// Dispatches on the next power of two >= slice size to a fixed-size
// instantiation; one block sorts one slice.
struct MediumRadixSort {
  template <int A, typename K, typename V, typename IndexType>
  void sort(
      at::cuda::detail::TensorInfo<K, IndexType> keyInfo,
      IndexType keySlices,
      IndexType keySliceSize,
      IndexType keySliceStride,
      at::cuda::detail::TensorInfo<V, IndexType> valueInfo,
      IndexType valueSliceStride,
      bool descending) {
#define HANDLE_CASE(SIZE, ITEMS_PER_THREAD) \
  fixed_size_sort<A, SIZE, ITEMS_PER_THREAD>( \
      keyInfo, \
      keySlices, \
      keySliceSize, \
      keySliceStride, \
      valueInfo, \
      valueSliceStride, \
      descending)
    int64_t ceilPowerOf2 = nextHighestPowerOf2(keySliceSize);
    TORCH_INTERNAL_ASSERT(ceilPowerOf2 <= 4096);
    switch (ceilPowerOf2) {
      case 4096:
        HANDLE_CASE(4096, 32);
        break;
      case 2048:
        HANDLE_CASE(2048, 32);
        break;
      case 1024:
      case 512:
      case 256:
        // sizes 256..1024 all use the 1024-element instantiation
        HANDLE_CASE(1024, 32);
        break;
      case 128:
      case 64:
      case 32:
      case 16:
      case 8:
      case 4:
      case 2:
        TORCH_INTERNAL_ASSERT(
            false, "Expected size <= 128 to be handled by a different algorithm");
        break;
      case 1:
        /* Nothing to do, data already sorted */
        break;
      default:
        TORCH_INTERNAL_ASSERT(false);
    }
#undef HANDLE_CASE
  }
  // Launch the radix sort with a compile-time slice capacity of sort_size,
  // using sort_size / items_per_thread threads per block, one slice per block.
  template <int A, int sort_size, int items_per_thread,
            typename K, typename V, typename IndexType>
  void fixed_size_sort(
      at::cuda::detail::TensorInfo<K, IndexType> keyInfo,
      IndexType keySlices,
      IndexType keySliceSize,
      IndexType keySliceStride,
      at::cuda::detail::TensorInfo<V, IndexType> valueInfo,
      IndexType valueSliceStride,
      bool descending) {
    static_assert(sort_size % items_per_thread == 0, "");
    constexpr int block = sort_size / items_per_thread;
    dim3 grid;
    TORCH_INTERNAL_ASSERT(getGridFromTiles(keySlices, grid),
                          "Too many slices to sort");
    const auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
    hipLaunchKernelGGL(( radixSortKVInPlace<A, -1, block, items_per_thread>)
      , dim3(grid), dim3(block), 0, stream,
        keyInfo,
        keySlices,
        keySliceSize,
        keySliceStride,
        valueInfo,
        valueSliceStride,
        descending);
    C10_HIP_KERNEL_LAUNCH_CHECK();
  }
};
// Shared driver for the per-slice sorters: validates shapes, collapses the
// tensors' dimension info so the remaining dims enumerate slices, picks
// 32- vs 64-bit indexing, and invokes `sorter` once per dtype dispatch.
// `key` holds the values being sorted; `value` holds int64 indices permuted
// alongside them.
template <typename Sorter>
void sortCommon(Sorter sorter, const TensorBase &key, const TensorBase &value,
                int dim, bool descending) {
  TORCH_CHECK(key.sizes() == value.sizes(),
              "Key tensor must have same size as value tensor");
  int dims = value.dim();
  TORCH_CHECK(dims <= MAX_DIMS, "value tensor has too many dimensions");
  // if key and value tensors have the same size, we do not need to check both
  ptrdiff_t inElements = key.numel();
  if (inElements == 0) {
    return;
  }
  int64_t keySliceSize = key.size(dim);
  ptrdiff_t keySlices = inElements / keySliceSize;
#define HANDLE_SORT_CASE(TYPE, A) \
  sorter.template sort<A>( \
      keyInfo, \
      (TYPE) keySlices, \
      (TYPE) keySliceSize, \
      (TYPE) keyInfo.strides[collapseKeyDim], \
      valueInfo, \
      (TYPE) valueInfo.strides[collapseValueDim], \
      descending)
  // The constructed key/value tensor info is used to select the slice
  // we are sorting on a per-block basis
  AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, key.scalar_type(), "sortKeyValueInplace", [&] {
    if (at::cuda::detail::canUse32BitIndexMath(key)) {
      at::cuda::detail::TensorInfo<scalar_t, unsigned int> keyInfo =
        at::cuda::detail::getTensorInfo<scalar_t, unsigned int>(key);
      at::cuda::detail::TensorInfo<int64_t, unsigned int> valueInfo =
        at::cuda::detail::getTensorInfo<int64_t, unsigned int>(value);
      // collapse the sorted dimension to size 1 (preserving its stride) so
      // the remaining dims enumerate independent slices
      auto strideKey = keyInfo.strides[dim];
      keyInfo.sizes[dim] = 1;
      int collapseKeyDim = keyInfo.collapseDims(dim);
      keyInfo.strides[collapseKeyDim] = strideKey;
      auto strideValue = valueInfo.strides[dim];
      valueInfo.sizes[dim]=1;
      int collapseValueDim = valueInfo.collapseDims(dim);
      valueInfo.strides[collapseValueDim] = strideValue;
      if (keyInfo.isContiguous()) {
        HANDLE_SORT_CASE(unsigned int, -2);
      } else {
        switch (keyInfo.dims) {
          case 2:
            HANDLE_SORT_CASE(unsigned int, 2);
            break;
          default:
            HANDLE_SORT_CASE(unsigned int, -1);
            break;
        }
      }
    } else {
      at::cuda::detail::TensorInfo<scalar_t, uint64_t> keyInfo =
        at::cuda::detail::getTensorInfo<scalar_t, uint64_t>(key);
      at::cuda::detail::TensorInfo<int64_t, uint64_t> valueInfo =
        at::cuda::detail::getTensorInfo<int64_t, uint64_t>(value);
      auto strideKey = keyInfo.strides[dim];
      keyInfo.sizes[dim] = 1;
      int collapseKeyDim = keyInfo.collapseDims(dim);
      keyInfo.strides[collapseKeyDim] = strideKey;
      auto strideValue = valueInfo.strides[dim];
      valueInfo.sizes[dim]=1;
      int collapseValueDim = valueInfo.collapseDims(dim);
      valueInfo.strides[collapseValueDim] = strideValue;
      // int64_t case is rare, just instantiate the generic version
      HANDLE_SORT_CASE(uint64_t, -1);
    }
  });
#undef HANDLE_SORT_CASE
}
// In-place key/value sort along `dim`, choosing the algorithm by slice
// length: bitonic network for tiny unstable sorts, warp merge sort up to
// 128, block radix sort otherwise.
void sortKeyValueInplace(
    const TensorBase& key,
    const TensorBase& value,
    int dim,
    bool descending,
    bool stable) {
  const auto slice_len = key.size(dim);
  if (slice_len <= 1) {
    return; // Already sorted
  }
  if (!stable && slice_len <= 32) {
    // NOTE: Bitonic sort is unstable
    sortCommon(SmallBitonicSort{}, key, value, dim, descending);
  } else if (slice_len <= 128) {
    sortCommon(WarpMergeSort<128>{}, key, value, dim, descending);
  } else {
    sortCommon(MediumRadixSort{}, key, value, dim, descending);
  }
}
} // namespace at::native
| aa1004b4522bf408b049b19241c3224d6ffb3840.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/cuda/Sort.h>
#include <ATen/core/TensorBase.h>
#include <ATen/core/Array.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/cub.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/cuda/NumericLimits.cuh>
#include <ATen/native/cuda/SortUtils.cuh>
#include <ATen/native/cuda/SortingCommon.cuh>
#include <limits>
#include <c10/core/DeviceArray.h>
namespace at::native {
// Returns the smallest grid size (block count) that can still reach maximum
// occupancy for `kernel` when launched with at most `max_block_size` threads
// per block, as reported by the occupancy API.  Used below to avoid shrinking
// the per-block batch when the grid would otherwise be too small.
template <typename T>
static int minimum_grid_for_occupancy(T kernel, int max_block_size) {
  int minGridSize;
  int blockSize;
  C10_CUDA_CHECK(cudaOccupancyMaxPotentialBlockSize(
      &minGridSize,
      &blockSize,
      kernel,
      /*dynamicSMemSize=*/0,
      max_block_size));
  return minGridSize;
}
// Whether type T has a quiet-NaN representation: standard arithmetic types
// answer via std::numeric_limits; the extended floating-point types
// (complex, BFloat16, Half) that don't specialize numeric_limits here are
// handled explicitly.
template <typename T>
constexpr bool has_nan() {
  if constexpr (std::numeric_limits<T>::is_specialized) {
    return std::numeric_limits<T>::has_quiet_NaN;
  } else if constexpr (
      c10::is_complex<T>::value ||
      std::is_same_v<T, c10::BFloat16> ||
      std::is_same_v<T, c10::Half>) {
    return true;
  }
  // Fix: control previously flowed off the end of this non-void constexpr
  // function for any other non-specialized type (undefined behavior /
  // ill-formed when constant-evaluated).  Such types have no NaN.
  return false;
}
// For very small unstable sorts (n <= 32), use bitonicSortKVInPlace
// which can sort multiple arrays within the same block of threads,
// improving occupancy.
//
// Layout: block_x (= 32/2 = 16) threads cooperate on one slice of up to 32
// elements (2 items per thread); block_y independent slices share a block.
struct SmallBitonicSort {
  template <int A, typename K, typename V, typename IndexType>
  void sort(
      at::cuda::detail::TensorInfo<K, IndexType> keyInfo,
      IndexType keySlices,
      IndexType keySliceSize,
      IndexType keySliceStride,
      at::cuda::detail::TensorInfo<V, IndexType> valueInfo,
      IndexType valueSliceStride,
      bool descending) {
    constexpr int sort_size = 32;
    constexpr int max_block_y = 16;
    constexpr int items_per_thread = 2;
    static_assert(sort_size % items_per_thread == 0, "");
    constexpr int block_x = sort_size / items_per_thread;
    TORCH_INTERNAL_ASSERT(keySliceSize <= sort_size);
    // Scale batch size down if the grid would be too small
    const auto min_grid = minimum_grid_for_occupancy(
        bitonicSortKVInPlace<
            A, -1, block_x, max_block_y,
            K, V, LTOp<K, true>, IndexType>,
        block_x * max_block_y);
    const auto max_batch = std::max(IndexType{1}, keySlices / min_grid);
    const int block_y = std::min(IndexType(max_block_y), max_batch);
    dim3 block(block_x, block_y);
    dim3 grid;
    // one grid tile per group of block_y slices (ceil-divide)
    const int grid_count = (keySlices + block_y - 1) / block_y;
    TORCH_INTERNAL_ASSERT(getGridFromTiles(grid_count, grid),
                          "Too many slices to sort");
    const auto stream = at::cuda::getCurrentCUDAStream();
    // sort direction is selected by the comparator passed to the kernel
    if (descending) {
      bitonicSortKVInPlace<A, -1, block_x, max_block_y>
        <<<grid, block, 0, stream>>>(
          keyInfo,
          keySlices,
          keySliceSize,
          keySliceStride,
          valueInfo,
          valueSliceStride,
          GTOp<K, true>());
      C10_CUDA_KERNEL_LAUNCH_CHECK();
    } else {
      bitonicSortKVInPlace<A, -1, block_x, max_block_y>
        <<<grid, block, 0, stream>>>(
          keyInfo,
          keySlices,
          keySliceSize,
          keySliceStride,
          valueInfo,
          valueSliceStride,
          LTOp<K, true>());
      C10_CUDA_KERNEL_LAUNCH_CHECK();
    }
  }
};
// For small sorts (n <= 128) we use warpMergeSortKVInPlace which
// sorts one slice per warp and potentially multiple slices in the
// same block for improved occupancy with large batch sizes.
//
// Slices shorter than sort_size are padded with `invalid_key`, chosen to
// sort after every real key for the requested direction.
template <int sort_size>
struct WarpMergeSort {
  template <int A, typename K, typename V, typename IndexType>
  void sort(
      at::cuda::detail::TensorInfo<K, IndexType> keyInfo,
      IndexType keySlices,
      IndexType keySliceSize,
      IndexType keySliceStride,
      at::cuda::detail::TensorInfo<V, IndexType> valueInfo,
      IndexType valueSliceStride,
      bool descending) {
    constexpr int max_block_y = 16;
    const int block_x = at::cuda::warp_size();
    TORCH_INTERNAL_ASSERT(keySliceSize <= sort_size);
    // Scale batch size down if the grid would be too small
    const auto min_grid = minimum_grid_for_occupancy(
        warpMergeSortKVInPlace<
            A, -1, sort_size, max_block_y,
            K, V, LTOp<K, true>, IndexType>,
        block_x * max_block_y);
    const auto max_batch = std::max(IndexType{1}, keySlices / min_grid);
    const int block_y = std::min(IndexType(max_block_y), max_batch);
    dim3 block(block_x, block_y);
    dim3 grid;
    // one grid tile per group of block_y slices (ceil-divide)
    const int grid_count = (keySlices + block_y - 1) / block_y;
    TORCH_INTERNAL_ASSERT(getGridFromTiles(grid_count, grid),
                          "Too many slices to sort");
    const auto stream = at::cuda::getCurrentCUDAStream();
    if (descending) {
      // descending: pad with the smallest key so padding sinks to the end
      const K invalid_key = at::numeric_limits<K>::lower_bound();
      warpMergeSortKVInPlace<A, -1, sort_size, max_block_y>
        <<<grid, block, 0, stream>>>(
          keyInfo,
          keySlices,
          keySliceSize,
          keySliceStride,
          valueInfo,
          valueSliceStride,
          GTOp<K, true>(),
          invalid_key);
      C10_CUDA_KERNEL_LAUNCH_CHECK();
    } else {
      const K invalid_key = []{
        // NAN is sorted after inf
        if constexpr(has_nan<K>()) {
          return K(NAN);
        }
        return at::numeric_limits<K>::upper_bound();
      }();
      warpMergeSortKVInPlace<A, -1, sort_size, max_block_y>
        <<<grid, block, 0, stream>>>(
          keyInfo,
          keySlices,
          keySliceSize,
          keySliceStride,
          valueInfo,
          valueSliceStride,
          LTOp<K, true>(),
          invalid_key);
      C10_CUDA_KERNEL_LAUNCH_CHECK();
    }
  }
};
// For medium sizes (128 < n <= 4096) use radixSortKVInplace.
// Dispatches on the next power of two >= slice size to a fixed-size
// instantiation; one block sorts one slice.
struct MediumRadixSort {
  template <int A, typename K, typename V, typename IndexType>
  void sort(
      at::cuda::detail::TensorInfo<K, IndexType> keyInfo,
      IndexType keySlices,
      IndexType keySliceSize,
      IndexType keySliceStride,
      at::cuda::detail::TensorInfo<V, IndexType> valueInfo,
      IndexType valueSliceStride,
      bool descending) {
#define HANDLE_CASE(SIZE, ITEMS_PER_THREAD) \
  fixed_size_sort<A, SIZE, ITEMS_PER_THREAD>( \
      keyInfo, \
      keySlices, \
      keySliceSize, \
      keySliceStride, \
      valueInfo, \
      valueSliceStride, \
      descending)
    int64_t ceilPowerOf2 = nextHighestPowerOf2(keySliceSize);
    TORCH_INTERNAL_ASSERT(ceilPowerOf2 <= 4096);
    switch (ceilPowerOf2) {
      case 4096:
        HANDLE_CASE(4096, 32);
        break;
      case 2048:
        HANDLE_CASE(2048, 32);
        break;
      case 1024:
      case 512:
      case 256:
        // sizes 256..1024 all use the 1024-element instantiation
        HANDLE_CASE(1024, 32);
        break;
      case 128:
      case 64:
      case 32:
      case 16:
      case 8:
      case 4:
      case 2:
        TORCH_INTERNAL_ASSERT(
            false, "Expected size <= 128 to be handled by a different algorithm");
        break;
      case 1:
        /* Nothing to do, data already sorted */
        break;
      default:
        TORCH_INTERNAL_ASSERT(false);
    }
#undef HANDLE_CASE
  }
  // Launch the radix sort with a compile-time slice capacity of sort_size,
  // using sort_size / items_per_thread threads per block, one slice per block.
  template <int A, int sort_size, int items_per_thread,
            typename K, typename V, typename IndexType>
  void fixed_size_sort(
      at::cuda::detail::TensorInfo<K, IndexType> keyInfo,
      IndexType keySlices,
      IndexType keySliceSize,
      IndexType keySliceStride,
      at::cuda::detail::TensorInfo<V, IndexType> valueInfo,
      IndexType valueSliceStride,
      bool descending) {
    static_assert(sort_size % items_per_thread == 0, "");
    constexpr int block = sort_size / items_per_thread;
    dim3 grid;
    TORCH_INTERNAL_ASSERT(getGridFromTiles(keySlices, grid),
                          "Too many slices to sort");
    const auto stream = at::cuda::getCurrentCUDAStream();
    radixSortKVInPlace<A, -1, block, items_per_thread>
        <<<grid, block, 0, stream>>>(
        keyInfo,
        keySlices,
        keySliceSize,
        keySliceStride,
        valueInfo,
        valueSliceStride,
        descending);
    C10_CUDA_KERNEL_LAUNCH_CHECK();
  }
};
// Shared driver for the in-place key/value sorters above: validates the
// inputs, builds collapsed TensorInfo views of `key` and `value`, and
// invokes `sorter` with 32- or 64-bit indexing depending on tensor size.
template <typename Sorter>
void sortCommon(Sorter sorter, const TensorBase &key, const TensorBase &value,
int dim, bool descending) {
TORCH_CHECK(key.sizes() == value.sizes(),
"Key tensor must have same size as value tensor");
int dims = value.dim();
TORCH_CHECK(dims <= MAX_DIMS, "value tensor has too many dimensions");
// if key and value tensors have the same size, we do not need to check both
ptrdiff_t inElements = key.numel();
if (inElements == 0) {
return;
}
int64_t keySliceSize = key.size(dim);
ptrdiff_t keySlices = inElements / keySliceSize;
// Forwards the collapsed views plus slice geometry to the sorter using
// index type TYPE and collapsed-dims constant A.
#define HANDLE_SORT_CASE(TYPE, A) \
sorter.template sort<A>( \
keyInfo, \
(TYPE) keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
descending)
// The constructed key/value tensor info is used to select the slice
// we are sorting on a per-block basis
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, key.scalar_type(), "sortKeyValueInplace", [&] {
if (at::cuda::detail::canUse32BitIndexMath(key)) {
at::cuda::detail::TensorInfo<scalar_t, unsigned int> keyInfo =
at::cuda::detail::getTensorInfo<scalar_t, unsigned int>(key);
at::cuda::detail::TensorInfo<int64_t, unsigned int> valueInfo =
at::cuda::detail::getTensorInfo<int64_t, unsigned int>(value);
// Collapse the sort dimension out of the view (size 1) while keeping
// its original stride, so the remaining dims enumerate slices.
auto strideKey = keyInfo.strides[dim];
keyInfo.sizes[dim] = 1;
int collapseKeyDim = keyInfo.collapseDims(dim);
keyInfo.strides[collapseKeyDim] = strideKey;
auto strideValue = valueInfo.strides[dim];
valueInfo.sizes[dim]=1;
int collapseValueDim = valueInfo.collapseDims(dim);
valueInfo.strides[collapseValueDim] = strideValue;
if (keyInfo.isContiguous()) {
HANDLE_SORT_CASE(unsigned int, -2);
} else {
switch (keyInfo.dims) {
case 2:
HANDLE_SORT_CASE(unsigned int, 2);
break;
default:
HANDLE_SORT_CASE(unsigned int, -1);
break;
}
}
} else {
at::cuda::detail::TensorInfo<scalar_t, uint64_t> keyInfo =
at::cuda::detail::getTensorInfo<scalar_t, uint64_t>(key);
at::cuda::detail::TensorInfo<int64_t, uint64_t> valueInfo =
at::cuda::detail::getTensorInfo<int64_t, uint64_t>(value);
// Same dimension-collapsing trick as the 32-bit branch above.
auto strideKey = keyInfo.strides[dim];
keyInfo.sizes[dim] = 1;
int collapseKeyDim = keyInfo.collapseDims(dim);
keyInfo.strides[collapseKeyDim] = strideKey;
auto strideValue = valueInfo.strides[dim];
valueInfo.sizes[dim]=1;
int collapseValueDim = valueInfo.collapseDims(dim);
valueInfo.strides[collapseValueDim] = strideValue;
// int64_t case is rare, just instantiate the generic version
HANDLE_SORT_CASE(uint64_t, -1);
}
});
#undef HANDLE_SORT_CASE
}
// Sorts `key` along `dim` in place, applying the same permutation to
// `value`. The algorithm is chosen from the slice length:
//   <= 1     : already sorted, nothing to do
//   <= 32    : small bitonic sort (only when stability is not required,
//              because bitonic sort is unstable)
//   <= 128   : warp merge sort
//   otherwise: block-wide radix sort
void sortKeyValueInplace(
    const TensorBase& key,
    const TensorBase& value,
    int dim,
    bool descending,
    bool stable) {
  const auto n = key.size(dim);
  if (n <= 1) {
    return;  // a slice of length 0 or 1 is trivially sorted
  }
  if (!stable && n <= 32) {
    // NOTE: Bitonic sort is unstable, so it is skipped for stable sorts.
    sortCommon(SmallBitonicSort{}, key, value, dim, descending);
    return;
  }
  if (n <= 128) {
    sortCommon(WarpMergeSort<128>{}, key, value, dim, descending);
    return;
  }
  sortCommon(MediumRadixSort{}, key, value, dim, descending);
}
} // namespace at::native
|
b28f874e874a1a3a9dda28651da09d08af2e68e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Utils.h"
const int MAX_FILTER_SIZE = 127;
// The filter will also be stored in constant memory
__constant__ float d_cF[MAX_FILTER_SIZE];
// ============
// CUDA kernels
// ============
// The original kernel
// Naive 1D convolution kernel: one thread per output element, with the
// filter read from global memory on every tap. Border elements (where the
// filter would extend past the array) are copied through unchanged.
__global__ void deviceConvolutionKernel(const float * d_a, const float * d_f, float * d_b,
const int aSize, const int fSize)
{
int tIdx = blockIdx.x * blockDim.x + threadIdx.x; // Global ID of the current thread
const int fHalfSize = fSize / 2; // Half the size of the filter
// Ignore threads that will not be used
if (tIdx >= aSize)
{
return;
}
// Border threads just copy the input to the output (filter cannot be applied)
if ((tIdx < fHalfSize) || (tIdx >= aSize - fHalfSize))
{
d_b[tIdx] = d_a[tIdx];
return;
}
// All other threads apply the filter
float out = 0.0F;
for (int i = 0; i < fSize; ++i)
{
out += d_a[tIdx - fHalfSize + i] * d_f[i];
}
// Write the filtered element
d_b[tIdx] = out;
}
// The improved kernel
// Optimized 1D convolution kernel: each block stages the array elements it
// needs (its own span plus a halo of fHalfSize on each side) into shared
// memory, and the filter is read from the __constant__ buffer d_cF.
// NOTE(review): the static tile assumes blockDim.x <= 512; the host code
// launches 256 threads per block — confirm before changing the launch config.
__global__ void deviceConvolutionKernelOptimized(const float * d_a, float * d_b, const int aSize, const int fSize)
{
__shared__ float tile[512 + MAX_FILTER_SIZE - 1]; // Block-shared tile holding array entries
const int fHalfSize = fSize >> 1; // Half the size of the filter
int tIdx = threadIdx.x; // Local thread index
int gBIdx = blockIdx.x * blockDim.x; // Global block index
int gIdx = gBIdx + tIdx; // Global thread index
// The global bounds of the array (all elements in between them are loaded to shared memory)
int gMinIndex = max(gBIdx - fHalfSize, 0);
int gMaxIndex = min(gBIdx + blockDim.x + fHalfSize, aSize) - 1;
// An offset used to align active threads to their respective elements in shared memory
int offset = gMinIndex + fHalfSize - gBIdx;
// Load all required elements in shared memory (note the coalesced memory access)
for (int crtIdx = gMinIndex + tIdx; crtIdx <= gMaxIndex; crtIdx += blockDim.x)
{
tile[crtIdx - gMinIndex] = d_a[crtIdx];
}
// Wait for the entire tile to be populated
__syncthreads();
// Unneeded threads can be ignored
if (gIdx < aSize)
{
if ((gIdx < fHalfSize) || (gIdx >= aSize - fHalfSize))
{
// Global edge threads just copy the input to the output
d_b[gIdx] = d_a[gIdx];
}
else
{
// Global inner threads apply the filter
float out = 0.0F;
for (int i = 0; i < fSize; ++i)
{
out += tile[tIdx - offset + i] * d_cF[i];
}
d_b[gIdx] = out;
}
}
}
// ========================================
// Command-line argument validation wrapper
// ========================================
// Validates the command-line arguments into vGridConf, which holds
// (blocks, threads, array size, filter size). The generic positivity
// checks are delegated to validateArguments(); the filter-specific
// constraints (odd size, no larger than the array, fits the constant
// buffer) are enforced here. VALIDATE aborts with its message on failure.
void validateArgumentsWrapper(int argCount, char ** argValues, int * vGridConf)
{
const char * vErrMessages[3] = {"Error: The number of threads must be greater than 0.",
"Error: The array size must be greater than 0.",
"Error: The filter size must be greater than 0."};
validateArguments(argCount, 3, argValues, vGridConf, vErrMessages);
VALIDATE(vGridConf[3] % 2 == 1, "Error: The filter size must be an odd value.");
VALIDATE(vGridConf[3] <= vGridConf[2], "Error: The filter size cannot exceed the array size.");
VALIDATE(vGridConf[3] <= MAX_FILTER_SIZE, "Error: The filter size is too big.");
}
// =============================
// Host-based filter application
// =============================
// Reference (CPU) implementation of the 1D convolution: fills h_b from h_a
// using filter h_f; border elements are copied through unchanged, matching
// the device kernels. Returns the elapsed time in microseconds.
float hostFilter(const float * h_a, const float * h_f, float * h_b, const int aSize, const int fSize)
{
const int fHalfSize = fSize / 2;
long long startMoment;
float pcFreq;
// Start the timer
hostTimerStart(&pcFreq, &startMoment);
for(int i = 0; i < aSize; ++i)
{
if ((i >= fHalfSize) && (i < aSize - fHalfSize))
{
// An element is filtered only if the entire filter can be applied to it
float out = 0.0F;
for (int j = 0; j < fSize; ++j)
{
out += h_f[j] * h_a[i - fHalfSize + j];
}
h_b[i] = out;
}
else
{
// For border elements we don't apply the filter
h_b[i] = h_a[i];
}
}
// Return the time in microseconds
return hostTimerStop(pcFreq, startMoment);
}
// ===============================
// Device-based filter application
// ===============================
// Runs one device-side convolution: uploads the input (and the filter —
// to the __constant__ buffer d_cF for the optimized kernel, to global
// memory otherwise), launches the selected kernel timed with events, and
// copies the result into h_b. Returns the kernel time in microseconds.
float deviceFilter(const float * h_a, const float * h_f, float * h_b, const int aSize, const int fSize,
int nBlocks, int nThreadsPerBlock, const int bOptimized)
{
float * d_a = NULL; // Device-based copy of input array
float * d_b = NULL; // Device-generated output array
float * d_f = NULL; // Device-based copy of the filter (unoptimized version only)
int byteSize = aSize * sizeof(float);
hipEvent_t start, stop;
float time;
// Create events for measuring elapsed time on the device
SAFE_CUDA_CALL(hipEventCreate(&start));
SAFE_CUDA_CALL(hipEventCreate(&stop));
// Allocate and fill the device-based arrays
generateDeviceData(byteSize, &d_a, h_a, TRUE);
generateDeviceData(byteSize, &d_b, NULL, FALSE);
if (bOptimized)
{
// The filter is copied to constant memory
SAFE_CUDA_CALL(hipMemcpyToSymbol(d_cF, h_f, fSize * sizeof(float)));
}
else
{
// The filter is copied to the device
generateDeviceData(fSize * sizeof(float), &d_f, h_f, TRUE);
}
// Record the starting moment of the convolution
SAFE_CUDA_CALL(hipEventRecord(start, 0));
// Launch the kernel (asynchronously)
if (bOptimized)
{
hipLaunchKernelGGL(( deviceConvolutionKernelOptimized), dim3(nBlocks), dim3(nThreadsPerBlock), 0, 0, d_a, d_b, aSize, fSize);
}
else
{
hipLaunchKernelGGL(( deviceConvolutionKernel), dim3(nBlocks), dim3(nThreadsPerBlock), 0, 0, d_a, d_f, d_b, aSize, fSize);
}
// Record the ending moment of the convolution and synchronize
SAFE_CUDA_CALL(hipEventRecord(stop, 0));
SAFE_CUDA_CALL(hipEventSynchronize(stop));
// Calculate the elapsed time
SAFE_CUDA_CALL(hipEventElapsedTime(&time, start, stop));
// Transfer the computed array back to the host
SAFE_CUDA_CALL(hipMemcpy(h_b, d_b, byteSize, hipMemcpyDeviceToHost));
// Free used resources on the device
SAFE_CUDA_CALL(hipEventDestroy(start));
SAFE_CUDA_CALL(hipEventDestroy(stop));
SAFE_CUDA_CALL(hipFree(d_a));
SAFE_CUDA_CALL(hipFree(d_b));
if (d_f != NULL)
{
SAFE_CUDA_CALL(hipFree(d_f));
}
// Event time is reported in milliseconds; convert to microseconds.
return time * 1.0E+3F;
}
// =======================================
// Wrapper over the device kernel launcher
// =======================================
// Runs deviceFilter once, prints its runtime and the speed-up over the
// host reference time, verifies the device output against h_b_h, and
// returns the measured device time in microseconds.
float deviceFilterWrapper(const float * h_a, const float * h_f, float * h_b_d, int aSize, int fSize,
int nBlocks, int nThreadsPerBlock, int bOptimized, const float * h_b_h, float hostTime)
{
float devTime = deviceFilter(h_a, h_f, h_b_d, aSize, fSize, nBlocks, nThreadsPerBlock, bOptimized);
printf("Device filtering time (%s kernel): %.2f us" NEW_LINE, bOptimized? "optimized" : "simple", devTime);
// Compute the speed-up between the device and the host
printf("Speed-up (%s kernel): %.2f" NEW_LINE, bOptimized? "optimized" : "simple", hostTime / devTime);
// Check if the calculated arrays match
compareResults(h_b_h, h_b_d, aSize);
return devTime;
}
// =======================
// Application entry point
// =======================
// Application entry point: parses arguments, generates random input data,
// runs the host reference filter, then the simple and optimized device
// kernels, and reports the device-vs-host and kernel-vs-kernel speed-ups.
int _03_1D_Convolution(int argCount, char ** argValues)
{
int vGridConf[4];
float * h_a; // Host-based input array
float * h_f; // Host-based input filter
float * h_b_h; // Host-based output array generated by the host
float * h_b_d; // Host-based output array generated by the device
float hostTime, devTime1, devTime2;
// Validate all command-line arguments
validateArgumentsWrapper(argCount, argValues, vGridConf);
// Generate the host-based data
generateHostData(vGridConf[2], &h_a, TRUE);
generateHostData(vGridConf[3], &h_f, TRUE);
generateHostData(vGridConf[2], &h_b_h, FALSE);
generateHostData(vGridConf[2], &h_b_d, FALSE);
printf("Data generation complete." NEW_LINE);
// Perform the host-based filtering
// NOTE(review): the message below says "addition" but reports the filtering time.
hostTime = hostFilter(h_a, h_f, h_b_h, vGridConf[2], vGridConf[3]);
printf("Host addition time: %.2f us" NEW_LINE, hostTime);
printf("Will launch (B: %d, T: %d, G: %d, F: %d)" NEW_LINE,
vGridConf[0], vGridConf[1], vGridConf[2], vGridConf[3]);
// Perform the device-based filtering
devTime1 = deviceFilterWrapper(h_a, h_f, h_b_d, vGridConf[2], vGridConf[3], vGridConf[0], vGridConf[1],
FALSE, h_b_h, hostTime);
devTime2 = deviceFilterWrapper(h_a, h_f, h_b_d, vGridConf[2], vGridConf[3], vGridConf[0], vGridConf[1],
TRUE, h_b_h, hostTime);
printf("Device kernel speed-up: %.2f" NEW_LINE, devTime1 / devTime2);
free(h_a);
free(h_f);
free(h_b_h);
free(h_b_d);
WAIT_AND_EXIT(0);
}
const int MAX_FILTER_SIZE = 127;
// The filter will also be stored in constant memory
__constant__ float d_cF[MAX_FILTER_SIZE];
// ============
// CUDA kernels
// ============
// The original kernel
// Naive 1D convolution: one thread per output element, filter read from
// global memory on every tap. Elements closer than fSize/2 to either end
// of the array (where the filter would run off the data) are passed
// through unchanged.
__global__ void deviceConvolutionKernel(const float * d_a, const float * d_f, float * d_b,
                                        const int aSize, const int fSize)
{
    const int gid  = blockIdx.x * blockDim.x + threadIdx.x; // global element index
    const int half = fSize / 2;                             // filter radius

    // Threads beyond the array have nothing to do.
    if (gid >= aSize)
    {
        return;
    }

    // Border elements: the filter cannot be applied, so copy input through.
    if (gid < half || gid >= aSize - half)
    {
        d_b[gid] = d_a[gid];
        return;
    }

    // Interior elements: accumulate the weighted neighbourhood.
    float acc = 0.0F;
    for (int k = 0; k < fSize; ++k)
    {
        acc += d_a[gid - half + k] * d_f[k];
    }
    d_b[gid] = acc;
}
// The improved kernel
// Optimized 1D convolution kernel: each block stages the array elements it
// needs (its own span plus a halo of fHalfSize on each side) into shared
// memory, and the filter is read from the __constant__ buffer d_cF.
// NOTE(review): the static tile assumes blockDim.x <= 512; the host code
// launches 256 threads per block — confirm before changing the launch config.
__global__ void deviceConvolutionKernelOptimized(const float * d_a, float * d_b, const int aSize, const int fSize)
{
__shared__ float tile[512 + MAX_FILTER_SIZE - 1]; // Block-shared tile holding array entries
const int fHalfSize = fSize >> 1; // Half the size of the filter
int tIdx = threadIdx.x; // Local thread index
int gBIdx = blockIdx.x * blockDim.x; // Global block index
int gIdx = gBIdx + tIdx; // Global thread index
// The global bounds of the array (all elements in between them are loaded to shared memory)
int gMinIndex = max(gBIdx - fHalfSize, 0);
int gMaxIndex = min(gBIdx + blockDim.x + fHalfSize, aSize) - 1;
// An offset used to align active threads to their respective elements in shared memory
int offset = gMinIndex + fHalfSize - gBIdx;
// Load all required elements in shared memory (note the coalesced memory access)
for (int crtIdx = gMinIndex + tIdx; crtIdx <= gMaxIndex; crtIdx += blockDim.x)
{
tile[crtIdx - gMinIndex] = d_a[crtIdx];
}
// Wait for the entire tile to be populated
__syncthreads();
// Unneeded threads can be ignored
if (gIdx < aSize)
{
if ((gIdx < fHalfSize) || (gIdx >= aSize - fHalfSize))
{
// Global edge threads just copy the input to the output
d_b[gIdx] = d_a[gIdx];
}
else
{
// Global inner threads apply the filter
float out = 0.0F;
for (int i = 0; i < fSize; ++i)
{
out += tile[tIdx - offset + i] * d_cF[i];
}
d_b[gIdx] = out;
}
}
}
// ========================================
// Command-line argument validation wrapper
// ========================================
// Validates the command-line arguments into vGridConf, which holds
// (blocks, threads, array size, filter size). The positivity checks are
// delegated to validateArguments(); the filter-specific constraints
// (odd size, no larger than the array, fits in the constant buffer) are
// enforced here. VALIDATE aborts with its message on failure.
void validateArgumentsWrapper(int argCount, char ** argValues, int * vGridConf)
{
    const char * messages[3] =
    {
        "Error: The number of threads must be greater than 0.",
        "Error: The array size must be greater than 0.",
        "Error: The filter size must be greater than 0."
    };

    validateArguments(argCount, 3, argValues, vGridConf, messages);

    VALIDATE(vGridConf[3] % 2 == 1, "Error: The filter size must be an odd value.");
    VALIDATE(vGridConf[3] <= vGridConf[2], "Error: The filter size cannot exceed the array size.");
    VALIDATE(vGridConf[3] <= MAX_FILTER_SIZE, "Error: The filter size is too big.");
}
// =============================
// Host-based filter application
// =============================
// Reference (CPU) implementation of the 1D convolution: fills h_b from h_a
// using filter h_f; border elements (within fSize/2 of either end) are
// copied through unchanged, matching the device kernels.
// Returns the elapsed time in microseconds.
float hostFilter(const float * h_a, const float * h_f, float * h_b, const int aSize, const int fSize)
{
    const int radius = fSize / 2;
    long long t0;
    float freq;

    // Start the timer
    hostTimerStart(&freq, &t0);

    for (int i = 0; i < aSize; ++i)
    {
        // Border elements cannot take the full filter — pass them through.
        if (i < radius || i >= aSize - radius)
        {
            h_b[i] = h_a[i];
            continue;
        }
        // Interior element: accumulate the weighted neighbourhood.
        float acc = 0.0F;
        for (int j = 0; j < fSize; ++j)
        {
            acc += h_f[j] * h_a[i - radius + j];
        }
        h_b[i] = acc;
    }

    // Return the time in microseconds
    return hostTimerStop(freq, t0);
}
// ===============================
// Device-based filter application
// ===============================
// Runs one device-side convolution: uploads the input (and the filter —
// to the __constant__ buffer d_cF for the optimized kernel, to global
// memory otherwise), launches the selected kernel timed with CUDA events,
// and copies the result into h_b. Returns the kernel time in microseconds.
float deviceFilter(const float * h_a, const float * h_f, float * h_b, const int aSize, const int fSize,
int nBlocks, int nThreadsPerBlock, const int bOptimized)
{
float * d_a = NULL; // Device-based copy of input array
float * d_b = NULL; // Device-generated output array
float * d_f = NULL; // Device-based copy of the filter (unoptimized version only)
int byteSize = aSize * sizeof(float);
cudaEvent_t start, stop;
float time;
// Create events for measuring elapsed time on the device
SAFE_CUDA_CALL(cudaEventCreate(&start));
SAFE_CUDA_CALL(cudaEventCreate(&stop));
// Allocate and fill the device-based arrays
generateDeviceData(byteSize, &d_a, h_a, TRUE);
generateDeviceData(byteSize, &d_b, NULL, FALSE);
if (bOptimized)
{
// The filter is copied to constant memory
SAFE_CUDA_CALL(cudaMemcpyToSymbol(d_cF, h_f, fSize * sizeof(float)));
}
else
{
// The filter is copied to the device
generateDeviceData(fSize * sizeof(float), &d_f, h_f, TRUE);
}
// Record the starting moment of the convolution
SAFE_CUDA_CALL(cudaEventRecord(start, 0));
// Launch the kernel (asynchronously)
if (bOptimized)
{
deviceConvolutionKernelOptimized<<<nBlocks, nThreadsPerBlock>>>(d_a, d_b, aSize, fSize);
}
else
{
deviceConvolutionKernel<<<nBlocks, nThreadsPerBlock>>>(d_a, d_f, d_b, aSize, fSize);
}
// Record the ending moment of the convolution and synchronize
SAFE_CUDA_CALL(cudaEventRecord(stop, 0));
SAFE_CUDA_CALL(cudaEventSynchronize(stop));
// Calculate the elapsed time
SAFE_CUDA_CALL(cudaEventElapsedTime(&time, start, stop));
// Transfer the computed array back to the host
SAFE_CUDA_CALL(cudaMemcpy(h_b, d_b, byteSize, cudaMemcpyDeviceToHost));
// Free used resources on the device
SAFE_CUDA_CALL(cudaEventDestroy(start));
SAFE_CUDA_CALL(cudaEventDestroy(stop));
SAFE_CUDA_CALL(cudaFree(d_a));
SAFE_CUDA_CALL(cudaFree(d_b));
if (d_f != NULL)
{
SAFE_CUDA_CALL(cudaFree(d_f));
}
// cudaEventElapsedTime reports milliseconds; convert to microseconds.
return time * 1.0E+3F;
}
// =======================================
// Wrapper over the device kernel launcher
// =======================================
// Runs deviceFilter once, prints its runtime and the speed-up over the
// host reference time, verifies the device output against the host
// reference h_b_h, and returns the device time in microseconds.
float deviceFilterWrapper(const float * h_a, const float * h_f, float * h_b_d, int aSize, int fSize,
                          int nBlocks, int nThreadsPerBlock, int bOptimized, const float * h_b_h, float hostTime)
{
    const char * kernelName = bOptimized ? "optimized" : "simple";
    const float devTime = deviceFilter(h_a, h_f, h_b_d, aSize, fSize, nBlocks, nThreadsPerBlock, bOptimized);

    printf("Device filtering time (%s kernel): %.2f us" NEW_LINE, kernelName, devTime);
    // Compute the speed-up between the device and the host
    printf("Speed-up (%s kernel): %.2f" NEW_LINE, kernelName, hostTime / devTime);

    // Check if the calculated arrays match
    compareResults(h_b_h, h_b_d, aSize);
    return devTime;
}
// =======================
// Application entry point
// =======================
// Application entry point: parses arguments, generates random input data,
// runs the host reference filter, then the simple and optimized device
// kernels, and reports the device-vs-host and kernel-vs-kernel speed-ups.
int _03_1D_Convolution(int argCount, char ** argValues)
{
int vGridConf[4];
float * h_a; // Host-based input array
float * h_f; // Host-based input filter
float * h_b_h; // Host-based output array generated by the host
float * h_b_d; // Host-based output array generated by the device
float hostTime, devTime1, devTime2;
// Validate all command-line arguments
validateArgumentsWrapper(argCount, argValues, vGridConf);
// Generate the host-based data
generateHostData(vGridConf[2], &h_a, TRUE);
generateHostData(vGridConf[3], &h_f, TRUE);
generateHostData(vGridConf[2], &h_b_h, FALSE);
generateHostData(vGridConf[2], &h_b_d, FALSE);
printf("Data generation complete." NEW_LINE);
// Perform the host-based filtering
// NOTE(review): the message below says "addition" but reports the filtering time.
hostTime = hostFilter(h_a, h_f, h_b_h, vGridConf[2], vGridConf[3]);
printf("Host addition time: %.2f us" NEW_LINE, hostTime);
printf("Will launch (B: %d, T: %d, G: %d, F: %d)" NEW_LINE,
vGridConf[0], vGridConf[1], vGridConf[2], vGridConf[3]);
// Perform the device-based filtering
devTime1 = deviceFilterWrapper(h_a, h_f, h_b_d, vGridConf[2], vGridConf[3], vGridConf[0], vGridConf[1],
FALSE, h_b_h, hostTime);
devTime2 = deviceFilterWrapper(h_a, h_f, h_b_d, vGridConf[2], vGridConf[3], vGridConf[0], vGridConf[1],
TRUE, h_b_h, hostTime);
printf("Device kernel speed-up: %.2f" NEW_LINE, devTime1 / devTime2);
free(h_a);
free(h_f);
free(h_b_h);
free(h_b_d);
WAIT_AND_EXIT(0);
}
5146154bd6c952154999260913e0df676711c398.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "slicer.cuh"
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <stdio.h>
#define YNONE INT_MIN
__device__ __forceinline__
int pixelRayIntersectionY(triangle t, int x, int z);
// Pixel-parallel slicing: one thread per (x, layer) ray cast along y.
// Triangles are streamed through shared memory in block-sized chunks; each
// thread collects the y-intersections of its ray with every triangle,
// sorts them, and fills `out` using the even/odd crossing (parity) rule.
__global__
void pps(triangle* triangles_global, size_t num_triangles, bool* out, unsigned base_layer) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
// printf("starting thread %d\n", idx);
int z_idx = idx / X_DIM;
// if (y >= Y_DIM) return;
int x_idx = idx % X_DIM;
int x = x_idx - (X_DIM / 2);
int z = z_idx + base_layer;
// Copy triangles to shared memory
// Each block has a shared memory storing some triangles.
__shared__ triangle tri_base[THREADS_PER_BLOCK];
triangles = (triangle*) tri_base;
size_t num_iters = num_triangles / THREADS_PER_BLOCK;
int length = 0;
// NOTE(review): writes past MAX_TRUNK_SIZE are only reported after the
// fact by the warning below; by then the out-of-bounds stores happened.
int yints[MAX_TRUNK_SIZE+1];
for (size_t i = 0; i < num_iters; i++) {
triangles[threadIdx.x] = triangles_global[threadIdx.x + (i * THREADS_PER_BLOCK)];
// Wait for other threads to complete;
__syncthreads();
if (z < NUM_LAYERS) {
for (size_t tri_idx = 0; tri_idx < THREADS_PER_BLOCK; tri_idx++) {
int intersection = pixelRayIntersectionY(triangles[tri_idx], x, z);
if (intersection != YNONE) {
yints[length] = intersection;
length++;
}
}
}
__syncthreads();
}
// Handle the final partial chunk of triangles.
size_t remaining = num_triangles - (num_iters * THREADS_PER_BLOCK);
if (threadIdx.x < remaining) {
triangles[threadIdx.x] = triangles_global[threadIdx.x + (num_iters * THREADS_PER_BLOCK)];
}
__syncthreads();
if (remaining && z < NUM_LAYERS) {
for (size_t tri_idx = 0; tri_idx < remaining; tri_idx++) {
int intersection = pixelRayIntersectionY(triangles[tri_idx], x, z);
if (intersection != YNONE) {
yints[length] = intersection;
length++;
}
}
}
if (z >= NUM_LAYERS) return;
// Sort the intersections; the Y_MAX sentinel terminates the scan below.
thrust::sort(thrust::device, &yints[0], &yints[length]);
yints[length] = Y_MAX;
if (length > MAX_TRUNK_SIZE)
printf("Error: Too many intersections.\n \
Please increase MAX_TRUNK_SIZE in slicer.cuh and recompile.\n");
bool flag = false;
int layerIdx = 0;
// A pixel is set when the ray hits a triangle at y, or when an odd
// number of intersections lies below it (i.e. it is inside the model).
for (int y = Y_MIN; y < Y_MAX; y++) {
// If intersect
while (yints[layerIdx] < y) layerIdx++;
bool intersect = (y == yints[layerIdx]);
flag = (bool) (layerIdx & 1);
unsigned y_idx = y - Y_MIN;
out[z_idx*Y_DIM*X_DIM + y_idx*X_DIM + x_idx] = intersect || flag;
}
}
/**
 * pixelRayIntersectionY: helper function, computes the intersection of the given triangle and pixel ray
 * Inputs:
 * t -- input triangle
 * x, z -- coordinates of the input pixel ray (the ray runs along y)
 * Returns:
 * The y pixel index at which they intersect, or YNONE if no intersection
 */
__device__ __forceinline__
int pixelRayIntersectionY(triangle t, int x, int z) {
/*
Let A, B, C be the 3 vertices of the given triangle
Let S(x,y,z) be the intersection, where x,y are given
We want to find some a, b such that AS = a*AB + b*AC
If a >= 0, b >= 0, and a+b <= 1, S is a valid intersection.
*/
// Cheap bounding-box rejection before the barycentric math.
double x_max = max(t.p1.x, max(t.p2.x, t.p3.x));
double x_min = min(t.p1.x, min(t.p2.x, t.p3.x));
double z_max = max(t.p1.z, max(t.p2.z, t.p3.z));
double z_min = min(t.p1.z, min(t.p2.z, t.p3.z));
double x_pos = x * RESOLUTION;
double z_pos = z * RESOLUTION;
if ((x_pos < x_min) || (x_pos > x_max) || (z_pos < z_min) || (z_pos > z_max)) return YNONE;
// Solve for the barycentric coordinates (a, b) in the x/z plane, then
// interpolate y from the triangle's vertices.
double x_d = x_pos - t.p1.x;
double z_d = z_pos - t.p1.z;
double x1 = t.p2.x - t.p1.x;
double y1 = t.p2.y - t.p1.y;
double z1 = t.p2.z - t.p1.z;
double x2 = t.p3.x - t.p1.x;
double y2 = t.p3.y - t.p1.y;
double z2 = t.p3.z - t.p1.z;
double a = (x_d * z2 - x2 * z_d) / (x1 * z2 - x2 * z1);
double b = (x_d * z1 - x1 * z_d) / (x2 * z1 - x1 * z2);
bool inside = (a >= 0) && (b >= 0) && (a+b <= 1);
double intersection = (a * y1 + b * y2) + t.p1.y;
// // divide by layer width
return inside ? (intersection / RESOLUTION) : YNONE;
}
// Stream compaction: copies into `out` only the triangles whose z-extent
// overlaps the slab of layers [base_layer, base_layer + BLOCK_HEIGHT].
// `out_length` is advanced with atomicAdd, so the output order is arbitrary.
__global__
void triangleSelect(triangle* in, triangle* out, unsigned in_length,
unsigned* out_length, unsigned base_layer)
{
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t total_threads = blockDim.x * gridDim.x; // grid-stride step
double min_height = base_layer * RESOLUTION;
double max_height = (base_layer + BLOCK_HEIGHT) * RESOLUTION;
while (idx < in_length) {
triangle t = in[idx];
idx += total_threads;
// Reject triangles entirely above or entirely below the slab.
double z_min = min(t.p1.z, min(t.p2.z, t.p3.z));
if (z_min > max_height) continue;
double z_max = max(t.p1.z, max(t.p2.z, t.p3.z));
if (z_max < min_height) continue;
size_t curr_length = atomicAdd(out_length, 1);
out[curr_length] = t;
}
}
| 5146154bd6c952154999260913e0df676711c398.cu | #include "slicer.cuh"
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <stdio.h>
#define YNONE INT_MIN
__device__ __forceinline__
int pixelRayIntersectionY(triangle t, int x, int z);
// Pixel-parallel slicing: one thread per (x, layer) ray cast along y.
// Triangles are streamed through shared memory in block-sized chunks; each
// thread collects the y-intersections of its ray with every triangle,
// sorts them, and fills `out` using the even/odd crossing (parity) rule.
__global__
void pps(triangle* triangles_global, size_t num_triangles, bool* out, unsigned base_layer) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
// printf("starting thread %d\n", idx);
int z_idx = idx / X_DIM;
// if (y >= Y_DIM) return;
int x_idx = idx % X_DIM;
int x = x_idx - (X_DIM / 2);
int z = z_idx + base_layer;
// Copy triangles to shared memory
// Each block has a shared memory storing some triangles.
__shared__ triangle tri_base[THREADS_PER_BLOCK];
triangle* triangles = (triangle*) tri_base;
size_t num_iters = num_triangles / THREADS_PER_BLOCK;
int length = 0;
// NOTE(review): writes past MAX_TRUNK_SIZE are only reported after the
// fact by the warning below; by then the out-of-bounds stores happened.
int yints[MAX_TRUNK_SIZE+1];
for (size_t i = 0; i < num_iters; i++) {
triangles[threadIdx.x] = triangles_global[threadIdx.x + (i * THREADS_PER_BLOCK)];
// Wait for other threads to complete;
__syncthreads();
if (z < NUM_LAYERS) {
for (size_t tri_idx = 0; tri_idx < THREADS_PER_BLOCK; tri_idx++) {
int intersection = pixelRayIntersectionY(triangles[tri_idx], x, z);
if (intersection != YNONE) {
yints[length] = intersection;
length++;
}
}
}
__syncthreads();
}
// Handle the final partial chunk of triangles.
size_t remaining = num_triangles - (num_iters * THREADS_PER_BLOCK);
if (threadIdx.x < remaining) {
triangles[threadIdx.x] = triangles_global[threadIdx.x + (num_iters * THREADS_PER_BLOCK)];
}
__syncthreads();
if (remaining && z < NUM_LAYERS) {
for (size_t tri_idx = 0; tri_idx < remaining; tri_idx++) {
int intersection = pixelRayIntersectionY(triangles[tri_idx], x, z);
if (intersection != YNONE) {
yints[length] = intersection;
length++;
}
}
}
if (z >= NUM_LAYERS) return;
// Sort the intersections; the Y_MAX sentinel terminates the scan below.
thrust::sort(thrust::device, &yints[0], &yints[length]);
yints[length] = Y_MAX;
if (length > MAX_TRUNK_SIZE)
printf("Error: Too many intersections.\n \
Please increase MAX_TRUNK_SIZE in slicer.cuh and recompile.\n");
bool flag = false;
int layerIdx = 0;
// A pixel is set when the ray hits a triangle at y, or when an odd
// number of intersections lies below it (i.e. it is inside the model).
for (int y = Y_MIN; y < Y_MAX; y++) {
// If intersect
while (yints[layerIdx] < y) layerIdx++;
bool intersect = (y == yints[layerIdx]);
flag = (bool) (layerIdx & 1);
unsigned y_idx = y - Y_MIN;
out[z_idx*Y_DIM*X_DIM + y_idx*X_DIM + x_idx] = intersect || flag;
}
}
/**
 * pixelRayIntersectionY: computes the intersection of the given triangle
 * with the pixel ray that runs along y at the given (x, z) pixel.
 * Inputs:
 *   t    -- input triangle
 *   x, z -- pixel coordinates of the ray
 * Returns:
 *   The y pixel index at which they intersect, or YNONE if no intersection.
 */
__device__ __forceinline__
int pixelRayIntersectionY(triangle t, int x, int z) {
    /*
     * Barycentric test: write S - p1 = a*(p2 - p1) + b*(p3 - p1) using the
     * x/z coordinates (which the ray fixes), then interpolate y from the
     * vertices. The hit is valid when a >= 0, b >= 0 and a + b <= 1.
     */
    double xHi = max(t.p1.x, max(t.p2.x, t.p3.x));
    double xLo = min(t.p1.x, min(t.p2.x, t.p3.x));
    double zHi = max(t.p1.z, max(t.p2.z, t.p3.z));
    double zLo = min(t.p1.z, min(t.p2.z, t.p3.z));
    double xPos = x * RESOLUTION;
    double zPos = z * RESOLUTION;
    // Cheap bounding-box rejection before the barycentric math.
    if ((xPos < xLo) || (xPos > xHi) || (zPos < zLo) || (zPos > zHi)) return YNONE;
    double dx = xPos - t.p1.x;
    double dz = zPos - t.p1.z;
    double e1x = t.p2.x - t.p1.x;
    double e1y = t.p2.y - t.p1.y;
    double e1z = t.p2.z - t.p1.z;
    double e2x = t.p3.x - t.p1.x;
    double e2y = t.p3.y - t.p1.y;
    double e2z = t.p3.z - t.p1.z;
    double a = (dx * e2z - e2x * dz) / (e1x * e2z - e2x * e1z);
    double b = (dx * e1z - e1x * dz) / (e2x * e1z - e1x * e2z);
    bool hit = (a >= 0) && (b >= 0) && (a + b <= 1);
    double yWorld = (a * e1y + b * e2y) + t.p1.y;
    // Convert the world-space y to a layer/pixel index (width RESOLUTION).
    return hit ? (yWorld / RESOLUTION) : YNONE;
}
// Stream compaction: copies into `out` only the triangles whose z-extent
// overlaps the slab of layers [base_layer, base_layer + BLOCK_HEIGHT].
// `out_length` is advanced with atomicAdd, so the output order is arbitrary.
__global__
void triangleSelect(triangle* in, triangle* out, unsigned in_length,
                    unsigned* out_length, unsigned base_layer)
{
    const size_t stride = blockDim.x * gridDim.x;   // grid-stride step
    const double slabLo = base_layer * RESOLUTION;
    const double slabHi = (base_layer + BLOCK_HEIGHT) * RESOLUTION;
    for (size_t i = blockDim.x * blockIdx.x + threadIdx.x; i < in_length; i += stride) {
        triangle t = in[i];
        // Reject triangles entirely above or entirely below the slab.
        double tLo = min(t.p1.z, min(t.p2.z, t.p3.z));
        if (tLo > slabHi) continue;
        double tHi = max(t.p1.z, max(t.p2.z, t.p3.z));
        if (tHi < slabLo) continue;
        // Claim an output slot and emit the kept triangle.
        out[atomicAdd(out_length, 1)] = t;
    }
}
|
538f5eae618ef7f2928e45d7c8366c149c3bd7ff.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define LINE 100000
// Loads up to LINE integers from "number.txt" into num, aborting the
// program when the file cannot be opened.
//
// Fix: the original loop used while(!feof(fp)), which only reports EOF
// after a read has already failed — the final (failed) fscanf left `temp`
// unchanged and stored it at num[i], and when the file holds exactly LINE
// numbers that store landed at num[LINE], past the end of the array.
// Testing the fscanf return value stops exactly at the last parsed
// integer, and the i < LINE bound prevents any out-of-bounds write.
void readfile(int num[LINE]){
	int temp;
	int i;
	FILE *fp;
	fp = fopen("number.txt", "r");
	i = 0;
	if(fp == NULL){
		printf("Error loading file!!\n");
		exit(1);
	}else{
		// Stop as soon as fscanf fails to parse an int or the array is full.
		while(i < LINE && fscanf(fp, "%d", &temp) == 1){
			num[i] = temp;
			i++;
		}
	}
	fclose(fp);
}
// Writes all LINE sorted integers to "update.txt", space-separated.
// NOTE(review): the fopen result is not checked before use.
void printfile(int num[LINE]){
int i;
FILE *fp = fopen("update.txt", "w");
for (i = 0; i < LINE; i++)
fprintf(fp, "%d ", num[i]);
fclose(fp);
}
// Copies the LINE integers of num into num1, element by element.
void copyData(int num[LINE], int num1[LINE]){
int i;
for(i = 0; i < LINE; i++)
num1[i] = num[i];
}
// Even phase of odd-even transposition sort: thread t compares and, if
// needed, swaps the adjacent pair (2t, 2t+1). The k <= n - 2 guard keeps
// the second element of the pair inside the array.
__global__ void even(int *dnum, int n){
int k = threadIdx.x + blockIdx.x * blockDim.x;
int temp;
k = k * 2;
if(k <= n - 2){
if(dnum[k] > dnum[k + 1]){
temp = dnum[k];
dnum[k] = dnum[k + 1];
dnum[k + 1] = temp;
}
}
}
// Odd phase of odd-even transposition sort: thread t compares and, if
// needed, swaps the adjacent pair (2t+1, 2t+2). The k <= n - 2 guard
// keeps the second element of the pair inside the array.
__global__ void odd(int *dnum, int n){
int k = threadIdx.x + blockIdx.x * blockDim.x;
int temp;
k = k * 2 + 1;
if(k <= n - 2){
if(dnum[k] > dnum[k + 1]){
temp = dnum[k];
dnum[k] = dnum[k + 1];
dnum[k + 1] = temp;
}
}
}
// Runs LINE alternating even/odd phases over the device array; n phases
// of odd-even transposition sort suffice to sort n elements, so this is
// more than enough. Launches are asynchronous.
void docuda(int *dnum, int threads, int block){
int i;
for(i = 0; i < LINE; i++){
hipLaunchKernelGGL(( even), dim3(block), dim3(threads), 0, 0, dnum, LINE);
hipLaunchKernelGGL(( odd), dim3(block), dim3(threads), 0, 0, dnum, LINE);
}
}
// Benchmarks the GPU odd-even sort with 256 threads per block across block
// counts 400..1400 (step 100), timing three runs per configuration and
// printing each run plus the average.
//
// Fixes:
//  - the per-run hipMalloc was never paired with hipFree, leaking one
//    LINE-sized device buffer per timed run (33 runs in total);
//  - kernel launches are asynchronous, so the end timestamp was taken
//    before the device finished; a hipDeviceSynchronize now closes the
//    timed region so the measurement covers the actual sorting work.
//
// NOTE(review): num1 only ever receives a copy of num (copyData(num, num1))
// and is never copied back, so runs after the first re-sort already-sorted
// data; preserved as-is since changing it would alter the reported numbers.
void cuda(int num[LINE], int num1[LINE]){
	int threads, block, i;
	int *dnum;
	struct timeval tv;
	struct timezone tz;
	double start, end, time, time1, time2, average;
	start = 0;
	end = 0;
	time = 0;
	time1 = 0;
	time2 = 0;
	average = 0;
	threads = 256;
	block = 400;
	printf("Time execution for parallel bubble sort using CUDA using 256 threads based on block size\n");
	printf("====================================================================================================\n");
	printf(" Block size Number of threads 1st time 2nd time 3rd time average \n");
	printf("====================================================================================================\n");
	while (block <= 1400){
		for (i = 0; i < 3; i++){
			copyData(num, num1);
			hipMalloc(&dnum, LINE*sizeof(int));
			hipMemcpy(dnum, num, LINE*sizeof(int), hipMemcpyHostToDevice);
			gettimeofday(&tv, &tz);
			start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000;
			docuda(dnum, threads, block);
			// Kernel launches are asynchronous: wait for completion so the
			// timestamp below reflects the full sorting time.
			hipDeviceSynchronize();
			gettimeofday(&tv, &tz);
			end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000;
			hipMemcpy(num, dnum, LINE*sizeof(int), hipMemcpyDeviceToHost);
			// Release the buffer allocated for this run (previously leaked).
			hipFree(dnum);
			if (i == 0)
				time = end - start;
			else if (i == 1)
				time1 = end - start;
			else if (i == 2)
				time2 = end - start;
		}
		average = (time + time1 + time2) / 3;
		printf(" %i %i %fs %fs %fs %fs\n", block, threads, time, time1, time2, average);
		block += 100;
	}
}
int main(){
int num[LINE];
int num1[LINE];
printf("Getting data...\n");
readfile(num);
printf("Sorting data...\n\n");
cuda(num, num1);
printfile(num);
printf("\nParallel bubble sort in CUDA sucessfully.\n");
return 0;
} | 538f5eae618ef7f2928e45d7c8366c149c3bd7ff.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#define LINE 100000
void readfile(int num[LINE]){
int temp;
int i;
FILE *fp;
fp = fopen("number.txt", "r");
i = 0;
if(fp == NULL){
printf("Error loading file!!\n");
exit(1);
}else{
while(!feof(fp)){
fscanf(fp, "%d", &temp);
num[i] = temp;
i++;
}
}
fclose(fp);
}
// Dumps the LINE integers of `num` to "update.txt", space-separated.
// NOTE(review): the fopen result is not checked before use.
void printfile(int num[LINE]){
	FILE *out = fopen("update.txt", "w");
	for (int idx = 0; idx < LINE; idx++){
		fprintf(out, "%d ", num[idx]);
	}
	fclose(out);
}
// Duplicates the LINE integers of num into num1.
void copyData(int num[LINE], int num1[LINE]){
	for (int idx = 0; idx < LINE; idx++){
		num1[idx] = num[idx];
	}
}
// Even phase of odd-even transposition sort: thread t orders the adjacent
// pair (2t, 2t+1) ascending. The bounds check keeps the second element of
// the pair inside the n-element array.
__global__ void even(int *dnum, int n){
	const int pair = 2 * (threadIdx.x + blockIdx.x * blockDim.x);
	if (pair > n - 2) {
		return;  // pair would read past dnum[n-1]
	}
	const int lo = dnum[pair];
	const int hi = dnum[pair + 1];
	if (lo > hi) {
		dnum[pair] = hi;
		dnum[pair + 1] = lo;
	}
}
/* One "odd" phase of odd-even transposition sort: global thread t compares
 * the adjacent pair at indices (2t+1, 2t+2) and swaps it into ascending
 * order.  Threads whose pair would fall past n-1 do nothing. */
__global__ void odd(int *dnum, int n){
    int pair = 2 * (threadIdx.x + blockIdx.x * blockDim.x) + 1;
    if (pair > n - 2) return;          // pair must lie fully inside the array
    int lo = dnum[pair];
    int hi = dnum[pair + 1];
    if (lo > hi){
        dnum[pair] = hi;
        dnum[pair + 1] = lo;
    }
}
/*
 * docuda - sort the device array dnum (LINE ints) with odd-even
 * transposition sort using the given launch configuration.
 *
 * Odd-even transposition sort of n elements is guaranteed sorted after n
 * phases.  Each loop iteration performs two phases (one even + one odd), so
 * (LINE + 1) / 2 iterations suffice; the previous LINE iterations did twice
 * the necessary work.  Kernels launched on the default stream execute in
 * launch order, so no explicit synchronization is needed between phases.
 */
void docuda(int *dnum, int threads, int block){
    int i;
    for(i = 0; i < (LINE + 1) / 2; i++){
        even<<<block, threads>>>(dnum, LINE);
        odd<<<block, threads>>>(dnum, LINE);
    }
}
/*
 * cuda - benchmark the GPU odd-even sort for block counts 400..1400
 * (step 100), three timed runs per configuration, printing a table of
 * per-run and average wall-clock times.
 *
 * num  : host data to sort; holds the sorted result on return.
 * num1 : scratch buffer that keeps a pristine copy of the unsorted input so
 *        every timed run sorts the SAME data.
 *
 * Fixes over the original:
 *  - num is restored from the backup before every run (previously each run
 *    after the first re-sorted already-sorted data, skewing the benchmark);
 *  - cudaDeviceSynchronize() before the stop timestamp, because kernel
 *    launches are asynchronous and the old code only timed launch overhead;
 *  - cudaFree(dnum) each iteration (the old code leaked LINE*sizeof(int)
 *    bytes of device memory per run).
 */
void cuda(int num[LINE], int num1[LINE]){
    int threads, block, i;
    int *dnum;
    struct timeval tv;
    struct timezone tz;
    double start, end, time, time1, time2, average;
    start = 0;
    end = 0;
    time = 0;
    time1 = 0;
    time2 = 0;
    average = 0;
    threads = 256;
    block = 400;
    copyData(num, num1);  /* back up the unsorted input once */
    printf("Time execution for parallel bubble sort using CUDA using 256 threads based on block size\n");
    printf("====================================================================================================\n");
    printf(" Block size Number of threads 1st time 2nd time 3rd time average \n");
    printf("====================================================================================================\n");
    while (block <= 1400){
        for (i = 0; i < 3; i++){
            copyData(num1, num);  /* restore the unsorted input for this run */
            cudaMalloc(&dnum, LINE*sizeof(int));
            cudaMemcpy(dnum, num, LINE*sizeof(int), cudaMemcpyHostToDevice);
            gettimeofday(&tv, &tz);
            start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000;
            docuda(dnum, threads, block);
            cudaDeviceSynchronize();  /* launches are async: wait before stopping the clock */
            gettimeofday(&tv, &tz);
            end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000;
            cudaMemcpy(num, dnum, LINE*sizeof(int), cudaMemcpyDeviceToHost);
            cudaFree(dnum);  /* plug the per-run device-memory leak */
            if (i == 0)
                time = end - start;
            else if (i == 1)
                time1 = end - start;
            else if (i == 2)
                time2 = end - start;
        }
        average = (time + time1 + time2) / 3;
        printf(" %i %i %fs %fs %fs %fs\n", block, threads, time, time1, time2, average);
        block += 100;
    }
}
// Entry point: load data, run the GPU sort benchmark, persist the result.
int main(){
int num[LINE];   // working array: sorted in place by the device runs
int num1[LINE];  // scratch buffer handed to cuda() for its copies
printf("Getting data...\n");
readfile(num);
printf("Sorting data...\n\n");
cuda(num, num1);
printfile(num);
printf("\nParallel bubble sort in CUDA sucessfully.\n");
return 0;
} |
44c12a77efa2ab30140c43ae836119a1ab57521e.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <algorithm>
#include "bucketselect_combined.cuh"
//#include "bucketselect.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <fstream>
#include <random>
// #define Enabletest 1
using namespace std;
typedef unsigned int data_t;
//typedef unsigned int data_t;
typedef int index_t;
/*
 * qsort comparator for ints, ascending.
 * Returns the sign of the comparison without computing *a - *b: that
 * subtraction overflows (undefined behavior) when the operands are far
 * apart, e.g. INT_MIN vs. a large positive value.
 */
int compare (const void * a, const void * b)
{
	int lhs = *(const int*)a;
	int rhs = *(const int*)b;
	return (lhs > rhs) - (lhs < rhs);//in ascending order
}
// Integer exponentiation by repeated multiplication: returns x**n for
// non-negative n (returns 1 when n <= 0).  data_t is unused but retained so
// existing power<data_t,index_t>(...) call sites keep compiling.
template<typename data_t,typename index_t>
index_t power(index_t x,index_t n)
{
	index_t result = 1;
	index_t remaining = n;
	while (remaining-- > 0)
	{
		result *= x;
	}
	return result;
}
// Fold elements arr[1..n-1] into the caller-supplied running max/min
// (both passed by reference and updated in place).
// NOTE(review): arr[0] is only considered if the caller seeded max/min with
// it beforehand (cf. the commented-out seed in the original) -- confirm at
// call sites.
void getminmax(data_t* arr,index_t n,data_t& max,data_t& min)
{
	for (index_t idx = 1; idx < n; ++idx)
	{
		const data_t v = arr[idx];
		if (v > max)
			max = v;
		if (v < min)
			min = v;
	}
}
// Driver: generates num_element = 2^num_pow random values, runs the sampled
// GPU bucket-select to find the k-th largest element, and appends per-phase
// timings to a CSV log.  Usage: ./exe num_pow k num_bucket.
int main(int argc,char** argv)
{
cout<<"./exe num_element k NBucket"<<endl;
if (argc != 4) {cout<<"wrong input"<<endl;exit(-1);}
index_t num_pow = atol(argv[1]);
index_t base=2;
index_t num_element = power<data_t,index_t>(base,num_pow);
index_t k= atol(argv[2]);
index_t num_bucket=atol(argv[3]);//atol(argv[3]);
// index_t num_bucket=1<<NBits;
// Subrange size is 2^alpha; the formula below is a heuristic balancing n and k.
index_t alpha=0.5*(num_pow-log(k)/log(2)+3);
if (alpha<5) alpha++;
bool defaultContribution=true;
int beta=2;
index_t SubRangesize=pow(2,alpha);
index_t NSubranges=num_element/SubRangesize;
int NthreadstoworkInreduction=32;
if (SubRangesize<32)
{
NthreadstoworkInreduction=SubRangesize;
}
cout<<"Number of Subranges:"<<NSubranges<<endl;
if (NSubranges<k)
{
cout<<"Small number of subranges!. Decrease the value of alpha!"<<endl;
// exit(-1);
}
if (alpha <=5)
{
// Small subranges: switch to the beta-sampling contribution path.
defaultContribution=false;
beta=2;//SampleBeta function is designed for Beta=3 only. So we need to update the SampleBetafunction in radix select if we want to change Beta value
}
// Device-side scratch for up to beta candidate maxima (and their subrange
// ids) per subrange.
data_t* Max_d;
H_ERR(hipMalloc((void**) &Max_d,sizeof(data_t)*NSubranges*beta));// updated for Beta
index_t* SubrangeId_d;
H_ERR(hipMalloc((void**) &SubrangeId_d,sizeof(index_t)*NSubranges*beta));//updated for beta
data_t* vec= new data_t[num_element];
data_t* vec1= new data_t[num_element];
std::random_device rd;
std::mt19937 gen(rd());
float minvalue=100000000;
unsigned int value;
// std::uniform_int_distribution <unsigned int> d(0, 4294967295);
std::normal_distribution<float> d(100000000, 10000000);//Mean =100 mill , sd=100
// std::uniform_real_distribution<> d(0.0, 4294967295.0);//Generates random uniformly distributed floats within the given range
// Fill vec with normally-distributed samples; vec1 keeps an untouched host
// copy for the optional CPU verification under Enabletest.
for (index_t i=0;i<num_element;i++)
{
// vec[i]=rand()%2147483648;//2^31 -1
value=d(gen);//2^31 -1
if (minvalue > value)
{
minvalue=value;
}
vec[i]=value;//2^32 -1
// if(i<10000)
// cout<<vec[i]<<" ";
// cout<<endl;
vec1[i]=vec[i];
// if (vec[i] > 4294900000)
// {
// cout<<vec[i]<<" ";
// }
}
// NOTE(review): value is unsigned, so a negative float sample wraps before
// the comparison above; this guard can likely never fire -- verify.
if (minvalue < 0)
{
cout<<"-ve value detected:"<<minvalue<<endl;
return -1;
}
cout<<vec[0];
cout<<endl;
data_t* TopArray=new data_t[k];
data_t TopKElement=0;
// data_t NNewTopElements;
data_t* vec_d;
H_ERR(hipMalloc((void**) &vec_d,sizeof(data_t)*num_element));
H_ERR(hipMemcpy(vec_d,vec,sizeof(data_t)*num_element,hipMemcpyHostToDevice));
double timeforMaxsample=0;double timeforFirstTopk=0;double timeforSecondTopk=0;double timeforNormalRadixSelect=0;double timeforConcatenation=0;
int NThreadsPerBlock=256;//only shared memory
int NSharedMemoryElements=NThreadsPerBlock<<5;//3 is giving best result in different values of SubWarp size //Each thread responsible for 32 elements and contribute to 8 Subranges from a group of 4 elements
int SizeOfAllocation=NSharedMemoryElements+(NSharedMemoryElements >> 5);
H_ERR(hipDeviceSynchronize());
index_t* SelectedSubrangeId_d;
H_ERR(hipMalloc((void**) &SelectedSubrangeId_d,sizeof(index_t)*(NSubranges-k)*beta));//updated *3 for beta
index_t* CountSelectedSubrange_d;
index_t* CountLonelyElements_d;
H_ERR(hipMalloc((void**) &CountSelectedSubrange_d,sizeof(index_t)));
H_ERR(hipMalloc((void**) &CountLonelyElements_d,sizeof(index_t)));
H_ERR(hipMemset(CountSelectedSubrange_d, 0, sizeof(index_t)));
H_ERR(hipMemset(CountLonelyElements_d, 0, sizeof(index_t)));
index_t* write_pos_d;
// H_ERR(hipMalloc((void**) &ConcatenatedRange_d,sizeof(data_t)*k*SubRangesize));
H_ERR(hipMalloc((void**) &write_pos_d,sizeof(index_t)));
data_t* ConcatenatedRange_d;
H_ERR(hipMalloc((void**) &ConcatenatedRange_d,sizeof(data_t)*k*SubRangesize));
// Timed region: the full sampled bucket-select pipeline (the per-phase
// timing variables are filled in by the callee).
double start=wtime();
sample_bucket_select<data_t,index_t>(vec_d,num_element,/*num_element-k*/k,num_bucket,TopKElement,NSubranges,SubRangesize,alpha,timeforMaxsample,timeforFirstTopk,timeforSecondTopk,timeforConcatenation,Max_d,SubrangeId_d,beta,defaultContribution,NthreadstoworkInreduction,NThreadsPerBlock,SizeOfAllocation,NSharedMemoryElements, SelectedSubrangeId_d, CountSelectedSubrange_d, CountLonelyElements_d, write_pos_d, ConcatenatedRange_d);
// bucket_select<data_t,index_t>(vec_d,num_element,num_element-k,num_bucket,TopArray,TopKElement);
double totalTime=wtime()-start;
cout<<"Time for selecting the top k element is:"<<totalTime*1000<<" ms"<<endl;
// bucket_select_PhaseII<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,vec);
cout<<"The kth element from top is:"<<TopKElement<<endl;
cout<<endl;
#ifdef Enabletest
sort(vec1, vec1 + num_element);
cout<<endl;
cout<<"kth element"<<vec1[num_element-k]<<endl;
cout<<"k-1 th element"<<vec1[num_element-k+1]<<endl;
cout<<"k+1 th element"<<vec1[num_element-k-1]<<endl;
if (vec1[num_element-k]==TopKElement)
{
cout<<"Success!"<<endl;
}
else
{
cout<<"Not Success!"<<endl;
}
assert(vec1[num_element-k]==TopKElement);
#endif
// Append one CSV row: contribution mode (D/B), num_pow, k, alpha, and the
// per-phase plus total timings in milliseconds.
std::fstream timeLog;
// timeLog.open("Uniform_Unsigned__N_30_SOKBucket.csv",std::fstream::out | std::fstream::app);
//timeLog.open("Normal_float_N_29_SOKBucket.csv",std::fstream::out | std::fstream::app);
// timeLog.open("Normal_UINT_N_30_SOKBucket.csv",std::fstream::out | std::fstream::app);
timeLog.open("BucketSelect_Few_Digits_NORMAL_ALL_K.csv",std::fstream::out | std::fstream::app);
// timeLog.open("Uniform_UINT_N_29_SOKBucket.csv",std::fstream::out | std::fstream::app);
if (defaultContribution)
{
timeLog<<"D"<<";";
}
else
{
timeLog<<"B"<<";";
}
timeLog<<num_pow<<";"<<k<<";"<<alpha<<";"<<timeforMaxsample*1000<<";"<<timeforFirstTopk*1000<<";"<<timeforConcatenation*1000<<";"<<timeforSecondTopk*1000<<";"<<timeforNormalRadixSelect*1000<<";"<<totalTime*1000<<endl;
// timeLog<<num_pow<<"_N_"<<num_element<<"k_"<<k<<"num_bucket_"<<num_bucket<<";"<<totalTime*1000<<endl;
timeLog.close();
return 0;
}
| 44c12a77efa2ab30140c43ae836119a1ab57521e.cu | #include <iostream>
#include <stdlib.h>
#include <algorithm>
#include "bucketselect_combined.cuh"
//#include "bucketselect.cuh"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <fstream>
#include <random>
// #define Enabletest 1
using namespace std;
typedef unsigned int data_t;
//typedef unsigned int data_t;
typedef int index_t;
/*
 * qsort comparator for ints, ascending.
 * Returns the sign of the comparison without computing *a - *b: that
 * subtraction overflows (undefined behavior) when the operands are far
 * apart, e.g. INT_MIN vs. a large positive value.
 */
int compare (const void * a, const void * b)
{
	int lhs = *(const int*)a;
	int rhs = *(const int*)b;
	return (lhs > rhs) - (lhs < rhs);//in ascending order
}
// Integer exponentiation by repeated multiplication: returns x**n for
// non-negative n (returns 1 when n <= 0).  data_t is unused but retained so
// existing power<data_t,index_t>(...) call sites keep compiling.
template<typename data_t,typename index_t>
index_t power(index_t x,index_t n)
{
	index_t result = 1;
	index_t remaining = n;
	while (remaining-- > 0)
	{
		result *= x;
	}
	return result;
}
// Fold elements arr[1..n-1] into the caller-supplied running max/min
// (both passed by reference and updated in place).
// NOTE(review): arr[0] is only considered if the caller seeded max/min with
// it beforehand (cf. the commented-out seed in the original) -- confirm at
// call sites.
void getminmax(data_t* arr,index_t n,data_t& max,data_t& min)
{
	for (index_t idx = 1; idx < n; ++idx)
	{
		const data_t v = arr[idx];
		if (v > max)
			max = v;
		if (v < min)
			min = v;
	}
}
// Driver: generates num_element = 2^num_pow random values, runs the sampled
// GPU bucket-select to find the k-th largest element, and appends per-phase
// timings to a CSV log.  Usage: ./exe num_pow k num_bucket.
int main(int argc,char** argv)
{
cout<<"./exe num_element k NBucket"<<endl;
if (argc != 4) {cout<<"wrong input"<<endl;exit(-1);}
index_t num_pow = atol(argv[1]);
index_t base=2;
index_t num_element = power<data_t,index_t>(base,num_pow);
index_t k= atol(argv[2]);
index_t num_bucket=atol(argv[3]);//atol(argv[3]);
// index_t num_bucket=1<<NBits;
// Subrange size is 2^alpha; the formula below is a heuristic balancing n and k.
index_t alpha=0.5*(num_pow-log(k)/log(2)+3);
if (alpha<5) alpha++;
bool defaultContribution=true;
int beta=2;
index_t SubRangesize=pow(2,alpha);
index_t NSubranges=num_element/SubRangesize;
int NthreadstoworkInreduction=32;
if (SubRangesize<32)
{
NthreadstoworkInreduction=SubRangesize;
}
cout<<"Number of Subranges:"<<NSubranges<<endl;
if (NSubranges<k)
{
cout<<"Small number of subranges!. Decrease the value of alpha!"<<endl;
// exit(-1);
}
if (alpha <=5)
{
// Small subranges: switch to the beta-sampling contribution path.
defaultContribution=false;
beta=2;//SampleBeta function is designed for Beta=3 only. So we need to update the SampleBetafunction in radix select if we want to change Beta value
}
// Device-side scratch for up to beta candidate maxima (and their subrange
// ids) per subrange.
data_t* Max_d;
H_ERR(cudaMalloc((void**) &Max_d,sizeof(data_t)*NSubranges*beta));// updated for Beta
index_t* SubrangeId_d;
H_ERR(cudaMalloc((void**) &SubrangeId_d,sizeof(index_t)*NSubranges*beta));//updated for beta
data_t* vec= new data_t[num_element];
data_t* vec1= new data_t[num_element];
std::random_device rd;
std::mt19937 gen(rd());
float minvalue=100000000;
unsigned int value;
// std::uniform_int_distribution <unsigned int> d(0, 4294967295);
std::normal_distribution<float> d(100000000, 10000000);//Mean =100 mill , sd=100
// std::uniform_real_distribution<> d(0.0, 4294967295.0);//Generates random uniformly distributed floats within the given range
// Fill vec with normally-distributed samples; vec1 keeps an untouched host
// copy for the optional CPU verification under Enabletest.
for (index_t i=0;i<num_element;i++)
{
// vec[i]=rand()%2147483648;//2^31 -1
value=d(gen);//2^31 -1
if (minvalue > value)
{
minvalue=value;
}
vec[i]=value;//2^32 -1
// if(i<10000)
// cout<<vec[i]<<" ";
// cout<<endl;
vec1[i]=vec[i];
// if (vec[i] > 4294900000)
// {
// cout<<vec[i]<<" ";
// }
}
// NOTE(review): value is unsigned, so a negative float sample wraps before
// the comparison above; this guard can likely never fire -- verify.
if (minvalue < 0)
{
cout<<"-ve value detected:"<<minvalue<<endl;
return -1;
}
cout<<vec[0];
cout<<endl;
data_t* TopArray=new data_t[k];
data_t TopKElement=0;
// data_t NNewTopElements;
data_t* vec_d;
H_ERR(cudaMalloc((void**) &vec_d,sizeof(data_t)*num_element));
H_ERR(cudaMemcpy(vec_d,vec,sizeof(data_t)*num_element,cudaMemcpyHostToDevice));
double timeforMaxsample=0;double timeforFirstTopk=0;double timeforSecondTopk=0;double timeforNormalRadixSelect=0;double timeforConcatenation=0;
int NThreadsPerBlock=256;//only shared memory
int NSharedMemoryElements=NThreadsPerBlock<<5;//3 is giving best result in different values of SubWarp size //Each thread responsible for 32 elements and contribute to 8 Subranges from a group of 4 elements
int SizeOfAllocation=NSharedMemoryElements+(NSharedMemoryElements >> 5);
H_ERR(cudaDeviceSynchronize());
index_t* SelectedSubrangeId_d;
H_ERR(cudaMalloc((void**) &SelectedSubrangeId_d,sizeof(index_t)*(NSubranges-k)*beta));//updated *3 for beta
index_t* CountSelectedSubrange_d;
index_t* CountLonelyElements_d;
H_ERR(cudaMalloc((void**) &CountSelectedSubrange_d,sizeof(index_t)));
H_ERR(cudaMalloc((void**) &CountLonelyElements_d,sizeof(index_t)));
H_ERR(cudaMemset(CountSelectedSubrange_d, 0, sizeof(index_t)));
H_ERR(cudaMemset(CountLonelyElements_d, 0, sizeof(index_t)));
index_t* write_pos_d;
// H_ERR(cudaMalloc((void**) &ConcatenatedRange_d,sizeof(data_t)*k*SubRangesize));
H_ERR(cudaMalloc((void**) &write_pos_d,sizeof(index_t)));
data_t* ConcatenatedRange_d;
H_ERR(cudaMalloc((void**) &ConcatenatedRange_d,sizeof(data_t)*k*SubRangesize));
// Timed region: the full sampled bucket-select pipeline (the per-phase
// timing variables are filled in by the callee).
double start=wtime();
sample_bucket_select<data_t,index_t>(vec_d,num_element,/*num_element-k*/k,num_bucket,TopKElement,NSubranges,SubRangesize,alpha,timeforMaxsample,timeforFirstTopk,timeforSecondTopk,timeforConcatenation,Max_d,SubrangeId_d,beta,defaultContribution,NthreadstoworkInreduction,NThreadsPerBlock,SizeOfAllocation,NSharedMemoryElements, SelectedSubrangeId_d, CountSelectedSubrange_d, CountLonelyElements_d, write_pos_d, ConcatenatedRange_d);
// bucket_select<data_t,index_t>(vec_d,num_element,num_element-k,num_bucket,TopArray,TopKElement);
double totalTime=wtime()-start;
cout<<"Time for selecting the top k element is:"<<totalTime*1000<<" ms"<<endl;
// bucket_select_PhaseII<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,vec);
cout<<"The kth element from top is:"<<TopKElement<<endl;
cout<<endl;
#ifdef Enabletest
sort(vec1, vec1 + num_element);
cout<<endl;
cout<<"kth element"<<vec1[num_element-k]<<endl;
cout<<"k-1 th element"<<vec1[num_element-k+1]<<endl;
cout<<"k+1 th element"<<vec1[num_element-k-1]<<endl;
if (vec1[num_element-k]==TopKElement)
{
cout<<"Success!"<<endl;
}
else
{
cout<<"Not Success!"<<endl;
}
assert(vec1[num_element-k]==TopKElement);
#endif
// Append one CSV row: contribution mode (D/B), num_pow, k, alpha, and the
// per-phase plus total timings in milliseconds.
std::fstream timeLog;
// timeLog.open("Uniform_Unsigned__N_30_SOKBucket.csv",std::fstream::out | std::fstream::app);
//timeLog.open("Normal_float_N_29_SOKBucket.csv",std::fstream::out | std::fstream::app);
// timeLog.open("Normal_UINT_N_30_SOKBucket.csv",std::fstream::out | std::fstream::app);
timeLog.open("BucketSelect_Few_Digits_NORMAL_ALL_K.csv",std::fstream::out | std::fstream::app);
// timeLog.open("Uniform_UINT_N_29_SOKBucket.csv",std::fstream::out | std::fstream::app);
if (defaultContribution)
{
timeLog<<"D"<<";";
}
else
{
timeLog<<"B"<<";";
}
timeLog<<num_pow<<";"<<k<<";"<<alpha<<";"<<timeforMaxsample*1000<<";"<<timeforFirstTopk*1000<<";"<<timeforConcatenation*1000<<";"<<timeforSecondTopk*1000<<";"<<timeforNormalRadixSelect*1000<<";"<<totalTime*1000<<endl;
// timeLog<<num_pow<<"_N_"<<num_element<<"k_"<<k<<"num_bucket_"<<num_bucket<<";"<<totalTime*1000<<endl;
timeLog.close();
return 0;
}
|
0fc5717998a1cf5fee0cef1a8b2ca8102dcc58c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Genoil's CUDA mining kernel for Ethereum
* based on Tim Hughes' opencl kernel.
* thanks to sp_, trpuvot, djm34, cbuchner for things i took from ccminer.
*/
#include "CUDAMiner_cuda.h"
#include "cuda_helper.h"
#include "../libethash/ethash.h"
#include "stdio.h"
#include "nvm_til.h"
// Implementation based on:
// https://github.com/mjosaarinen/tiny_sha3/blob/master/sha3.c
// converted from 64->32 bit words
__device__ __constant__ const uint64_t keccakf_rndc[24] = {
0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808AULL,
0x8000000080008000ULL, 0x000000000000808BULL, 0x0000000080000001ULL,
0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008AULL,
0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000AULL,
0x000000008000808BULL, 0x800000000000008BULL, 0x8000000000008089ULL,
0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL,
0x000000000000800AULL, 0x800000008000000AULL, 0x8000000080008081ULL,
0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
};
// One round of the Keccak-f[1600] permutation over the 5x5 lane state st
// (64-bit lanes); r selects the round constant applied in the Iota step.
__device__ __forceinline__ void keccak_f1600_round(uint64_t st[25], const int r)
{
// Rotation offsets for the Rho step, listed in Pi-permutation order.
const uint32_t keccakf_rotc[24] = {
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
};
// Destination lane order for the Pi step.
const uint32_t keccakf_piln[24] = {
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
};
uint64_t t, bc[5];
// Theta
// Column parities, then XOR each lane with the parity of the column to its
// left and the 1-bit-rotated parity of the column to its right.
for (int i = 0; i < 5; i++)
bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20];
for (int i = 0; i < 5; i++) {
t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1);
for (uint32_t j = 0; j < 25; j += 5)
st[j + i] ^= t;
}
// Rho Pi
// Rotate each lane by its offset while permuting lanes along the single
// cycle that starts at st[1].
t = st[1];
for (int i = 0; i < 24; i++) {
uint32_t j = keccakf_piln[i];
bc[0] = st[j];
st[j] = ROTL64(t, keccakf_rotc[i]);
t = bc[0];
}
// Chi
// Row-wise nonlinear mix: lane ^= (~next_lane) & next_next_lane per row.
for (uint32_t j = 0; j < 25; j += 5) {
for (int i = 0; i < 5; i++)
bc[i] = st[j + i];
for (int i = 0; i < 5; i++)
st[j + i] ^= (~bc[(i + 1) % 5]) & bc[(i + 2) % 5];
}
// Iota
st[0] ^= keccakf_rndc[r];
}
// Keccak-f[1600] specialized for this miner's fixed-size use: words 0..7
// hold the 64-byte message, the rest of the state is cleared, and word 8
// receives the padding word 0x8000000000000001 (pad bits consistent with a
// 72-byte rate, i.e. Keccak-512 -- TODO confirm against the ethash spec)
// before all 24 rounds run.
__device__ __forceinline__ void keccak_f1600(uint64_t st[25])
{
	st[8] = 0x8000000000000001;
	for (int w = 9; w < 25; w++)
		st[w] = 0;
	int round = 0;
	while (round < 24)
	{
		keccak_f1600_round(st, round);
		round++;
	}
}
#define FNV_PRIME 0x01000193U
#define fnv(x,y) ((uint32_t(x) * (FNV_PRIME)) ^uint32_t(y))
// Component-wise FNV-1 combine of two uint4 vectors:
// each lane of the result is a.lane * FNV_PRIME XOR b.lane.
__device__ uint4 fnv4(uint4 a, uint4 b)
{
	return make_uint4(a.x * FNV_PRIME ^ b.x,
	                  a.y * FNV_PRIME ^ b.y,
	                  a.z * FNV_PRIME ^ b.z,
	                  a.w * FNV_PRIME ^ b.w);
}
#define NODE_WORDS (ETHASH_HASH_BYTES/sizeof(uint32_t))
// Fills DAG nodes starting at index `start` from the light cache g_light.
// Threads work in quads (4 consecutive lanes): each lane owns one 16-byte
// uint4 slice of a 64-byte node, and __shfl_sync with width 4 exchanges
// indices/words inside the quad; the 0xFFFFFFFF masks assume a fully active
// warp.
__global__ void
ethash_calculate_dag_item(uint32_t start, hash64_t *g_dag, uint64_t dag_bytes, hash64_t* g_light, uint32_t light_words)
{
uint64_t const node_index = start + uint64_t(blockIdx.x) * blockDim.x + threadIdx.x;
uint64_t num_nodes = dag_bytes / sizeof(hash64_t);
// Round up to a multiple of 4 so whole quads stay active for the shuffles;
// out-of-range lanes still participate but skip the final store.
uint64_t num_nodes_rounded = ((num_nodes + 3) / 4) * 4;
if (node_index >= num_nodes_rounded) return; // None of the threads from this quad have valid node_index
// Seed the node from the light cache entry and the node index, then hash.
hash200_t dag_node;
for(int i=0; i<4; i++)
dag_node.uint4s[i] = g_light[node_index % light_words].uint4s[i];
dag_node.words[0] ^= node_index;
keccak_f1600(dag_node.uint64s);
const int thread_id = threadIdx.x & 3;
// Mix in ETHASH_DATASET_PARENTS pseudo-randomly chosen light-cache parents.
#pragma unroll
for (uint32_t i = 0; i < ETHASH_DATASET_PARENTS; ++i) {
uint32_t parent_index = fnv(node_index ^ i, dag_node.words[i % NODE_WORDS]) % light_words;
// Serve each quad member's parent in turn: every lane loads its own
// 16-byte slice of that parent (coalesced) and shuffles it to the owner.
for (uint32_t t = 0; t < 4; t++) {
uint32_t shuffle_index = __shfl_sync(0xFFFFFFFF,parent_index, t, 4);
uint4 p4 = g_light[shuffle_index].uint4s[thread_id];
#pragma unroll
for (int w = 0; w < 4; w++) {
uint4 s4 = make_uint4(__shfl_sync(0xFFFFFFFF,p4.x, w, 4),
__shfl_sync(0xFFFFFFFF,p4.y, w, 4),
__shfl_sync(0xFFFFFFFF,p4.z, w, 4),
__shfl_sync(0xFFFFFFFF,p4.w, w, 4));
if (t == thread_id) {
dag_node.uint4s[w] = fnv4(dag_node.uint4s[w], s4);
}
}
}
}
keccak_f1600(dag_node.uint64s);
// Write back: quad member t broadcasts its finished node; each lane stores
// its own 16-byte slice so the four stores per node are coalesced.
for (uint32_t t = 0; t < 4; t++) {
uint32_t shuffle_index = __shfl_sync(0xFFFFFFFF,node_index, t, 4);
uint4 s[4];
for (uint32_t w = 0; w < 4; w++) {
s[w] = make_uint4(__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].x, t, 4),
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].y, t, 4),
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].z, t, 4),
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].w, t, 4));
}
if(shuffle_index*sizeof(hash64_t) < dag_bytes){
g_dag[shuffle_index].uint4s[thread_id] = s[thread_id];
// CLWB/MEM_FENCE below come from nvm_til.h (persistence/ordering hooks);
// their exact semantics are defined there.
CLWB(&g_dag[shuffle_index].uint4s[thread_id]);
}
}
MEM_FENCE;
}
// Host driver: produces the full DAG by launching ethash_calculate_dag_item
// repeatedly, each launch covering blocks*threads nodes, until dag_bytes
// worth of hash64_t nodes are written (the kernel bounds-checks the tail).
// NOTE(review): the 'device' parameter is unused here -- confirm callers
// select the device beforehand.
void ethash_generate_dag(
hash64_t* dag,
uint64_t dag_bytes,
hash64_t * light,
uint32_t light_words,
uint32_t blocks,
uint32_t threads,
hipStream_t stream,
int device
)
{
uint64_t const work = dag_bytes / sizeof(hash64_t);
// Number of launches needed to cover all nodes, rounded up for the tail.
uint32_t fullRuns = (uint32_t)(work / (blocks * threads));
uint32_t const restWork = (uint32_t)(work % (blocks * threads));
if (restWork > 0) fullRuns++;
printf("fullRuns=%d\n",fullRuns);
for (uint32_t i = 0; i < fullRuns; i++)
{
hipLaunchKernelGGL(( ethash_calculate_dag_item) , dim3(blocks), dim3(threads), 0, stream , i * blocks * threads, dag, dag_bytes, light, light_words);
// Synchronize after every launch so execution errors surface immediately
// (this also serializes the launches).
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
CUDA_SAFE_CALL(hipGetLastError());
}
| 0fc5717998a1cf5fee0cef1a8b2ca8102dcc58c0.cu | /*
* Genoil's CUDA mining kernel for Ethereum
* based on Tim Hughes' opencl kernel.
* thanks to sp_, trpuvot, djm34, cbuchner for things i took from ccminer.
*/
#include "CUDAMiner_cuda.h"
#include "cuda_helper.h"
#include "../libethash/ethash.h"
#include "stdio.h"
#include "nvm_til.h"
// Implementation based on:
// https://github.com/mjosaarinen/tiny_sha3/blob/master/sha3.c
// converted from 64->32 bit words
__device__ __constant__ const uint64_t keccakf_rndc[24] = {
0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808AULL,
0x8000000080008000ULL, 0x000000000000808BULL, 0x0000000080000001ULL,
0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008AULL,
0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000AULL,
0x000000008000808BULL, 0x800000000000008BULL, 0x8000000000008089ULL,
0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL,
0x000000000000800AULL, 0x800000008000000AULL, 0x8000000080008081ULL,
0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
};
// One round of the Keccak-f[1600] permutation over the 5x5 lane state st
// (64-bit lanes); r selects the round constant applied in the Iota step.
__device__ __forceinline__ void keccak_f1600_round(uint64_t st[25], const int r)
{
// Rotation offsets for the Rho step, listed in Pi-permutation order.
const uint32_t keccakf_rotc[24] = {
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
};
// Destination lane order for the Pi step.
const uint32_t keccakf_piln[24] = {
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
};
uint64_t t, bc[5];
// Theta
// Column parities, then XOR each lane with the parity of the column to its
// left and the 1-bit-rotated parity of the column to its right.
for (int i = 0; i < 5; i++)
bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20];
for (int i = 0; i < 5; i++) {
t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1);
for (uint32_t j = 0; j < 25; j += 5)
st[j + i] ^= t;
}
// Rho Pi
// Rotate each lane by its offset while permuting lanes along the single
// cycle that starts at st[1].
t = st[1];
for (int i = 0; i < 24; i++) {
uint32_t j = keccakf_piln[i];
bc[0] = st[j];
st[j] = ROTL64(t, keccakf_rotc[i]);
t = bc[0];
}
// Chi
// Row-wise nonlinear mix: lane ^= (~next_lane) & next_next_lane per row.
for (uint32_t j = 0; j < 25; j += 5) {
for (int i = 0; i < 5; i++)
bc[i] = st[j + i];
for (int i = 0; i < 5; i++)
st[j + i] ^= (~bc[(i + 1) % 5]) & bc[(i + 2) % 5];
}
// Iota
st[0] ^= keccakf_rndc[r];
}
// Keccak-f[1600] specialized for this miner's fixed-size use: words 0..7
// hold the 64-byte message, the rest of the state is cleared, and word 8
// receives the padding word 0x8000000000000001 (pad bits consistent with a
// 72-byte rate, i.e. Keccak-512 -- TODO confirm against the ethash spec)
// before all 24 rounds run.
__device__ __forceinline__ void keccak_f1600(uint64_t st[25])
{
	st[8] = 0x8000000000000001;
	for (int w = 9; w < 25; w++)
		st[w] = 0;
	int round = 0;
	while (round < 24)
	{
		keccak_f1600_round(st, round);
		round++;
	}
}
#define FNV_PRIME 0x01000193U
#define fnv(x,y) ((uint32_t(x) * (FNV_PRIME)) ^uint32_t(y))
// Component-wise FNV-1 combine of two uint4 vectors:
// each lane of the result is a.lane * FNV_PRIME XOR b.lane.
__device__ uint4 fnv4(uint4 a, uint4 b)
{
	return make_uint4(a.x * FNV_PRIME ^ b.x,
	                  a.y * FNV_PRIME ^ b.y,
	                  a.z * FNV_PRIME ^ b.z,
	                  a.w * FNV_PRIME ^ b.w);
}
#define NODE_WORDS (ETHASH_HASH_BYTES/sizeof(uint32_t))
// Fills DAG nodes starting at index `start` from the light cache g_light.
// Threads work in quads (4 consecutive lanes): each lane owns one 16-byte
// uint4 slice of a 64-byte node, and __shfl_sync with width 4 exchanges
// indices/words inside the quad; the 0xFFFFFFFF masks assume a fully active
// warp.
__global__ void
ethash_calculate_dag_item(uint32_t start, hash64_t *g_dag, uint64_t dag_bytes, hash64_t* g_light, uint32_t light_words)
{
uint64_t const node_index = start + uint64_t(blockIdx.x) * blockDim.x + threadIdx.x;
uint64_t num_nodes = dag_bytes / sizeof(hash64_t);
// Round up to a multiple of 4 so whole quads stay active for the shuffles;
// out-of-range lanes still participate but skip the final store.
uint64_t num_nodes_rounded = ((num_nodes + 3) / 4) * 4;
if (node_index >= num_nodes_rounded) return; // None of the threads from this quad have valid node_index
// Seed the node from the light cache entry and the node index, then hash.
hash200_t dag_node;
for(int i=0; i<4; i++)
dag_node.uint4s[i] = g_light[node_index % light_words].uint4s[i];
dag_node.words[0] ^= node_index;
keccak_f1600(dag_node.uint64s);
const int thread_id = threadIdx.x & 3;
// Mix in ETHASH_DATASET_PARENTS pseudo-randomly chosen light-cache parents.
#pragma unroll
for (uint32_t i = 0; i < ETHASH_DATASET_PARENTS; ++i) {
uint32_t parent_index = fnv(node_index ^ i, dag_node.words[i % NODE_WORDS]) % light_words;
// Serve each quad member's parent in turn: every lane loads its own
// 16-byte slice of that parent (coalesced) and shuffles it to the owner.
for (uint32_t t = 0; t < 4; t++) {
uint32_t shuffle_index = __shfl_sync(0xFFFFFFFF,parent_index, t, 4);
uint4 p4 = g_light[shuffle_index].uint4s[thread_id];
#pragma unroll
for (int w = 0; w < 4; w++) {
uint4 s4 = make_uint4(__shfl_sync(0xFFFFFFFF,p4.x, w, 4),
__shfl_sync(0xFFFFFFFF,p4.y, w, 4),
__shfl_sync(0xFFFFFFFF,p4.z, w, 4),
__shfl_sync(0xFFFFFFFF,p4.w, w, 4));
if (t == thread_id) {
dag_node.uint4s[w] = fnv4(dag_node.uint4s[w], s4);
}
}
}
}
keccak_f1600(dag_node.uint64s);
// Write back: quad member t broadcasts its finished node; each lane stores
// its own 16-byte slice so the four stores per node are coalesced.
for (uint32_t t = 0; t < 4; t++) {
uint32_t shuffle_index = __shfl_sync(0xFFFFFFFF,node_index, t, 4);
uint4 s[4];
for (uint32_t w = 0; w < 4; w++) {
s[w] = make_uint4(__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].x, t, 4),
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].y, t, 4),
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].z, t, 4),
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].w, t, 4));
}
if(shuffle_index*sizeof(hash64_t) < dag_bytes){
g_dag[shuffle_index].uint4s[thread_id] = s[thread_id];
// CLWB/MEM_FENCE below come from nvm_til.h (persistence/ordering hooks);
// their exact semantics are defined there.
CLWB(&g_dag[shuffle_index].uint4s[thread_id]);
}
}
MEM_FENCE;
}
// Host driver: produces the full DAG by launching ethash_calculate_dag_item
// repeatedly, each launch covering blocks*threads nodes, until dag_bytes
// worth of hash64_t nodes are written (the kernel bounds-checks the tail).
// NOTE(review): the 'device' parameter is unused here -- confirm callers
// select the device beforehand.
void ethash_generate_dag(
hash64_t* dag,
uint64_t dag_bytes,
hash64_t * light,
uint32_t light_words,
uint32_t blocks,
uint32_t threads,
cudaStream_t stream,
int device
)
{
uint64_t const work = dag_bytes / sizeof(hash64_t);
// Number of launches needed to cover all nodes, rounded up for the tail.
uint32_t fullRuns = (uint32_t)(work / (blocks * threads));
uint32_t const restWork = (uint32_t)(work % (blocks * threads));
if (restWork > 0) fullRuns++;
printf("fullRuns=%d\n",fullRuns);
for (uint32_t i = 0; i < fullRuns; i++)
{
ethash_calculate_dag_item <<<blocks, threads, 0, stream >>>(i * blocks * threads, dag, dag_bytes, light, light_words);
// Synchronize after every launch so execution errors surface immediately
// (this also serializes the launches).
CUDA_SAFE_CALL(cudaDeviceSynchronize());
}
CUDA_SAFE_CALL(cudaGetLastError());
}
|
a01052f105490df08e8c3d5f720512e79aed567e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#define cudaCheck(ans) { cudaAssert((ans), __FILE__, __LINE__); }
// Report a failed HIP runtime call with its call site (file/line) and, by
// default, terminate the process with the error code; pass abort=false to
// merely log the failure.
inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true)
{
	if (code == hipSuccess) return;
	fprintf(stderr,"cudaAssert: %s at %s:%d\n", hipGetErrorString(code), file, line);
	if (abort) exit(code);
}
const char *boolStrings[2] = {"NO", "YES"};
/*
 * Enumerate all visible GPUs and print their key properties.
 *
 * Fix: the cudaCheck macro defined above was never used, so a failing
 * runtime call went undetected and the loop would print garbage from an
 * uninitialized prop.  Both API calls are now routed through cudaCheck
 * (cudaAssert exits on error with a file/line diagnostic).
 */
int main(void)
{
// print GPU info
int nDevices;
cudaCheck(hipGetDeviceCount(&nDevices));
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
cudaCheck(hipGetDeviceProperties(&prop, i));
printf("Device Number: %d\n", i);
printf(" Device Name: %s\n", prop.name);
printf(" Compute Capability: %d.%d\n", prop.major, prop.minor);
printf(" Number of SMs: %d\n", prop.multiProcessorCount);
printf(" Core Clock Rate (KHz): %d\n",
prop.clockRate);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf(" Device Overlap Supported: %s\n", boolStrings[prop.deviceOverlap]);
printf(" Concurrent Kernels Supported: %s\n", boolStrings[prop.concurrentKernels]);
printf(" Managed Memory Supported: %s\n", boolStrings[prop.managedMemory]);
printf(" Concurrent Managed Memory Access Supported: %s\n\n",
boolStrings[prop.concurrentManagedAccess]);
}
return 0;
}
| a01052f105490df08e8c3d5f720512e79aed567e.cu | #include <stdio.h>
#define cudaCheck(ans) { cudaAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA runtime call with its call site (file/line) and, by
// default, terminate the process with the error code; pass abort=false to
// merely log the failure.
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
	if (code == cudaSuccess) return;
	fprintf(stderr,"cudaAssert: %s at %s:%d\n", cudaGetErrorString(code), file, line);
	if (abort) exit(code);
}
const char *boolStrings[2] = {"NO", "YES"};
/*
 * Enumerate all visible GPUs and print their key properties.
 *
 * Fix: the cudaCheck macro defined above was never used, so a failing
 * runtime call went undetected and the loop would print garbage from an
 * uninitialized prop.  Both API calls are now routed through cudaCheck
 * (cudaAssert exits on error with a file/line diagnostic).
 */
int main(void)
{
// print GPU info
int nDevices;
cudaCheck(cudaGetDeviceCount(&nDevices));
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaCheck(cudaGetDeviceProperties(&prop, i));
printf("Device Number: %d\n", i);
printf(" Device Name: %s\n", prop.name);
printf(" Compute Capability: %d.%d\n", prop.major, prop.minor);
printf(" Number of SMs: %d\n", prop.multiProcessorCount);
printf(" Core Clock Rate (KHz): %d\n",
prop.clockRate);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf(" Device Overlap Supported: %s\n", boolStrings[prop.deviceOverlap]);
printf(" Concurrent Kernels Supported: %s\n", boolStrings[prop.concurrentKernels]);
printf(" Managed Memory Supported: %s\n", boolStrings[prop.managedMemory]);
printf(" Concurrent Managed Memory Access Supported: %s\n\n",
boolStrings[prop.concurrentManagedAccess]);
}
return 0;
}
|
66c62787b7d3e1d0b4662a677acc3e08d746cfa2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief Functions for computing the two hop neighbor pairs of a graph
*
* @file two_hop_neighbors.cu
* ---------------------------------------------------------------------------**/
#include <rmm/thrust_rmm_allocator.h>
#include <utilities/error_utils.h>
#include <algorithms.hpp>
#include <graph.hpp>
#include "two_hop_neighbors.cuh"
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
namespace cugraph {
// Computes the distinct two-hop vertex pairs of a CSR graph: every edge
// (u,v) is expanded by v's out-degree into pairs (u, w) for each neighbor w
// of v, then self-pairs and duplicates are removed and the result is
// returned as a GraphCOO.  All device work runs on the default stream.
template <typename VT, typename ET, typename WT>
std::unique_ptr<cugraph::experimental::GraphCOO<VT, ET, WT>> get_two_hop_neighbors(
  experimental::GraphCSRView<VT, ET, WT> const &graph)
{
  hipStream_t stream{nullptr};
  rmm::device_vector<ET> exsum_degree(graph.number_of_edges + 1);
  ET *d_exsum_degree = exsum_degree.data().get();

  // Find the degree of the out vertex of each edge
  degree_iterator<ET> deg_it(graph.offsets);
  deref_functor<degree_iterator<ET>, ET> deref(deg_it);
  exsum_degree[0] = ET{0};
  thrust::transform(rmm::exec_policy(stream)->on(stream),
                    graph.indices,
                    graph.indices + graph.number_of_edges,
                    d_exsum_degree + 1,
                    deref);

  // Take the inclusive sum of the degrees
  // (yields, per edge, the write offset of its expanded pair list)
  thrust::inclusive_scan(rmm::exec_policy(stream)->on(stream),
                         d_exsum_degree + 1,
                         d_exsum_degree + graph.number_of_edges + 1,
                         d_exsum_degree + 1);

  // Copy out the last value to get the size of scattered output
  ET output_size = exsum_degree[graph.number_of_edges];

  // Allocate memory for the scattered output
  rmm::device_vector<VT> first_pair(output_size);
  rmm::device_vector<VT> second_pair(output_size);

  VT *d_first_pair  = first_pair.data().get();
  VT *d_second_pair = second_pair.data().get();

  // Figure out number of blocks and allocate memory for block bucket offsets
  ET num_blocks = (output_size + TWO_HOP_BLOCK_SIZE - 1) / TWO_HOP_BLOCK_SIZE;
  rmm::device_vector<ET> block_bucket_offsets(num_blocks + 1);
  ET *d_block_bucket_offsets = block_bucket_offsets.data().get();

  // Compute the block bucket offsets
  dim3 grid, block;
  block.x = 512;
  grid.x  = min((ET)MAXBLOCKS, (num_blocks / 512) + 1);
  hipLaunchKernelGGL(( compute_bucket_offsets_kernel), dim3(grid), dim3(block), 0, nullptr,
    d_exsum_degree, d_block_bucket_offsets, graph.number_of_edges, output_size);

  block_bucket_offsets[num_blocks] = graph.number_of_edges;

  // Scatter the expanded edge lists into temp space
  grid.x = min((ET)MAXBLOCKS, num_blocks);
  hipLaunchKernelGGL(( scatter_expand_kernel), dim3(grid), dim3(block), 0, nullptr, d_exsum_degree,
                                            graph.indices,
                                            graph.offsets,
                                            d_block_bucket_offsets,
                                            graph.number_of_vertices,
                                            output_size,
                                            num_blocks,
                                            d_first_pair,
                                            d_second_pair);

  // TODO:  This would be faster in a hash table (no sorting), unless there's
  //        some reason that the result has to be sorted
  // Remove duplicates and self pairings
  auto tuple_start = thrust::make_zip_iterator(thrust::make_tuple(d_first_pair, d_second_pair));
  auto tuple_end   = tuple_start + output_size;
  thrust::sort(rmm::exec_policy(stream)->on(stream), tuple_start, tuple_end);
  tuple_end = thrust::copy_if(rmm::exec_policy(stream)->on(stream),
                              tuple_start,
                              tuple_end,
                              tuple_start,
                              self_loop_flagger<VT>());
  tuple_end = thrust::unique(rmm::exec_policy(stream)->on(stream), tuple_start, tuple_end);

  // Get things ready to return
  ET outputSize = tuple_end - tuple_start;

  auto result = std::make_unique<cugraph::experimental::GraphCOO<VT, ET, WT>>(
    graph.number_of_vertices, outputSize, false);
  // NOTE(review): the return codes of the two copies below are unchecked --
  // consider wrapping them in the project's error-checking macro.
  hipMemcpy(result->src_indices(), d_first_pair, sizeof(VT) * outputSize, hipMemcpyDefault);
  hipMemcpy(result->dst_indices(), d_second_pair, sizeof(VT) * outputSize, hipMemcpyDefault);

  return result;
}
template std::unique_ptr<cugraph::experimental::GraphCOO<int, int, float>> get_two_hop_neighbors(
experimental::GraphCSRView<int, int, float> const &);
template std::unique_ptr<cugraph::experimental::GraphCOO<int, int, double>> get_two_hop_neighbors(
experimental::GraphCSRView<int, int, double> const &);
} // namespace cugraph
| 66c62787b7d3e1d0b4662a677acc3e08d746cfa2.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief Functions for computing the two hop neighbor pairs of a graph
*
* @file two_hop_neighbors.cu
* ---------------------------------------------------------------------------**/
#include <rmm/thrust_rmm_allocator.h>
#include <utilities/error_utils.h>
#include <algorithms.hpp>
#include <graph.hpp>
#include "two_hop_neighbors.cuh"
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
namespace cugraph {
// Computes the distinct two-hop neighbor pairs (v, w) of the input CSR graph:
// every pair reachable as v -> u -> w through some intermediate vertex u.
// The expanded pair list is materialized in device memory, sorted, stripped
// of self-pairings (v == w is kept; only v == v "loops" are removed by the
// flagger) and duplicates, and returned as a GraphCOO edge list.
// All device work is issued on the default CUDA stream.
template <typename VT, typename ET, typename WT>
std::unique_ptr<cugraph::experimental::GraphCOO<VT, ET, WT>> get_two_hop_neighbors(
  experimental::GraphCSRView<VT, ET, WT> const &graph)
{
  cudaStream_t stream{nullptr};  // default stream
  // exsum_degree[e+1] holds the out-degree of the destination of edge e;
  // after the scan below it becomes the prefix sum of those degrees.
  rmm::device_vector<ET> exsum_degree(graph.number_of_edges + 1);
  ET *d_exsum_degree = exsum_degree.data().get();
  // Find the degree of the out vertex of each edge
  degree_iterator<ET> deg_it(graph.offsets);
  deref_functor<degree_iterator<ET>, ET> deref(deg_it);
  // note: operator[] on a device_vector performs a blocking host<->device copy
  exsum_degree[0] = ET{0};
  thrust::transform(rmm::exec_policy(stream)->on(stream),
                    graph.indices,
                    graph.indices + graph.number_of_edges,
                    d_exsum_degree + 1,
                    deref);
  // Take the inclusive sum of the degrees
  thrust::inclusive_scan(rmm::exec_policy(stream)->on(stream),
                         d_exsum_degree + 1,
                         d_exsum_degree + graph.number_of_edges + 1,
                         d_exsum_degree + 1);
  // Copy out the last value to get the size of scattered output
  ET output_size = exsum_degree[graph.number_of_edges];
  // Allocate memory for the scattered output
  rmm::device_vector<VT> first_pair(output_size);
  rmm::device_vector<VT> second_pair(output_size);
  VT *d_first_pair = first_pair.data().get();
  VT *d_second_pair = second_pair.data().get();
  // Figure out number of blocks and allocate memory for block bucket offsets
  ET num_blocks = (output_size + TWO_HOP_BLOCK_SIZE - 1) / TWO_HOP_BLOCK_SIZE;
  rmm::device_vector<ET> block_bucket_offsets(num_blocks + 1);
  ET *d_block_bucket_offsets = block_bucket_offsets.data().get();
  // Compute the block bucket offsets
  dim3 grid, block;
  block.x = 512;
  grid.x = min((ET)MAXBLOCKS, (num_blocks / 512) + 1);
  compute_bucket_offsets_kernel<<<grid, block, 0, nullptr>>>(
    d_exsum_degree, d_block_bucket_offsets, graph.number_of_edges, output_size);
  block_bucket_offsets[num_blocks] = graph.number_of_edges;
  // Scatter the expanded edge lists into temp space
  grid.x = min((ET)MAXBLOCKS, num_blocks);
  scatter_expand_kernel<<<grid, block, 0, nullptr>>>(d_exsum_degree,
                                                     graph.indices,
                                                     graph.offsets,
                                                     d_block_bucket_offsets,
                                                     graph.number_of_vertices,
                                                     output_size,
                                                     num_blocks,
                                                     d_first_pair,
                                                     d_second_pair);
  // TODO: This would be faster in a hash table (no sorting), unless there's
  // some reason that the result has to be sorted
  // Remove duplicates and self pairings
  auto tuple_start = thrust::make_zip_iterator(thrust::make_tuple(d_first_pair, d_second_pair));
  auto tuple_end = tuple_start + output_size;
  thrust::sort(rmm::exec_policy(stream)->on(stream), tuple_start, tuple_end);
  tuple_end = thrust::copy_if(rmm::exec_policy(stream)->on(stream),
                              tuple_start,
                              tuple_end,
                              tuple_start,
                              self_loop_flagger<VT>());
  tuple_end = thrust::unique(rmm::exec_policy(stream)->on(stream), tuple_start, tuple_end);
  // Get things ready to return
  ET outputSize = tuple_end - tuple_start;
  auto result = std::make_unique<cugraph::experimental::GraphCOO<VT, ET, WT>>(
    graph.number_of_vertices, outputSize, false);
  // NOTE(review): return codes of these copies and of the kernel launches
  // above are not checked — consider the project's CUDA_TRY-style macros.
  cudaMemcpy(result->src_indices(), d_first_pair, sizeof(VT) * outputSize, cudaMemcpyDefault);
  cudaMemcpy(result->dst_indices(), d_second_pair, sizeof(VT) * outputSize, cudaMemcpyDefault);
  return result;
}
template std::unique_ptr<cugraph::experimental::GraphCOO<int, int, float>> get_two_hop_neighbors(
experimental::GraphCSRView<int, int, float> const &);
template std::unique_ptr<cugraph::experimental::GraphCOO<int, int, double>> get_two_hop_neighbors(
experimental::GraphCSRView<int, int, double> const &);
} // namespace cugraph
|
2cec06d16d1a88a81d64ac7ef79f642d0a0280ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gputimer.h"
const int K32 = 32;
const int K16 = 16;
// Utility functions: compare, print, and fill matrices
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
// Aborts the process with a source-location diagnostic if err is not
// hipSuccess.  Intended to be invoked through the checkCudaErrors macro,
// which stringifies the failing call.  (The "CUDA error" message text is
// kept as-is from the pre-hipify original.)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line){
    if (err != hipSuccess) {
        fprintf(stderr, "CUDA error at: %s : %d\n", file,line);
        fprintf(stderr, "%s %s\n", hipGetErrorString(err), func);;
        exit(1);
    }
}
// Compares two NxN row-major float matrices element-wise.
// Returns true if any pair of elements differs by more than 1e-4 —
// i.e. true means MISMATCH (callers print "Failed" on true).
// Fix vs. original: the old code called abs() on the float difference;
// without <cmath> in scope that can bind to the integer overload and
// truncate every sub-1.0 difference to zero, masking real mismatches.
// The absolute value is now computed explicitly on floats.
bool compare_matrices(float *gpu, float *ref, const int N){
    for(int j = 0; j < N; j++){
        for(int i = 0; i < N; i++){
            float diff = ref[i + j*N] - gpu[i + j*N];
            if (diff < 0.0f) diff = -diff;   // float |diff|, no library call
            if (diff > 0.0001f) {
                return true;
            }
        }
    }
    return false;
}
// Prints an NxN row-major matrix to stdout, one row per line.
void print_matrix(float *mat, const int N) {
    for (int row = 0; row < N; ++row) {
        for (int col = 0; col < N; ++col)
            printf("%4.4g ", mat[col + row*N]);
        printf("\n");
    }
}
// Fills an NxN matrix with the sequence 0, 1, 2, ..., N*N-1.
void fill_matrix(float *mat, const int N){
    const int total = N * N;
    for (int idx = 0; idx < total; ++idx)
        mat[idx] = static_cast<float>(idx);
}
// Out-of-place transpose of an NxN row-major matrix: out(r,c) = in(c,r).
void transpose_CPU(float *in, float *out, const int N){
    for (int r = 0; r < N; ++r) {
        for (int c = 0; c < N; ++c) {
            out[r + c * N] = in[c + r * N];
        }
    }
}
// to be launched on a single thread
// Baseline: the entire NxN transpose done sequentially by one GPU thread.
__global__
void transpose_serial(float *in, float *out, const int N){
    for(int j = 0; j < N; j++)
        for(int i = 0; i < N; i++)
            out[j + i * N] = in[i + j * N];//out(j,i) = in(i,j)
}
// to be launched with one thread per row of output matrix
// (main launches this as a single block of N threads)
__global__
void transpose_parallel_per_row(float *in, float *out, const int N){
    int i = threadIdx.x;
    for(int j=0; j < N; j++)
        out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per element, in KxK threadblocks
// thread (x,y) in grid writes element (i,j) of output matrix
__global__
void transpose_parallel_per_element(float *in, float *out, const int N, const int K){
    int i = blockIdx.x * K + threadIdx.x;// each block covers a K*K tile of elements
    int j = blockIdx.y * K + threadIdx.y;// grid is N/K x N/K blocks (assumes K divides N)
    out[j + i * N] = in[i + j * N];
}
// to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks
// thread blocks read & write tiles, in coalesced fashion
// adjacent threads read adjacent input elements, write adjacent output elmts
// NOTE(review): the tile is both written and read back as tile[y][x], so the
// global-memory write below strides by N across a warp (uncoalesced) —
// presumably the intended "naive tiled" variant of the benchmark; confirm.
// Requires launch with blockDim == (K32, K32) and K == K32.
__global__
void transpose_parallel_per_element_32_32_tiled(float *in, float *out, const int N, const int K){
    // (i,j) locations of the tile corners for input & output matrices:
    int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
    int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
    int x = threadIdx.x, y = threadIdx.y;
    __shared__ float tile[K32][K32];
    tile[y][x] = in[in_corner_i + x + (in_corner_j+y) * N];
    __syncthreads();  // tile fully populated before any thread reads it back
    out[out_corner_i + y + (out_corner_j + x) * N] = tile[y][x];
}
// 16x16 variant of the tiled transpose above; requires K == K16.
__global__
void transpose_parallel_per_element_16_16_tiled(float *in, float *out, const int N, const int K){
    // (i,j) locations of the tile corners for input & output matrices:
    int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
    int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
    int x = threadIdx.x, y = threadIdx.y;
    __shared__ float tile[K16][K16];
    tile[y][x] = in[in_corner_i + x + (in_corner_j+y) * N];
    __syncthreads();  // barrier between the shared-mem write and read phases
    out[out_corner_i + y + (out_corner_j + x) * N] = tile[y][x];
}
// to be launched with one thread per element, in KxK threadblocks
// thread blocks read & write tiles, in coalesced fashion
// shared memory array padded to avoid bank conflicts
// Each 32x32 block stages its input tile in shared memory, then writes the
// transposed tile back so that BOTH the global read and the global write are
// coalesced (adjacent threads touch adjacent addresses).  The transposition
// happens on the shared-memory read (tile[x][y]); that column-wise access is
// exactly what the +1 column padding de-conflicts across the 32 banks.
// Requires launch with blockDim == (K32, K32) and K == K32.
// Fix vs. original: the old code read tile[y][x] and wrote global memory with
// a stride-N pattern across each warp (uncoalesced), which also made the
// padding a no-op; the values written are identical.
__global__
void transpose_parallel_per_element_32_32_tiled_padded1(float *in, float *out, const int N, const int K){
    // (i,j) locations of the tile corners for input & output matrices:
    int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
    int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
    int x = threadIdx.x, y = threadIdx.y;
    __shared__ float tile[K32][K32+1];
    // coalesced read from global mem into shared mem:
    tile[y][x] = in[in_corner_i + x + (in_corner_j + y) * N];
    __syncthreads();
    // transposed read from shared mem, coalesced write to global mem:
    out[out_corner_i + x + (out_corner_j + y)*N] = tile[x][y];
}
// 16x16 padded-tile transpose: stages the input tile in shared memory and
// writes the transposed tile back so both global accesses are coalesced.
// The transposition happens on the shared-memory read (tile[x][y]), whose
// column-wise access is what the +1 padding de-conflicts.  Requires K == K16.
// Fix vs. original: the old code read tile[y][x] and wrote global memory with
// a stride-N (uncoalesced) pattern, which also made the padding a no-op; the
// values written are identical.
__global__
void transpose_parallel_per_element_16_16_tiled_padded1(float *in, float *out, const int N, const int K){
    // (i,j) locations of the tile corners for input & output matrices:
    int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
    int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
    int x = threadIdx.x, y = threadIdx.y;
    __shared__ float tile[K16][K16+1];
    // coalesced read from global mem into shared mem:
    tile[y][x] = in[in_corner_i + x + (in_corner_j + y) * N];
    __syncthreads();
    // transposed read from shared mem, coalesced write to global mem:
    out[out_corner_i + x + (out_corner_j + y)*N] = tile[x][y];
}
// Benchmark driver: builds an N x N test matrix, computes a CPU reference
// transpose, then times each GPU transpose variant and checks its output
// against the reference.  compare_matrices returns true on MISMATCH, hence
// the "Failed"/"Success" ternaries below.
// NOTE(review): the third launch argument (K*K*sizeof(float)) reserves
// dynamic shared memory that the tiled kernels never use — they declare
// static __shared__ arrays; the reservation is harmless but unnecessary.
// NOTE(review): no hipGetLastError() checks after launches — failures would
// surface only as mismatching output.
int main(int argc, char **argv){
    // set up data
    const int N = 1024;
    int K = 32;
    const size_t numbytes = N * N * sizeof(float);
    //MALLOC host memory
    float *in = (float *) malloc(numbytes);
    float *out = (float *) malloc(numbytes);
    float *gold = (float *) malloc(numbytes);
    //init data and get the gold
    fill_matrix(in, N);
    transpose_CPU(in, gold, N);
    //MALLOC device memory
    float *d_in, *d_out;
    hipMalloc(&d_in, numbytes);
    hipMalloc(&d_out, numbytes);
    hipMemcpy(d_in, in, numbytes, hipMemcpyHostToDevice);
    GpuTimer timer;
    // Now time each kernel and verify that it produces the correct result.
    // single-thread baseline
    timer.Start();
    hipLaunchKernelGGL(( transpose_serial), dim3(1),dim3(1), 0, 0, d_in, d_out, N);
    timer.Stop();
    hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
    printf("transpose_serial: %g ms.\nVerifying transpose...%s\n", timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
    // one thread per output row (single block of N threads)
    timer.Start();
    hipLaunchKernelGGL(( transpose_parallel_per_row), dim3(1),dim3(N), 0, 0, d_in, d_out, N);
    timer.Stop();
    hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
    printf("transpose_parallel_per_row: %g ms.\nVerifying transpose...%s\n", timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
    // one thread per element, 32x32 blocks
    dim3 block(K,K);
    dim3 grid(N/K, N/K);
    timer.Start();
    hipLaunchKernelGGL(( transpose_parallel_per_element), dim3(grid), dim3(block), 0, 0, d_in, d_out, N, K);
    timer.Stop();
    hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
    printf("transpose_parallel_per_element: %g ms.\nVerifying transpose...%s\n", timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
    // shared-memory tiled variant, 32x32 tiles
    timer.Start();
    hipLaunchKernelGGL(( transpose_parallel_per_element_32_32_tiled), dim3(grid), dim3(block), K*K*sizeof(float), 0, d_in, d_out,N, K);
    timer.Stop();
    hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
    printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n", K, K, timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
    // padded-tile variant, 32x32 tiles
    timer.Start();
    hipLaunchKernelGGL(( transpose_parallel_per_element_32_32_tiled_padded1), dim3(grid),dim3(block), K*K*sizeof(float), 0, d_in, d_out, N, K);
    timer.Stop();
    hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
    printf("transpose_parallel_per_element_tiled_padded %dx%d: %g ms.\nVerifying...%s\n", K, K, timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
    // repeat tiled variants with 16x16 tiles
    K = 16;
    dim3 block16(K, K);
    dim3 grid16(N/K, N/K);
    timer.Start();
    hipLaunchKernelGGL(( transpose_parallel_per_element_16_16_tiled), dim3(grid16), dim3(block16),K*K*sizeof(float), 0, d_in, d_out, N, K);
    timer.Stop();
    hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
    printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n", K, K, timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
    timer.Start();
    hipLaunchKernelGGL(( transpose_parallel_per_element_16_16_tiled_padded1), dim3(grid16), dim3(block16), K*K*sizeof(float), 0, d_in, d_out, N, K);
    timer.Stop();
    hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
    printf("transpose_parallel_per_element_tiled_padded %dx%d: %g ms.\nVerifying...%s\n", K, K, timer.Elapsed(), compare_matrices(out, gold,N) ? "Failed" : "Success");
    //free data
    free(in);
    free(out);
    free(gold);
    hipFree(d_in);
    hipFree(d_out);
}
| 2cec06d16d1a88a81d64ac7ef79f642d0a0280ac.cu | #include <stdio.h>
#include "gputimer.h"
const int K32 = 32;
const int K16 = 16;
// Utility functions: compare, print, and fill matrices
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line){
if (err != cudaSuccess) {
fprintf(stderr, "CUDA error at: %s : %d\n", file,line);
fprintf(stderr, "%s %s\n", cudaGetErrorString(err), func);;
exit(1);
}
}
// Compares two NxN row-major float matrices element-wise.
// Returns true if any pair of elements differs by more than 1e-4 —
// i.e. true means MISMATCH (callers print "Failed" on true).
// Fix vs. original: the old code called abs() on the float difference;
// without <cmath> in scope that can bind to the integer overload and
// truncate every sub-1.0 difference to zero, masking real mismatches.
// The absolute value is now computed explicitly on floats.
bool compare_matrices(float *gpu, float *ref, const int N){
    for(int j = 0; j < N; j++){
        for(int i = 0; i < N; i++){
            float diff = ref[i + j*N] - gpu[i + j*N];
            if (diff < 0.0f) diff = -diff;   // float |diff|, no library call
            if (diff > 0.0001f) {
                return true;
            }
        }
    }
    return false;
}
// Prints an NxN row-major matrix to stdout, one row per line.
void print_matrix(float *mat, const int N) {
    for (int r = 0; r < N; ++r) {
        for (int c = 0; c < N; ++c)
            printf("%4.4g ", mat[c + r*N]);
        printf("\n");
    }
}
// Fills an NxN matrix with the sequence 0, 1, 2, ..., N*N-1.
void fill_matrix(float *mat, const int N){
    const int count = N * N;
    for (int k = 0; k < count; ++k)
        mat[k] = static_cast<float>(k);
}
// Out-of-place transpose of an NxN row-major matrix: out(r,c) = in(c,r).
void transpose_CPU(float *in, float *out, const int N){
    for (int r = 0; r < N; ++r)
        for (int c = 0; c < N; ++c)
            out[r + c * N] = in[c + r * N];
}
// to be launched on a single thread
// Baseline: the entire NxN transpose done sequentially by one GPU thread.
__global__
void transpose_serial(float *in, float *out, const int N){
    for(int j = 0; j < N; j++)
        for(int i = 0; i < N; i++)
            out[j + i * N] = in[i + j * N];//out(j,i) = in(i,j)
}
// to be launched with one thread per row of output matrix
// (main launches this as a single block of N threads)
__global__
void transpose_parallel_per_row(float *in, float *out, const int N){
    int i = threadIdx.x;
    for(int j=0; j < N; j++)
        out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per element, in KxK threadblocks
// thread (x,y) in grid writes element (i,j) of output matrix
__global__
void transpose_parallel_per_element(float *in, float *out, const int N, const int K){
    int i = blockIdx.x * K + threadIdx.x;// each block covers a K*K tile of elements
    int j = blockIdx.y * K + threadIdx.y;// grid is N/K x N/K blocks (assumes K divides N)
    out[j + i * N] = in[i + j * N];
}
// to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks
// thread blocks read & write tiles, in coalesced fashion
// adjacent threads read adjacent input elements, write adjacent output elmts
// NOTE(review): the tile is both written and read back as tile[y][x], so the
// global-memory write below strides by N across a warp (uncoalesced) —
// presumably the intended "naive tiled" variant of the benchmark; confirm.
// Requires launch with blockDim == (K32, K32) and K == K32.
__global__
void transpose_parallel_per_element_32_32_tiled(float *in, float *out, const int N, const int K){
    // (i,j) locations of the tile corners for input & output matrices:
    int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
    int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
    int x = threadIdx.x, y = threadIdx.y;
    __shared__ float tile[K32][K32];
    tile[y][x] = in[in_corner_i + x + (in_corner_j+y) * N];
    __syncthreads();  // tile fully populated before any thread reads it back
    out[out_corner_i + y + (out_corner_j + x) * N] = tile[y][x];
}
// 16x16 variant of the tiled transpose above; requires K == K16.
__global__
void transpose_parallel_per_element_16_16_tiled(float *in, float *out, const int N, const int K){
    // (i,j) locations of the tile corners for input & output matrices:
    int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
    int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
    int x = threadIdx.x, y = threadIdx.y;
    __shared__ float tile[K16][K16];
    tile[y][x] = in[in_corner_i + x + (in_corner_j+y) * N];
    __syncthreads();  // barrier between the shared-mem write and read phases
    out[out_corner_i + y + (out_corner_j + x) * N] = tile[y][x];
}
// to be launched with one thread per element, in KxK threadblocks
// thread blocks read & write tiles, in coalesced fashion
// shared memory array padded to avoid bank conflicts
// Each 32x32 block stages its input tile in shared memory, then writes the
// transposed tile back so that BOTH the global read and the global write are
// coalesced (adjacent threads touch adjacent addresses).  The transposition
// happens on the shared-memory read (tile[x][y]); that column-wise access is
// exactly what the +1 column padding de-conflicts across the 32 banks.
// Requires launch with blockDim == (K32, K32) and K == K32.
// Fix vs. original: the old code read tile[y][x] and wrote global memory with
// a stride-N pattern across each warp (uncoalesced), which also made the
// padding a no-op; the values written are identical.
__global__
void transpose_parallel_per_element_32_32_tiled_padded1(float *in, float *out, const int N, const int K){
    // (i,j) locations of the tile corners for input & output matrices:
    int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
    int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
    int x = threadIdx.x, y = threadIdx.y;
    __shared__ float tile[K32][K32+1];
    // coalesced read from global mem into shared mem:
    tile[y][x] = in[in_corner_i + x + (in_corner_j + y) * N];
    __syncthreads();
    // transposed read from shared mem, coalesced write to global mem:
    out[out_corner_i + x + (out_corner_j + y)*N] = tile[x][y];
}
// 16x16 padded-tile transpose: stages the input tile in shared memory and
// writes the transposed tile back so both global accesses are coalesced.
// The transposition happens on the shared-memory read (tile[x][y]), whose
// column-wise access is what the +1 padding de-conflicts.  Requires K == K16.
// Fix vs. original: the old code read tile[y][x] and wrote global memory with
// a stride-N (uncoalesced) pattern, which also made the padding a no-op; the
// values written are identical.
__global__
void transpose_parallel_per_element_16_16_tiled_padded1(float *in, float *out, const int N, const int K){
    // (i,j) locations of the tile corners for input & output matrices:
    int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
    int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
    int x = threadIdx.x, y = threadIdx.y;
    __shared__ float tile[K16][K16+1];
    // coalesced read from global mem into shared mem:
    tile[y][x] = in[in_corner_i + x + (in_corner_j + y) * N];
    __syncthreads();
    // transposed read from shared mem, coalesced write to global mem:
    out[out_corner_i + x + (out_corner_j + y)*N] = tile[x][y];
}
// Benchmark driver: builds an N x N test matrix, computes a CPU reference
// transpose, then times each GPU transpose variant and checks its output
// against the reference.  compare_matrices returns true on MISMATCH, hence
// the "Failed"/"Success" ternaries below.
// NOTE(review): the third launch argument (K*K*sizeof(float)) reserves
// dynamic shared memory that the tiled kernels never use — they declare
// static __shared__ arrays; the reservation is harmless but unnecessary.
// NOTE(review): no cudaGetLastError() checks after launches — failures would
// surface only as mismatching output.
int main(int argc, char **argv){
    // set up data
    const int N = 1024;
    int K = 32;
    const size_t numbytes = N * N * sizeof(float);
    //MALLOC host memory
    float *in = (float *) malloc(numbytes);
    float *out = (float *) malloc(numbytes);
    float *gold = (float *) malloc(numbytes);
    //init data and get the gold
    fill_matrix(in, N);
    transpose_CPU(in, gold, N);
    //MALLOC device memory
    float *d_in, *d_out;
    cudaMalloc(&d_in, numbytes);
    cudaMalloc(&d_out, numbytes);
    cudaMemcpy(d_in, in, numbytes, cudaMemcpyHostToDevice);
    GpuTimer timer;
    // Now time each kernel and verify that it produces the correct result.
    // single-thread baseline
    timer.Start();
    transpose_serial<<<1,1>>>(d_in, d_out, N);
    timer.Stop();
    cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
    printf("transpose_serial: %g ms.\nVerifying transpose...%s\n", timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
    // one thread per output row (single block of N threads)
    timer.Start();
    transpose_parallel_per_row<<<1,N>>>(d_in, d_out, N);
    timer.Stop();
    cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
    printf("transpose_parallel_per_row: %g ms.\nVerifying transpose...%s\n", timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
    // one thread per element, 32x32 blocks
    dim3 block(K,K);
    dim3 grid(N/K, N/K);
    timer.Start();
    transpose_parallel_per_element<<<grid, block>>>(d_in, d_out, N, K);
    timer.Stop();
    cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
    printf("transpose_parallel_per_element: %g ms.\nVerifying transpose...%s\n", timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
    // shared-memory tiled variant, 32x32 tiles
    timer.Start();
    transpose_parallel_per_element_32_32_tiled<<<grid, block, K*K*sizeof(float)>>>(d_in, d_out,N, K);
    timer.Stop();
    cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
    printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n", K, K, timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
    // padded-tile variant, 32x32 tiles
    timer.Start();
    transpose_parallel_per_element_32_32_tiled_padded1<<<grid,block, K*K*sizeof(float)>>>(d_in, d_out, N, K);
    timer.Stop();
    cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
    printf("transpose_parallel_per_element_tiled_padded %dx%d: %g ms.\nVerifying...%s\n", K, K, timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
    // repeat tiled variants with 16x16 tiles
    K = 16;
    dim3 block16(K, K);
    dim3 grid16(N/K, N/K);
    timer.Start();
    transpose_parallel_per_element_16_16_tiled<<<grid16, block16,K*K*sizeof(float)>>>(d_in, d_out, N, K);
    timer.Stop();
    cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
    printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n", K, K, timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
    timer.Start();
    transpose_parallel_per_element_16_16_tiled_padded1<<<grid16, block16, K*K*sizeof(float)>>>(d_in, d_out, N, K);
    timer.Stop();
    cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
    printf("transpose_parallel_per_element_tiled_padded %dx%d: %g ms.\nVerifying...%s\n", K, K, timer.Elapsed(), compare_matrices(out, gold,N) ? "Failed" : "Success");
    //free data
    free(in);
    free(out);
    free(gold);
    cudaFree(d_in);
    cudaFree(d_out);
}
|
7246fbac01b756116a0b1216f271e8e3dbe5d375.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define GRADIENT_SIZE 16
#define C_X_MIN -0.188
#define C_X_MAX -0.012
#define C_Y_MIN 0.554
#define C_Y_MAX 0.754
#define IMAGE_SIZE 4096
#define ARRAY_SIZE (3 * IMAGE_SIZE * IMAGE_SIZE * sizeof(unsigned char))
#define PIXEL_WIDTH ((C_X_MAX - C_X_MIN) / IMAGE_SIZE)
#define PIXEL_HEIGHT ((C_Y_MAX - C_Y_MIN) / IMAGE_SIZE)
#define ITERATION_MAX 200
int colors[51] = {
66, 30, 15,
25, 7, 26,
9, 1, 47,
4, 4, 73,
0, 7, 100,
12, 44, 138,
24, 82, 177,
57, 125, 209,
134, 181, 229,
211, 236, 248,
241, 233, 191,
248, 201, 95,
255, 170, 0,
204, 128, 0,
153, 87, 0,
106, 52, 3,
16, 16, 16,
};
int *d_colors;
int x_grid;
int y_grid;
int x_block;
int y_block;
unsigned char *image_buffer;
unsigned char *d_image_buffer;
hipEvent_t allocation_start, allocation_end;
hipEvent_t computing_start, computing_end;
hipEvent_t memcpy_start, memcpy_end;
float allocation_time, computing_time, memcpy_time;
// Parses the launch configuration (grid and block dimensions) from argv,
// creates the timing events, and allocates the pinned host image buffer plus
// the device buffers (image and 17-entry RGB gradient table), timing the
// allocations between allocation_start/allocation_end.
// NOTE(review): none of the hip* return codes are checked — a failed
// allocation would surface later as a crash or corrupt output.
void init(int argc, char *argv[])
{
    if (argc != 5) {
        printf("usage: ./mandelbrot_cu x_grid y_grid x_blocks y_blocks");
        exit(0);
    }
    sscanf(argv[1], "%d", &x_grid);
    sscanf(argv[2], "%d", &y_grid);
    sscanf(argv[3], "%d", &x_block);
    sscanf(argv[4], "%d", &y_block);
    hipEventCreate(&allocation_start); hipEventCreate(&allocation_end);
    hipEventCreate(&computing_start); hipEventCreate(&computing_end);
    hipEventCreate(&memcpy_start); hipEventCreate(&memcpy_end);
    hipEventRecord(allocation_start, 0);
    // pinned host memory: required for fast async-capable device copies
    hipHostMalloc((void **) &image_buffer, ARRAY_SIZE);
    hipMalloc((void **) &d_image_buffer, ARRAY_SIZE);
    hipMalloc((void **) &d_colors, 51 * sizeof(int));
    hipMemcpy(d_colors, colors, 51 * sizeof(int), hipMemcpyHostToDevice);
    hipEventRecord(allocation_end, 0);
}
// Writes image_buffer to "output.ppm" as a binary (P6) PPM image of
// IMAGE_SIZE x IMAGE_SIZE RGB pixels.
// NOTE(review): the fopen() result is not checked before use.
void write_to_file()
{
    FILE * file;
    const char *filename = "output.ppm";
    const char *comment = "# ";
    int max_color_component_value = 255;
    file = fopen(filename,"wb");
    fprintf(file, "P6\n %s\n %d\n %d\n %d\n", comment,
            IMAGE_SIZE, IMAGE_SIZE, max_color_component_value);
    // one 3-byte RGB triplet per pixel
    for(int i = 0; i < IMAGE_SIZE * IMAGE_SIZE; i++){
        fwrite(&image_buffer[3 * i], 1 , 3, file);
    };
    fclose(file);
}
// Computes one pixel of the escape-time fractal image.  Thread (i_x, i_y)
// maps to pixel (i_x, i_y): it iterates z = z^2 + c (c derived from the
// pixel's position in the [C_X_MIN,C_X_MAX] x [C_Y_MIN,C_Y_MAX] window) up
// to ITERATION_MAX times or until |z|^2 exceeds the escape radius, then
// writes the RGB gradient color for the escape iteration into buffer.
//   buffer   - device pointer to 3*IMAGE_SIZE*IMAGE_SIZE RGB bytes
//   colors_d - device pointer to the 17-entry RGB gradient table
// Fix vs. original: added a bounds guard — the grid/block dimensions come
// from the command line and need not divide IMAGE_SIZE exactly, so
// overshooting threads previously wrote past the end of buffer.
__global__ void gpu_compute_mandelbrot(unsigned char *buffer, int *colors_d)
{
    double z_x = 0.0;
    double z_y = 0.0;
    double z_x_squared = 0.0;
    double z_y_squared = 0.0;
    double escape_radius_squared = 4;
    double c_x;
    double c_y;
    int i_y = blockIdx.y * blockDim.y + threadIdx.y;
    int i_x = blockIdx.x * blockDim.x + threadIdx.x;
    int color;
    int iteration;
    // out-of-range threads must not touch buffer
    if (i_x >= IMAGE_SIZE || i_y >= IMAGE_SIZE)
        return;
    c_y = C_Y_MIN + i_y * PIXEL_HEIGHT;
    if (fabs(c_y) < PIXEL_HEIGHT / 2)
        c_y = 0.0;   // snap the axis row exactly onto y == 0
    c_x = C_X_MIN + i_x * PIXEL_WIDTH;
    for (iteration = 0;
         iteration < ITERATION_MAX &&
         ((z_x_squared + z_y_squared) < escape_radius_squared);
         iteration++) {
        z_y = 2 * z_x * z_y + c_y;
        z_x = z_x_squared - z_y_squared + c_x;
        z_x_squared = z_x * z_x;
        z_y_squared = z_y * z_y;
    }
    // interior points get the final gradient entry; others cycle the palette
    color = (iteration == ITERATION_MAX) ? GRADIENT_SIZE : (iteration % GRADIENT_SIZE);
    for (int i = 0; i < 3; i++) {
        buffer[3 * ((IMAGE_SIZE * i_y) + i_x) + i] = colors_d[(3 * color) + i];
    }
}
// Launches the fractal kernel with the user-supplied grid/block shape, waits
// for completion, then copies the image back to the pinned host buffer,
// timing the copy between memcpy_start/memcpy_end.
// NOTE(review): the launch result is never checked (no hipGetLastError).
void compute_mandelbrot()
{
    hipLaunchKernelGGL(( gpu_compute_mandelbrot), dim3(dim3(x_grid, y_grid)), dim3(dim3(x_block, y_block)), 0, 0, d_image_buffer, d_colors);
    hipDeviceSynchronize();
    hipEventRecord(memcpy_start, 0);
    hipMemcpy(image_buffer, d_image_buffer, ARRAY_SIZE, hipMemcpyDeviceToHost);
    hipEventRecord(memcpy_end, 0);
}
// Entry point: allocate, render, write output.ppm, free, and print the
// compute time in seconds.  allocation_time and memcpy_time are computed
// but not printed.  The blocking hipMemcpy inside compute_mandelbrot()
// should leave all recorded events complete before ElapsedTime is queried —
// TODO confirm; there is no explicit hipEventSynchronize.
int main(int argc, char *argv[])
{
    init(argc, argv);
    hipEventRecord(computing_start, 0);
    compute_mandelbrot();
    hipEventRecord(computing_end, 0);
    write_to_file();
    hipHostFree(image_buffer); hipFree(d_image_buffer);
    hipFree(d_colors);
    hipEventElapsedTime(&allocation_time, allocation_start, allocation_end);
    hipEventElapsedTime(&computing_time, computing_start, computing_end);
    hipEventElapsedTime(&memcpy_time, memcpy_start, memcpy_end);
    printf("%.6f\n", computing_time / 1000.0);
    return 0;
}
#include <stdlib.h>
#include <math.h>
#define GRADIENT_SIZE 16
#define C_X_MIN -0.188
#define C_X_MAX -0.012
#define C_Y_MIN 0.554
#define C_Y_MAX 0.754
#define IMAGE_SIZE 4096
#define ARRAY_SIZE (3 * IMAGE_SIZE * IMAGE_SIZE * sizeof(unsigned char))
#define PIXEL_WIDTH ((C_X_MAX - C_X_MIN) / IMAGE_SIZE)
#define PIXEL_HEIGHT ((C_Y_MAX - C_Y_MIN) / IMAGE_SIZE)
#define ITERATION_MAX 200
int colors[51] = {
66, 30, 15,
25, 7, 26,
9, 1, 47,
4, 4, 73,
0, 7, 100,
12, 44, 138,
24, 82, 177,
57, 125, 209,
134, 181, 229,
211, 236, 248,
241, 233, 191,
248, 201, 95,
255, 170, 0,
204, 128, 0,
153, 87, 0,
106, 52, 3,
16, 16, 16,
};
int *d_colors;
int x_grid;
int y_grid;
int x_block;
int y_block;
unsigned char *image_buffer;
unsigned char *d_image_buffer;
cudaEvent_t allocation_start, allocation_end;
cudaEvent_t computing_start, computing_end;
cudaEvent_t memcpy_start, memcpy_end;
float allocation_time, computing_time, memcpy_time;
// Parses the launch configuration (grid and block dimensions) from argv,
// creates the timing events, and allocates the pinned host image buffer plus
// the device buffers (image and 17-entry RGB gradient table), timing the
// allocations between allocation_start/allocation_end.
// NOTE(review): none of the cuda* return codes are checked — a failed
// allocation would surface later as a crash or corrupt output.
void init(int argc, char *argv[])
{
    if (argc != 5) {
        printf("usage: ./mandelbrot_cu x_grid y_grid x_blocks y_blocks");
        exit(0);
    }
    sscanf(argv[1], "%d", &x_grid);
    sscanf(argv[2], "%d", &y_grid);
    sscanf(argv[3], "%d", &x_block);
    sscanf(argv[4], "%d", &y_block);
    cudaEventCreate(&allocation_start); cudaEventCreate(&allocation_end);
    cudaEventCreate(&computing_start); cudaEventCreate(&computing_end);
    cudaEventCreate(&memcpy_start); cudaEventCreate(&memcpy_end);
    cudaEventRecord(allocation_start, 0);
    // pinned host memory: required for fast async-capable device copies
    cudaMallocHost((void **) &image_buffer, ARRAY_SIZE);
    cudaMalloc((void **) &d_image_buffer, ARRAY_SIZE);
    cudaMalloc((void **) &d_colors, 51 * sizeof(int));
    cudaMemcpy(d_colors, colors, 51 * sizeof(int), cudaMemcpyHostToDevice);
    cudaEventRecord(allocation_end, 0);
}
// Writes image_buffer to "output.ppm" as a binary (P6) PPM image of
// IMAGE_SIZE x IMAGE_SIZE RGB pixels.
// NOTE(review): the fopen() result is not checked before use.
void write_to_file()
{
    FILE * file;
    const char *filename = "output.ppm";
    const char *comment = "# ";
    int max_color_component_value = 255;
    file = fopen(filename,"wb");
    fprintf(file, "P6\n %s\n %d\n %d\n %d\n", comment,
            IMAGE_SIZE, IMAGE_SIZE, max_color_component_value);
    // one 3-byte RGB triplet per pixel
    for(int i = 0; i < IMAGE_SIZE * IMAGE_SIZE; i++){
        fwrite(&image_buffer[3 * i], 1 , 3, file);
    };
    fclose(file);
}
// Computes one pixel of the escape-time fractal image.  Thread (i_x, i_y)
// maps to pixel (i_x, i_y): it iterates z = z^2 + c (c derived from the
// pixel's position in the [C_X_MIN,C_X_MAX] x [C_Y_MIN,C_Y_MAX] window) up
// to ITERATION_MAX times or until |z|^2 exceeds the escape radius, then
// writes the RGB gradient color for the escape iteration into buffer.
//   buffer   - device pointer to 3*IMAGE_SIZE*IMAGE_SIZE RGB bytes
//   colors_d - device pointer to the 17-entry RGB gradient table
// Fix vs. original: added a bounds guard — the grid/block dimensions come
// from the command line and need not divide IMAGE_SIZE exactly, so
// overshooting threads previously wrote past the end of buffer.
__global__ void gpu_compute_mandelbrot(unsigned char *buffer, int *colors_d)
{
    double z_x = 0.0;
    double z_y = 0.0;
    double z_x_squared = 0.0;
    double z_y_squared = 0.0;
    double escape_radius_squared = 4;
    double c_x;
    double c_y;
    int i_y = blockIdx.y * blockDim.y + threadIdx.y;
    int i_x = blockIdx.x * blockDim.x + threadIdx.x;
    int color;
    int iteration;
    // out-of-range threads must not touch buffer
    if (i_x >= IMAGE_SIZE || i_y >= IMAGE_SIZE)
        return;
    c_y = C_Y_MIN + i_y * PIXEL_HEIGHT;
    if (fabs(c_y) < PIXEL_HEIGHT / 2)
        c_y = 0.0;   // snap the axis row exactly onto y == 0
    c_x = C_X_MIN + i_x * PIXEL_WIDTH;
    for (iteration = 0;
         iteration < ITERATION_MAX &&
         ((z_x_squared + z_y_squared) < escape_radius_squared);
         iteration++) {
        z_y = 2 * z_x * z_y + c_y;
        z_x = z_x_squared - z_y_squared + c_x;
        z_x_squared = z_x * z_x;
        z_y_squared = z_y * z_y;
    }
    // interior points get the final gradient entry; others cycle the palette
    color = (iteration == ITERATION_MAX) ? GRADIENT_SIZE : (iteration % GRADIENT_SIZE);
    for (int i = 0; i < 3; i++) {
        buffer[3 * ((IMAGE_SIZE * i_y) + i_x) + i] = colors_d[(3 * color) + i];
    }
}
// Launches the fractal kernel with the user-supplied grid/block shape, waits
// for completion, then copies the image back to the pinned host buffer,
// timing the copy between memcpy_start/memcpy_end.
// NOTE(review): the launch result is never checked (no cudaGetLastError).
void compute_mandelbrot()
{
    gpu_compute_mandelbrot<<<dim3(x_grid, y_grid), dim3(x_block, y_block)>>>(d_image_buffer, d_colors);
    cudaDeviceSynchronize();
    cudaEventRecord(memcpy_start, 0);
    cudaMemcpy(image_buffer, d_image_buffer, ARRAY_SIZE, cudaMemcpyDeviceToHost);
    cudaEventRecord(memcpy_end, 0);
}
// Entry point: allocate, render, write output.ppm, free, and print the
// compute time in seconds.  allocation_time and memcpy_time are computed
// but not printed.  The blocking cudaMemcpy inside compute_mandelbrot()
// should leave all recorded events complete before ElapsedTime is queried —
// TODO confirm; there is no explicit cudaEventSynchronize.
int main(int argc, char *argv[])
{
    init(argc, argv);
    cudaEventRecord(computing_start, 0);
    compute_mandelbrot();
    cudaEventRecord(computing_end, 0);
    write_to_file();
    cudaFreeHost(image_buffer); cudaFree(d_image_buffer);
    cudaFree(d_colors);
    cudaEventElapsedTime(&allocation_time, allocation_start, allocation_end);
    cudaEventElapsedTime(&computing_time, computing_start, computing_end);
    cudaEventElapsedTime(&memcpy_time, memcpy_start, memcpy_end);
    printf("%.6f\n", computing_time / 1000.0);
    return 0;
}
} |
4700e37fea5b10cc808c42c9262a750c74d22acb.hip | // !!! This is a file automatically generated by hipify!!!
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <algorithm>
#include <functional>
#include <numeric>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#include "sorts.hip"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#endif
using namespace std;
using namespace thrust::placeholders;
std::clock_t tot;
unsigned long long int total_count = 0;
unsigned int total_segments = 0;
unsigned int total_max;
unsigned int process_count;
map <unsigned int, unsigned int> str_offset;
long long int totalRecs = 0;
bool fact_file_loaded = 1;
char map_check;
void* d_v = NULL;
void* s_v = NULL;
unsigned int oldCount;
queue<string> op_sort;
queue<string> op_type;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<string> col_aliases;
void* alloced_tmp;
unsigned int alloced_sz = 0;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string,string> setMap; //map to keep track of column names and set names
struct is_match
{
__host__ __device__
bool operator()(unsigned int x)
{
return x != 4294967295;
}
};
struct f_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return !(((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
struct l_to_ui
{
    // Truncates an int_type to its low 32 bits via (unsigned int).
    // NOTE(review): the declared result type is float_type while the value is
    // cast to unsigned int, so the truncated value is then implicitly
    // converted to float_type -- confirm callers expect that double step.
    __host__ __device__
    float_type operator()(const int_type x)
    {
        return (unsigned int)x;
    }
};
struct float_to_decimal
{
    // Scales a float by 100 and truncates to an integer (scaled-decimal
    // representation with 2 implied fraction digits).
    // NOTE(review): the result is returned through float_type, so the
    // truncated integer is converted back to float -- verify intended.
    __host__ __device__
    float_type operator()(const float_type x)
    {
        return (int_type)(x*100);
    }
};
struct to_zero
{
    // Maps the -1 sentinel to false and every other value to true.
    __host__ __device__
    bool operator()(const int_type x)
    {
        return x != -1;
    }
};
struct div_long_to_float_type
{
    // Divide an int_type by a float_type, producing a float_type quotient.
    __host__ __device__
    float_type operator()(const int_type x, const float_type y)
    {
        return static_cast<float_type>(x) / y;
    }
};
struct long_to_float
{
    // Convert a scaled-decimal 64-bit int (2 implied fraction digits)
    // back to its float value.
    __host__ __device__
    float_type operator()(const long long int x)
    {
        return static_cast<float_type>(x) / 100.0;
    }
};
// trim from start
// Removes leading whitespace from s in place and returns a reference to it.
// FIX: the function name was garbled to "<rim" (corrupted "&ltrim"), which is
// a syntax error and breaks the call in trim(); also replaced the
// C++17-removed std::ptr_fun/std::not1 combination with a lambda.
static inline std::string &ltrim(std::string &s) {
    s.erase(s.begin(), std::find_if(s.begin(), s.end(),
            [](unsigned char ch) { return !std::isspace(ch); }));
    return s;
}
// trim from end
// Removes trailing whitespace from s in place and returns a reference to it.
// FIX: replaced the C++17-removed std::ptr_fun/std::not1 combination with a
// lambda; behavior is unchanged.
static inline std::string &rtrim(std::string &s) {
    s.erase(std::find_if(s.rbegin(), s.rend(),
            [](unsigned char ch) { return !std::isspace(ch); }).base(), s.end());
    return s;
}
// trim from both ends
// Strips whitespace from both ends of s in place and returns the reference.
static inline std::string &trim(std::string &s) {
    rtrim(s);
    return ltrim(s);
}
// Re-entrant single-delimiter tokenizer.  Pass the string in s on the first
// call and 0 afterwards; *m keeps the resume position between calls.  The
// input buffer is modified in place (delimiters overwritten with NUL).
char *mystrtok(char **m,char *s,char c)
{
    char *start = s ? s : *m;          // resume unless a new string was given
    if (*start == '\0')
        return 0;                      // nothing left to tokenize
    char *delim = strchr(start, c);
    if (delim) {
        *delim = '\0';                 // terminate the token in place
        *m = delim + 1;                // resume just past the delimiter
    }
    else {
        *m = start + strlen(start);    // resume at the terminating NUL
    }
    return start;
}
// Forward declarations of helpers defined later in this translation unit.
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, unsigned int& count);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int count, unsigned int g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int count, unsigned int g_size);
void write_compressed_char(string file_name, unsigned int index, unsigned int mCount);
unsigned long long int largest_prm(CudaSet* a);
unsigned int max_tmp(CudaSet* a);
unsigned int curr_segment = 10000000;  // sentinel value: no segment currently loaded
size_t getFreeMem();
char zone_map_check(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, CudaSet* a, unsigned int segment);
float total_time1 = 0;
// Construct a set backed by an external text (delimited) data source.
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs)
    : mColumnCount(0), mRecCount(0)
{
    initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
    keep = false;
    partial_load = 0;   // whole source is materialized, not read per segment
    source = 1;         // backed by an external source
    text_source = 1;    // ...in text form
    grp = NULL;
};
// Construct a set backed by a binary column-store file; segments are loaded
// lazily (partial_load) from <file_name>.<col>.<seg> files.
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs, char* file_name)
    : mColumnCount(0), mRecCount(0)
{
    initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
    keep = false;
    partial_load = 1;   // segments are read from disk on demand
    source = 1;
    text_source = 0;    // binary, not text
    grp = NULL;
};
// Construct an in-memory intermediate result set of the given dimensions.
CudaSet::CudaSet(unsigned int RecordCount, unsigned int ColumnCount)
{
    initialize(RecordCount, ColumnCount);
    keep = false;
    partial_load = 0;
    source = 0;         // purely in-memory, no backing file
    text_source = 0;
    grp = NULL;
};
// Construct an in-memory set from a select list (columns and their aliases).
CudaSet::CudaSet(queue<string> op_sel, queue<string> op_sel_as)
{
    initialize(op_sel, op_sel_as);
    keep = false;
    partial_load = 0;
    source = 0;
    text_source = 0;
    grp = NULL;
};
// Construct the result set of a join of a and b, keeping the selected columns.
CudaSet::CudaSet(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
    initialize(a,b, op_sel, op_sel_as);
    keep = false;
    partial_load = 0;
    source = 0;
    text_source = 0;
    grp = NULL;
};
// Releases all host and device resources owned by this set (see free()).
CudaSet::~CudaSet()
{
    free();
};
// Allocate device storage for column colIndex sized for RecordCount records.
// Int/float columns are thrust device vectors; char columns are raw
// hipMalloc buffers of RecordCount * fixed string width bytes.
void CudaSet::allocColumnOnDevice(unsigned int colIndex, unsigned long long int RecordCount)
{
    if (type[colIndex] == 0) {
        d_columns_int[type_index[colIndex]].resize(RecordCount);
    }
    else if (type[colIndex] == 1)
        d_columns_float[type_index[colIndex]].resize(RecordCount);
    else {
        void* d;
        unsigned long long int sz = (unsigned long long int)RecordCount*char_size[type_index[colIndex]];
        hipError_t cudaStatus = hipMalloc(&d, sz);
        if(cudaStatus != hipSuccess) {
            // Hard exit on allocation failure -- no recovery path here.
            cout << "Could not allocate " << sz << " bytes of GPU memory for " << RecordCount << " records " << endl;
            exit(0);
        };
        d_columns_char[type_index[colIndex]] = (char*)d;
    };
};
// Read one compressed char-column segment file, hash every dictionary string
// with MurmurHash64A, then append the per-row hash values to
// d_columns_int[i_cnt] (applying the row filter in prm[] when present).
// This lets string columns participate in joins/group-bys as 64-bit ints.
void CudaSet::decompress_char_hash(unsigned int colIndex, unsigned int segment, unsigned int i_cnt)
{
    unsigned int bits_encoded, fit_count, sz, vals_count, real_count, old_count;
    const unsigned int len = char_size[type_index[colIndex]];
    // Build the segment file name: <load_file_name>.<col>.<segment>
    char f1[100];
    strcpy(f1, load_file_name);
    strcat(f1,".");
    char col_pos[3];
    itoaa(cols[colIndex],col_pos);
    strcat(f1,col_pos);
    strcat(f1,".");
    itoaa(segment,col_pos);
    strcat(f1,col_pos);
    FILE* f;
    f = fopen (f1 , "rb" );
    // Dictionary: sz fixed-width strings of len bytes each.
    fread(&sz, 4, 1, f);
    char* d_array = new char[sz*len];
    fread((void*)d_array, sz*len, 1, f);
    unsigned long long int* hashes = new unsigned long long int[sz];
    for(unsigned int i = 0; i < sz ; i++) {
        hashes[i] = MurmurHash64A(&d_array[i*len], len, hash_seed); // hash each dictionary entry
    };
    void* d;
    // NOTE(review): allocates sz*int_size bytes but copies sz*8 -- assumes
    // int_size == 8; confirm.
    hipMalloc((void **) &d, sz*int_size);
    hipMemcpy( d, (void *) hashes, sz*8, hipMemcpyHostToDevice);
    thrust::device_ptr<unsigned long long int> dd_int((unsigned long long int*)d);
    delete[] d_array;
    delete[] hashes;
    // Bit-packed dictionary indexes follow the dictionary in the file.
    fread(&fit_count, 4, 1, f);
    fread(&bits_encoded, 4, 1, f);
    fread(&vals_count, 4, 1, f);
    fread(&real_count, 4, 1, f);
    unsigned long long int* int_array = new unsigned long long int[vals_count];
    fread((void*)int_array, 1, vals_count*8, f);
    fclose(f);
    void* d_val;
    hipMalloc((void **) &d_val, vals_count*8);
    hipMemcpy(d_val, (void *) int_array, vals_count*8, hipMemcpyHostToDevice);
    thrust::device_ptr<unsigned long long int> mval((unsigned long long int*)d_val);
    delete[] int_array;
    void* d_int;
    hipMalloc((void **) &d_int, real_count*4);
    // convert bits to ints and then do gather
    void* d_v;
    hipMalloc((void **) &d_v, 8);
    thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v);
    dd_v[1] = fit_count;
    dd_v[0] = bits_encoded;
    thrust::counting_iterator<unsigned int> begin(0);
    decompress_functor_str ff((unsigned long long int*)d_val,(unsigned int*)d_int, (unsigned int*)d_v);
    thrust::for_each(begin, begin + real_count, ff);
    //thrust::device_ptr<long long int> dd_int((long long int*)d);
    thrust::device_ptr<unsigned int> dd_val((unsigned int*)d_int);
    if(!prm.empty()) {
        if(prm_index[segment] == 'R') {
            // 'R': row filter -- gather the full segment first, then gather
            // only the filtered rows through the permutation in prm[segment].
            thrust::device_ptr<int_type> d_tmp = thrust::device_malloc<int_type>(real_count);
            thrust::gather(dd_val, dd_val + real_count, dd_int, d_tmp);
            if(prm_d.size() == 0) // find the largest prm segment
                prm_d.resize(largest_prm(this));
            hipMemcpy((void**)(thrust::raw_pointer_cast(prm_d.data())), (void**)prm[segment],
                      4*prm_count[segment], hipMemcpyHostToDevice);
            old_count = d_columns_int[i_cnt].size();
            d_columns_int[i_cnt].resize(old_count + prm_count[segment]);
            thrust::gather(prm_d.begin(), prm_d.begin() + prm_count[segment], d_tmp, d_columns_int[i_cnt].begin() + old_count);
            thrust::device_free(d_tmp);
        }
        else if(prm_index[segment] == 'A') {
            // 'A': every row of the segment passes the filter.
            old_count = d_columns_int[i_cnt].size();
            d_columns_int[i_cnt].resize(old_count + real_count);
            thrust::gather(dd_val, dd_val + real_count, dd_int, d_columns_int[i_cnt].begin() + old_count);
        }
    }
    else {
        // No filter: append every row's hash.
        old_count = d_columns_int[i_cnt].size();
        d_columns_int[i_cnt].resize(old_count + real_count);
        thrust::gather(dd_val, dd_val + real_count, dd_int, d_columns_int[i_cnt].begin() + old_count);
    };
    hipFree(d);
    hipFree(d_val);
    hipFree(d_v);
    hipFree(d_int);
};
// takes a char column , hashes strings, copies them to a gpu
// Appends one segment's worth of hashed values of string column `field` to
// d_columns_int[i_cnt].  Uncompressed data is hashed row-by-row on the host;
// compressed data is hashed through its dictionary (decompress_char_hash).
void CudaSet::add_hashed_strings(string field, unsigned int segment, unsigned int i_cnt)
{
    unsigned int colInd2 = columnNames.find(field)->second;
    CudaSet *t = varNames[setMap[field]];   // the set that physically owns the column data
    if(not_compressed) { // decompressed strings on a host
        unsigned int old_count;
        unsigned long long int* hashes = new unsigned long long int[t->mRecCount];
        for(unsigned int i = 0; i < t->mRecCount ; i++) {
            // Hash the fixed-width string at row i of this segment.
            hashes[i] = MurmurHash64A(t->h_columns_char[t->type_index[colInd2]] + i*t->char_size[t->type_index[colInd2]] + segment*t->maxRecs*t->char_size[t->type_index[colInd2]], t->char_size[t->type_index[colInd2]], hash_seed);
        };
        if(!prm.empty()) {
            if(prm_index[segment] == 'R') {
                // 'R': gather only the rows selected by the filter permutation.
                thrust::device_ptr<unsigned long long int> d_tmp = thrust::device_malloc<unsigned long long int>(t->mRecCount);
                thrust::copy(hashes, hashes+mRecCount, d_tmp);
                if(prm_d.size() == 0) // find the largest prm segment
                    prm_d.resize(largest_prm(this));
                hipMemcpy((void**)(thrust::raw_pointer_cast(prm_d.data())), (void**)prm[segment],
                          4*prm_count[segment], hipMemcpyHostToDevice);
                old_count = d_columns_int[i_cnt].size();
                d_columns_int[i_cnt].resize(old_count + prm_count[segment]);
                thrust::gather(prm_d.begin(), prm_d.begin() + prm_count[segment], d_tmp, d_columns_int[i_cnt].begin() + old_count);
                thrust::device_free(d_tmp);
            }
            else if(prm_index[segment] == 'A') {
                // 'A': all rows pass; copy everything.
                old_count = d_columns_int[i_cnt].size();
                d_columns_int[i_cnt].resize(old_count + mRecCount);
                thrust::copy(hashes, hashes + mRecCount, d_columns_int[i_cnt].begin() + old_count);
            }
        }
        else {
            old_count = d_columns_int[i_cnt].size();
            d_columns_int[i_cnt].resize(old_count + mRecCount);
            thrust::copy(hashes, hashes + mRecCount, d_columns_int[i_cnt].begin() + old_count);
        }
        delete [] hashes;
    }
    else { // hash the dictionary
        decompress_char_hash(colInd2, segment, i_cnt);
    };
};
// Grow every host column by addRecs records for join output, reallocating
// char buffers only when the new size exceeds the tracked preallocated
// capacity (prealloc_char_size).
void CudaSet::resize_join(unsigned int addRecs)
{
    mRecCount = mRecCount + addRecs;
    bool prealloc = 0;
    for(unsigned int i=0; i < mColumnCount; i++) {
        if(type[i] == 0) {
            h_columns_int[type_index[i]].resize(mRecCount);
        }
        else if(type[i] == 1) {
            h_columns_float[type_index[i]].resize(mRecCount);
        }
        else {
            if (h_columns_char[type_index[i]]) {
                // Only realloc when the buffer is too small for the new count.
                if (mRecCount > prealloc_char_size) {
                    h_columns_char[type_index[i]] = (char*)realloc(h_columns_char[type_index[i]], (unsigned long long int)mRecCount*(unsigned long long int)char_size[type_index[i]]);
                    prealloc = 1;
                };
            }
            else {
                h_columns_char[type_index[i]] = new char[(unsigned long long int)mRecCount*(unsigned long long int)char_size[type_index[i]]];
            };
        };
    };
    if(prealloc)
        prealloc_char_size = mRecCount;   // record the new char-buffer capacity
};
// Grow every host column by addRecs records.  Unlike resize_join(), char
// buffers are realloc'ed unconditionally (no capacity tracking).
void CudaSet::resize(unsigned int addRecs)
{
    mRecCount = mRecCount + addRecs;
    for(unsigned int i=0; i <mColumnCount; i++) {
        if(type[i] == 0) {
            h_columns_int[type_index[i]].resize(mRecCount);
        }
        else if(type[i] == 1) {
            h_columns_float[type_index[i]].resize(mRecCount);
        }
        else {
            if (h_columns_char[type_index[i]]) {
                h_columns_char[type_index[i]] = (char*)realloc(h_columns_char[type_index[i]], (unsigned long long int)mRecCount*(unsigned long long int)char_size[type_index[i]]);
            }
            else {
                h_columns_char[type_index[i]] = new char[(unsigned long long int)mRecCount*(unsigned long long int)char_size[type_index[i]]];
            };
        };
    };
};
// Pre-allocate host storage for Recs records in every column; exits the
// process when a char buffer cannot be allocated.
void CudaSet::reserve(unsigned int Recs)
{
    for(unsigned int i=0; i <mColumnCount; i++) {
        if(type[i] == 0)
            h_columns_int[type_index[i]].reserve(Recs);
        else if(type[i] == 1)
            h_columns_float[type_index[i]].reserve(Recs);
        else {
            h_columns_char[type_index[i]] = new char[(unsigned long long int)Recs*(unsigned long long int)char_size[type_index[i]]];
            // NOTE(review): plain new[] throws std::bad_alloc rather than
            // returning NULL, so this check is likely dead -- confirm whether
            // a nothrow allocation was intended.
            if(h_columns_char[type_index[i]] == NULL) {
                cout << "Could not allocate on a host " << Recs << " records of size " << char_size[type_index[i]] << endl;
                exit(0);
            };
            prealloc_char_size = Recs;
        };
    };
};
// Release the device-side storage of one column (the host copy is kept).
void CudaSet::deAllocColumnOnDevice(unsigned int colIndex)
{
    if (type[colIndex] == 0 && !d_columns_int.empty()) {
        d_columns_int[type_index[colIndex]].resize(0);
        d_columns_int[type_index[colIndex]].shrink_to_fit();   // actually frees device memory
    }
    else if (type[colIndex] == 1 && !d_columns_float.empty()) {
        d_columns_float[type_index[colIndex]].resize(0);
        d_columns_float[type_index[colIndex]].shrink_to_fit();
    }
    else if (type[colIndex] == 2 && d_columns_char[type_index[colIndex]] != NULL) {
        hipFree(d_columns_char[type_index[colIndex]]);
        d_columns_char[type_index[colIndex]] = NULL;
    };
};
// Allocate device storage for every column, sized for RecordCount records.
void CudaSet::allocOnDevice(unsigned long long int RecordCount)
{
    for(unsigned int col = 0; col < mColumnCount; col++) {
        allocColumnOnDevice(col, RecordCount);
    };
};
// Release device storage for all columns plus the group-by flag buffer, and
// -- when this set is a filtered view (prm non-empty) over another set --
// recursively release the source set's device columns as well.
void CudaSet::deAllocOnDevice()
{
    for(unsigned int i=0; i <mColumnCount; i++)
        deAllocColumnOnDevice(i);
    if(!columnGroups.empty() && mRecCount !=0) {
        hipFree(grp);
        grp = NULL;
    };
    if(!prm.empty()) { // free the sources
        string some_field;
        map<string,int>::iterator it=columnNames.begin();
        some_field = (*it).first;
        // Only free the source when it is a different set than this one.
        if(setMap[some_field].compare(name)) {
            CudaSet* t = varNames[setMap[some_field]];
            t->deAllocOnDevice();
        };
    };
};
// Grow one column's device storage to hold RecCount additional records.
// NOTE(review): the char branch frees the old buffer and allocates a fresh
// one, discarding existing device contents -- confirm callers expect that.
void CudaSet::resizeDeviceColumn(unsigned int RecCount, unsigned int colIndex)
{
    if (RecCount) {
        if (type[colIndex] == 0)
            d_columns_int[type_index[colIndex]].resize(mRecCount+RecCount);
        else if (type[colIndex] == 1)
            d_columns_float[type_index[colIndex]].resize(mRecCount+RecCount);
        else {
            if (d_columns_char[type_index[colIndex]] != NULL)
                hipFree(d_columns_char[type_index[colIndex]]);
            void *d;
            hipMalloc((void **) &d, (mRecCount+RecCount)*char_size[type_index[colIndex]]);
            d_columns_char[type_index[colIndex]] = (char*)d;
        };
    };
};
// Grow the device storage of every column by RecCount records.
void CudaSet::resizeDevice(unsigned int RecCount)
{
    if (RecCount) {
        for(unsigned int col = 0; col < mColumnCount; col++) {
            resizeDeviceColumn(RecCount, col);
        };
    };
};
// Returns true when column i currently has data resident on the GPU.
bool CudaSet::onDevice(unsigned int i)
{
    const unsigned int j = type_index[i];
    switch (type[i]) {
    case 0:
        return !d_columns_int.empty() && d_columns_int[j].size() != 0;
    case 1:
        return !d_columns_float.empty() && d_columns_float[j].size() != 0;
    case 2:
        return !d_columns_char.empty() && d_columns_char[j] != NULL;
    default:
        // Unknown column type: preserve the original fall-through answer.
        return 1;
    }
}
// Create an empty CudaSet with the same schema as this one (column names,
// types, widths, segment layout); the record count is reset to 0 and no
// column data is copied.
CudaSet* CudaSet::copyDeviceStruct()
{
    CudaSet* a = new CudaSet(mRecCount, mColumnCount);
    a->not_compressed = not_compressed;
    a->segCount = segCount;
    a->maxRecs = maxRecs;
    for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
        a->columnNames[(*it).first] = (*it).second;
    for(unsigned int i=0; i < mColumnCount; i++) {
        a->cols[i] = cols[i];
        a->type[i] = type[i];
        if(a->type[i] == 0) {
            // Int column: empty host/device vectors, same slot index scheme.
            a->d_columns_int.push_back(thrust::device_vector<int_type>());
            a->h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >());
            a->type_index[i] = a->d_columns_int.size()-1;
        }
        else if(a->type[i] == 1) {
            a->d_columns_float.push_back(thrust::device_vector<float_type>());
            a->h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >());
            a->type_index[i] = a->d_columns_float.size()-1;
            a->decimal[i] = decimal[i];
        }
        else {
            // Char column: buffers allocated lazily, only the width is copied.
            a->h_columns_char.push_back(NULL);
            a->d_columns_char.push_back(NULL);
            a->type_index[i] = a->d_columns_char.size()-1;
            a->char_size.push_back(char_size[type_index[i]]);
        };
    };
    a->load_file_name = load_file_name;
    a->mRecCount = 0;
    return a;
}
// Read one compressed segment of a column from <load_file_name>.<col>.<segNum>
// into the column's host buffer.  Int/float blocks start with a 4-byte element
// count followed by (cnt+8)*8-4 bytes of compressed payload; char columns are
// decompressed immediately via decompress_char().
// NOTE(review): always returns 0 -- callers use the result as a data offset,
// so offset-0 appears to be assumed; confirm.
unsigned long long int CudaSet::readSegmentsFromFile(unsigned int segNum, unsigned int colIndex)
{
    // Build the segment file name: <load_file_name>.<col>.<segNum>
    char f1[100];
    strcpy(f1, load_file_name);
    strcat(f1,".");
    char col_pos[3];
    itoaa(cols[colIndex],col_pos);
    strcat(f1,col_pos);
    unsigned int cnt;
    strcat(f1,".");
    itoaa(segNum,col_pos);
    strcat(f1,col_pos);
    std::clock_t start1 = std::clock();
    FILE* f;
    f = fopen(f1, "rb" );
    if(f == NULL) {
        cout << "Error opening " << f1 << " file " << endl;
        exit(0);
    };
    size_t rr;
    if(type[colIndex] == 0) {
        // First 4 bytes: compressed element count; rest of block follows it.
        fread(h_columns_int[type_index[colIndex]].data(), 4, 1, f);
        cnt = ((unsigned int*)(h_columns_int[type_index[colIndex]].data()))[0];
        //cout << "start fread " << f1 << " " << (cnt+8)*8 - 4 << endl;
        rr = fread((unsigned int*)(h_columns_int[type_index[colIndex]].data()) + 1, 1, (cnt+8)*8 - 4, f);
        if(rr != (cnt+8)*8 - 4) {
            cout << "Couldn't read " << (cnt+8)*8 - 4 << " bytes from " << f1 << endl;
            exit(0);
        };
        //cout << "end fread " << rr << endl;
    }
    else if(type[colIndex] == 1) {
        fread(h_columns_float[type_index[colIndex]].data(), 4, 1, f);
        cnt = ((unsigned int*)(h_columns_float[type_index[colIndex]].data()))[0];
        //cout << "start fread " << f1 << " " << (cnt+8)*8 - 4 << endl;
        rr = fread((unsigned int*)(h_columns_float[type_index[colIndex]].data()) + 1, 1, (cnt+8)*8 - 4, f);
        if(rr != (cnt+8)*8 - 4) {
            cout << "Couldn't read " << (cnt+8)*8 - 4 << " bytes from " << f1 << endl;
            exit(0);
        };
        //cout << "end fread " << rr << endl;
    }
    else {
        // Char columns are decoded straight to the device.
        decompress_char(f, colIndex, segNum);
    };
    tot = tot + ( std::clock() - start1 );   // accumulate file-read time
    fclose(f);
    return 0;
};
// Decompress a dictionary-encoded char segment from an already-open file f:
// upload the dictionary, unpack the bit-packed per-row dictionary indexes on
// the device, then gather the strings into the column's device buffer (or
// alloced_tmp when alloced_switch is set).  Advances str_offset[colIndex] by
// the number of rows written and sets mRecCount to the segment's row count.
void CudaSet::decompress_char(FILE* f, unsigned int colIndex, unsigned int segNum)
{
    unsigned int bits_encoded, fit_count, sz, vals_count, real_count;
    const unsigned int len = char_size[type_index[colIndex]];
    // Dictionary: sz fixed-width strings of len bytes each.
    fread(&sz, 4, 1, f);
    char* d_array = new char[sz*len];
    fread((void*)d_array, sz*len, 1, f);
    void* d;
    hipMalloc((void **) &d, sz*len);
    hipMemcpy( d, (void *) d_array, sz*len, hipMemcpyHostToDevice);
    delete[] d_array;
    fread(&fit_count, 4, 1, f);
    fread(&bits_encoded, 4, 1, f);
    fread(&vals_count, 4, 1, f);
    fread(&real_count, 4, 1, f);
    thrust::device_ptr<unsigned int> param = thrust::device_malloc<unsigned int>(2);
    param[1] = fit_count;
    param[0] = bits_encoded;
    unsigned long long int* int_array = new unsigned long long int[vals_count];
    fread((void*)int_array, 1, vals_count*8, f);
    fclose(f);
    void* d_val;
    hipMalloc((void **) &d_val, vals_count*8);
    hipMemcpy(d_val, (void *) int_array, vals_count*8, hipMemcpyHostToDevice);
    delete[] int_array;
    void* d_int;
    hipMalloc((void **) &d_int, real_count*4);
    // convert bits to ints and then do gather
    thrust::counting_iterator<unsigned int> begin(0);
    decompress_functor_str ff((unsigned long long int*)d_val,(unsigned int*)d_int, (unsigned int*)thrust::raw_pointer_cast(param));
    thrust::for_each(begin, begin + real_count, ff);
    //thrust::device_ptr<unsigned int> dd_r((unsigned int*)d_int);
    //for(int z = 0 ; z < 3; z++)
    //cout << "DD " << dd_r[z] << endl;
    //void* d_char;
    //hipMalloc((void **) &d_char, real_count*len);
    //hipMemset(d_char, 0, real_count*len);
    //str_gather(d_int, real_count, d, d_char, len);
    if(str_offset.count(colIndex) == 0)
        str_offset[colIndex] = 0;   // first segment of this column
    //cout << "str off " << str_offset[colIndex] << endl;
    //cout << "prm cnt of seg " << segNum << " is " << prm.empty() << endl;
    if(!alloced_switch)
        str_gather(d_int, real_count, d, d_columns_char[type_index[colIndex]] + str_offset[colIndex]*len, len);
    else
        str_gather(d_int, real_count, d, alloced_tmp, len);
    // Advance the write offset: by filtered row count when a prm filter
    // exists, otherwise by the full segment row count.
    if(!prm.empty()) {
        str_offset[colIndex] = str_offset[colIndex] + prm_count[segNum];
    }
    else {
        str_offset[colIndex] = str_offset[colIndex] + real_count;
    };
    //if(d_columns_char[type_index[colIndex]])
    //    hipFree(d_columns_char[type_index[colIndex]]);
    //d_columns_char[type_index[colIndex]] = (char*)d_char;
    mRecCount = real_count;
    hipFree(d);
    hipFree(d_val);
    thrust::device_free(param);
    hipFree(d_int);
}
// Copy one segment of column colIndex to the GPU.  Uncompressed data is
// copied straight from the host column; compressed data is (optionally) read
// from its segment file and pfor-decompressed.  When alloced_switch is set
// the destination is the shared scratch buffer alloced_tmp instead of the
// column's own device storage.  Char columns (type 2) are handled inside
// readSegmentsFromFile/decompress_char.  Updates mRecCount as a side effect.
void CudaSet::CopyColumnToGpu(unsigned int colIndex, unsigned int segment)
{
    if(not_compressed) {
        // calculate how many records we need to copy
        if(segment < segCount-1) {
            mRecCount = maxRecs;
        }
        else {
            mRecCount = oldRecCount - maxRecs*(segCount-1);   // last (short) segment
        };
        switch(type[colIndex]) {
        case 0 :
            if(!alloced_switch)
                thrust::copy(h_columns_int[type_index[colIndex]].begin() + maxRecs*segment, h_columns_int[type_index[colIndex]].begin() + maxRecs*segment + mRecCount, d_columns_int[type_index[colIndex]].begin());
            else {
                thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
                thrust::copy(h_columns_int[type_index[colIndex]].begin() + maxRecs*segment, h_columns_int[type_index[colIndex]].begin() + maxRecs*segment + mRecCount, d_col);
            };
            break;
        case 1 :
            if(!alloced_switch)
                thrust::copy(h_columns_float[type_index[colIndex]].begin() + maxRecs*segment, h_columns_float[type_index[colIndex]].begin() + maxRecs*segment + mRecCount, d_columns_float[type_index[colIndex]].begin());
            else {
                thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
                thrust::copy(h_columns_float[type_index[colIndex]].begin() + maxRecs*segment, h_columns_float[type_index[colIndex]].begin() + maxRecs*segment + mRecCount, d_col);
            };
            break;
        default :
            if(!alloced_switch)
                hipMemcpy(d_columns_char[type_index[colIndex]], h_columns_char[type_index[colIndex]] + maxRecs*segment*char_size[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, hipMemcpyHostToDevice);
            else
                hipMemcpy(alloced_tmp, h_columns_char[type_index[colIndex]] + maxRecs*segment*char_size[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, hipMemcpyHostToDevice);
        };
    }
    else {
        // FIX: data_offset was read uninitialized below when partial_load == 0.
        unsigned long long int data_offset = 0;
        if (partial_load)
            data_offset = readSegmentsFromFile(segment,colIndex);
        if(type[colIndex] != 2) {
            // Lazily allocate the scratch buffers shared by pfor_decompress.
            if(d_v == NULL)
                CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
            // FIX: a stray ';' after this condition made the allocation below
            // run unconditionally on every call, leaking device memory.
            if(s_v == NULL)
                CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
        };
        if(type[colIndex] == 0) {
            if(!alloced_switch) {
                mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data()), h_columns_int[type_index[colIndex]].data() + data_offset, d_v, s_v);
            }
            else {
                mRecCount = pfor_decompress(alloced_tmp, h_columns_int[type_index[colIndex]].data() + data_offset, d_v, s_v);
            };
        }
        else if(type[colIndex] == 1) {
            if(decimal[colIndex]) {
                // Decimal floats are stored as scaled 64-bit ints: decompress,
                // then convert back to float in place.
                if(!alloced_switch) {
                    mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data()) , h_columns_float[type_index[colIndex]].data() + data_offset, d_v, s_v);
                    thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data()));
                    thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin(), long_to_float());
                }
                else {
                    mRecCount = pfor_decompress(alloced_tmp, h_columns_float[type_index[colIndex]].data() + data_offset, d_v, s_v);
                    thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
                    thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
                    thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
                };
            }
            //else // uncompressed float
            //hipMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, hipMemcpyHostToDevice);
            // will have to fix it later so uncompressed data will be written by segments too
        }
    };
}
void CudaSet::CopyColumnToGpu(unsigned int colIndex) // copy all segments
{
    // Copy every segment of column colIndex to the GPU, decompressing
    // segment by segment; mRecCount ends up as the total record count.
    if(not_compressed) {
        switch(type[colIndex]) {
        case 0 :
            thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_columns_int[type_index[colIndex]].begin());
            break;
        case 1 :
            thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_columns_float[type_index[colIndex]].begin());
            break;
        default :
            hipMemcpy(d_columns_char[type_index[colIndex]], h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, hipMemcpyHostToDevice);
        };
    }
    else {
        // FIX: data_offset was read uninitialized when partial_load == 0.
        long long int data_offset = 0;
        // NOTE(review): this local intentionally shadows the file-level
        // totalRecs; kept as a local accumulator.
        unsigned long long int totalRecs = 0;
        if(d_v == NULL)
            CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
        // FIX: a stray ';' after this condition made the allocation below run
        // unconditionally on every call, leaking device memory.
        if(s_v == NULL)
            CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
        str_offset[colIndex] = 0;   // restart char write offset for a full copy
        for(unsigned int i = 0; i < segCount; i++) {
            if (partial_load)
                data_offset = readSegmentsFromFile(i,colIndex);
            if(type[colIndex] == 0) {
                mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data() + totalRecs), h_columns_int[type_index[colIndex]].data() + data_offset, d_v, s_v);
            }
            else if(type[colIndex] == 1) {
                if(decimal[colIndex]) {
                    // Scaled-decimal floats: decompress ints, convert in place.
                    mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totalRecs) , h_columns_float[type_index[colIndex]].data() + data_offset, d_v, s_v);
                    thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totalRecs));
                    thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin() + totalRecs, long_to_float());
                }
                // else uncompressed float
                //hipMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, hipMemcpyHostToDevice);
                // will have to fix it later so uncompressed data will be written by segments too
            };
            totalRecs = totalRecs + mRecCount;
        };
        mRecCount = totalRecs;   // expose the total decompressed record count
    };
}
// Copy RecCount records of one column from device to host, writing them at
// record position `offset` in the host buffer.
void CudaSet::CopyColumnToHost(int colIndex, unsigned int offset, unsigned int RecCount)
{
    switch(type[colIndex]) {
    case 0 :
        thrust::copy(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + RecCount, h_columns_int[type_index[colIndex]].begin() + offset);
        break;
    case 1 :
        thrust::copy(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + RecCount, h_columns_float[type_index[colIndex]].begin() + offset);
        break;
    default :
        // Char: offset is in records, so scale by the fixed string width.
        hipMemcpy(h_columns_char[type_index[colIndex]] + offset*char_size[type_index[colIndex]], d_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*RecCount, hipMemcpyDeviceToHost);
    }
}
// Convenience overload: copy the entire column (mRecCount records) to the
// start of the host buffer.
void CudaSet::CopyColumnToHost(int colIndex)
{
    CopyColumnToHost(colIndex, 0, mRecCount);
}
// Copy `count` records of every column from device to host, starting at
// record position `offset` in the host buffers.
void CudaSet::CopyToHost(unsigned int offset, unsigned int count)
{
    for(unsigned int col = 0; col < mColumnCount; col++) {
        CopyColumnToHost(col, offset, count);
    };
}
// Raw device pointer to the named float column's data.
float_type* CudaSet::get_float_type_by_name(string name)
{
    const unsigned int idx = columnNames.find(name)->second;
    return thrust::raw_pointer_cast(d_columns_float[type_index[idx]].data());
}
// Raw device pointer to the named int column's data.
int_type* CudaSet::get_int_by_name(string name)
{
    const unsigned int idx = columnNames.find(name)->second;
    return thrust::raw_pointer_cast(d_columns_int[type_index[idx]].data());
}
// Raw host pointer to the named float column's data.
float_type* CudaSet::get_host_float_by_name(string name)
{
    const unsigned int idx = columnNames.find(name)->second;
    return thrust::raw_pointer_cast(h_columns_float[type_index[idx]].data());
}
// Raw host pointer to the named int column's data.
int_type* CudaSet::get_host_int_by_name(string name)
{
    const unsigned int idx = columnNames.find(name)->second;
    return thrust::raw_pointer_cast(h_columns_int[type_index[idx]].data());
}
// Compute group boundaries over (already sort-ordered) rows for the given key
// columns.  Fills grp -- one bool per row, set where a new group starts -- and
// grp_count (number of groups).  Char keys are compared via their pre-hashed
// int columns starting at d_columns_int[int_col_count].
void CudaSet::GroupBy(stack<string> columnRef, unsigned int int_col_count)
{
    int grpInd, colIndex;
    if(grp)
        hipFree(grp);   // drop flags from a previous GroupBy
    CUDA_SAFE_CALL(hipMalloc((void **) &grp, mRecCount * sizeof(bool)));
    thrust::device_ptr<bool> d_grp(grp);
    thrust::sequence(d_grp, d_grp+mRecCount, 0, 0);   // init 0, step 0: all flags start false
    thrust::device_ptr<bool> d_group = thrust::device_malloc<bool>(mRecCount);
    d_group[mRecCount-1] = 1;   // the last row always terminates a group
    unsigned int i_count = 0;
    // i is never incremented; the loop drains columnRef via pop() until empty.
    for(int i = 0; i < columnRef.size(); columnRef.pop()) {
        columnGroups.push(columnRef.top()); // save for future references
        colIndex = columnNames[columnRef.top()];
        if(!onDevice(colIndex)) {
            allocColumnOnDevice(colIndex,mRecCount);
            // NOTE(review): this overload's second parameter is a segment
            // number, but mRecCount is passed -- confirm intended.
            CopyColumnToGpu(colIndex, mRecCount);
            grpInd = 1;
        }
        else
            grpInd = 0;
        if (type[colIndex] == 0) { // int_type
            // Flag rows whose key differs from the next row's key.
            thrust::transform(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + mRecCount - 1,
                              d_columns_int[type_index[colIndex]].begin()+1, d_group, thrust::not_equal_to<int_type>());
        }
        else if (type[colIndex] == 1) { // float_type
            thrust::transform(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + mRecCount - 1,
                              d_columns_float[type_index[colIndex]].begin()+1, d_group, f_not_equal_to());
        }
        else { // Char
            //str_grp(d_columns_char[type_index[colIndex]], mRecCount, d_group, char_size[type_index[colIndex]]);
            //use int_type
            thrust::transform(d_columns_int[int_col_count+i_count].begin(), d_columns_int[int_col_count+i_count].begin() + mRecCount - 1,
                              d_columns_int[int_col_count+i_count].begin()+1, d_group, thrust::not_equal_to<int_type>());
            i_count++;
        };
        // A row starts a group if ANY key column changes there.
        thrust::transform(d_group, d_group+mRecCount, d_grp, d_grp, thrust::logical_or<bool>());
        if (grpInd == 1)
            deAllocColumnOnDevice(colIndex);
    };
    thrust::device_free(d_group);
    grp_count = thrust::count(d_grp, d_grp+mRecCount,1);
};
// Add (or overwrite) a device int column named colName at slot colIndex,
// copying recCount values from the device buffer `col` into it.
void CudaSet::addDeviceColumn(int_type* col, int colIndex, string colName, unsigned int recCount)
{
    if (columnNames.find(colName) == columnNames.end()) {
        // New column: register the name and allocate fresh storage.
        columnNames[colName] = colIndex;
        type[colIndex] = 0;
        d_columns_int.push_back(thrust::device_vector<int_type>(recCount));
        h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >());
        type_index[colIndex] = d_columns_int.size()-1;
    }
    else { // already exists, may need to resize it
        if(d_columns_int[type_index[colIndex]].size() < recCount) {
            d_columns_int[type_index[colIndex]].resize(recCount);
        };
    };
    // copy data to d columns
    thrust::device_ptr<int_type> d_col((int_type*)col);
    thrust::copy(d_col, d_col+recCount, d_columns_int[type_index[colIndex]].begin());
};
// Add (or overwrite) a device float column named colName at slot colIndex,
// copying recCount values from the device buffer `col` into it.
void CudaSet::addDeviceColumn(float_type* col, int colIndex, string colName, unsigned int recCount)
{
    if (columnNames.find(colName) == columnNames.end()) {
        // New column: register the name and allocate fresh storage.
        columnNames[colName] = colIndex;
        type[colIndex] = 1;
        d_columns_float.push_back(thrust::device_vector<float_type>(recCount));
        h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >());
        type_index[colIndex] = d_columns_float.size()-1;
    }
    else { // already exists, may need to resize it
        if(d_columns_float[type_index[colIndex]].size() < recCount)
            d_columns_float[type_index[colIndex]].resize(recCount);
    };
    thrust::device_ptr<float_type> d_col((float_type*)col);
    thrust::copy(d_col, d_col+recCount, d_columns_float[type_index[colIndex]].begin());
};
// Write one segment (mCount records starting at host offset `offset`) of
// every column to its per-column segment file <file_name>.<col>.<segment>.
// If op_sort is non-empty the segment is first sorted by those key columns.
// `d` is caller-provided device scratch large enough for the widest column;
// check_type/check_val control when per-column .header files are rewritten.
void CudaSet::compress(char* file_name, unsigned int offset, unsigned int check_type, unsigned int check_val, void* d, unsigned int mCount)
{
    char str[100];
    char col_pos[3];
    thrust::device_vector<unsigned int> permutation;
    total_count = total_count + mCount;
    total_segments = total_segments + 1;
    if (mCount > total_max)
        total_max = mCount;
    if(!op_sort.empty()) { //sort the segment
        //copy the key columns to device
        queue<string> sf(op_sort);
        permutation.resize(mRecCount);
        thrust::sequence(permutation.begin(), permutation.begin() + mRecCount,0,1);
        unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation.data());
        void* temp;
        cout << "sorting " << getFreeMem() << endl;
        // Sort scratch must hold the widest key column.
        unsigned int max_c = max_char(this, sf);
        if(max_c > float_size)
            CUDA_SAFE_CALL(hipMalloc((void **) &temp, mRecCount*max_c));
        else
            CUDA_SAFE_CALL(hipMalloc((void **) &temp, mRecCount*float_size));
        string sort_type = "ASC";
        // Multi-key sort: refine the permutation one key column at a time.
        while(!sf.empty()) {
            int colInd = columnNames[sf.front()];
            allocColumnOnDevice(colInd, maxRecs);
            CopyColumnToGpu(colInd);
            if (type[colInd] == 0)
                update_permutation(d_columns_int[type_index[colInd]], raw_ptr, mRecCount, sort_type, (int_type*)temp);
            else if (type[colInd] == 1)
                update_permutation(d_columns_float[type_index[colInd]], raw_ptr, mRecCount, sort_type, (float_type*)temp);
            else {
                update_permutation_char(d_columns_char[type_index[colInd]], raw_ptr, mRecCount, sort_type, (char*)temp, char_size[type_index[colInd]]);
            };
            deAllocColumnOnDevice(colInd);
            sf.pop();
        };
        hipFree(temp);
    };
    for(unsigned int i = 0; i< mColumnCount; i++) {
        // Build <file_name>.<col>.<segment> for this column's segment file.
        strcpy(str, file_name);
        strcat(str,".");
        itoaa(cols[i],col_pos);
        strcat(str,col_pos);
        curr_file = str;
        strcat(str,".");
        itoaa(total_segments-1,col_pos);
        strcat(str,col_pos);
        if(!op_sort.empty()) {
            allocColumnOnDevice(i, maxRecs);
            CopyColumnToGpu(i);
        };
        if(type[i] == 0) {
            thrust::device_ptr<int_type> d_col((int_type*)d);
            if(!op_sort.empty()) {
                thrust::gather(permutation.begin(), permutation.end(), d_columns_int[type_index[i]].begin(), d_col);
            }
            else {
                thrust::copy(h_columns_int[type_index[i]].begin() + offset, h_columns_int[type_index[i]].begin() + offset + mCount, d_col);
            };
            pfor_compress( d, mCount*int_size, str, h_columns_int[type_index[i]], 0, 0);
        }
        else if(type[i] == 1) {
            if(decimal[i]) {
                // Decimal floats: scale to 64-bit ints, then pfor-compress.
                thrust::device_ptr<float_type> d_col((float_type*)d);
                if(!op_sort.empty()) {
                    thrust::gather(permutation.begin(), permutation.end(), d_columns_float[type_index[i]].begin(), d_col);
                }
                else {
                    thrust::copy(h_columns_float[type_index[i]].begin() + offset, h_columns_float[type_index[i]].begin() + offset + mCount, d_col);
                };
                thrust::device_ptr<long long int> d_col_dec((long long int*)d);
                thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
                pfor_compress( d, mCount*float_size, str, h_columns_float[type_index[i]], 1, 0);
            }
            else { // do not compress -- float
                thrust::device_ptr<float_type> d_col((float_type*)d);
                if(!op_sort.empty()) {
                    thrust::gather(permutation.begin(), permutation.end(), d_columns_float[type_index[i]].begin(), d_col);
                    thrust::copy(d_col, d_col+mRecCount, h_columns_float[type_index[i]].begin());
                };
                fstream binary_file(str,ios::out|ios::binary|fstream::app);
                binary_file.write((char *)&mCount, 4);
                binary_file.write((char *)(h_columns_float[type_index[i]].data() + offset),mCount*float_size);
                unsigned int comp_type = 3;   // 3 = uncompressed float block
                binary_file.write((char *)&comp_type, 4);
                binary_file.close();
            };
        }
        else { //char
            if(!op_sort.empty()) {
                // Apply the sort permutation to the char data on the host.
                unsigned int* h_permutation = new unsigned int[mRecCount];
                thrust::copy(permutation.begin(), permutation.end(), h_permutation);
                char* t = new char[char_size[type_index[i]]*mRecCount];
                apply_permutation_char_host(h_columns_char[type_index[i]], h_permutation, mRecCount, t, char_size[type_index[i]]);
                delete [] h_permutation;   // FIX: was never freed -- leaked per sorted segment
                thrust::copy(t, t+ char_size[type_index[i]]*mRecCount, h_columns_char[type_index[i]]);
                delete [] t;
            };
            compress_char(str, i, mCount, offset);
        };
        // Rewrite the column header when finishing a fact load (check_type 1)
        // or on the first pass (check_val == 0) otherwise.
        if(check_type == 1) {
            if(fact_file_loaded) {
                writeHeader(file_name, cols[i]);
            }
        }
        else {
            if(check_val == 0) {
                writeHeader(file_name, cols[i]);
            };
        };
    };
    permutation.resize(0);
    permutation.shrink_to_fit();
}
// Append this set's header record (row count, segment count, max segment
// size, and the compression-count entry) to "<file>.<col>.header".
void CudaSet::writeHeader(char* file_name, unsigned int col) {
    char col_pos[3];
    itoaa(col, col_pos);
    // cnt_counts is keyed by "<file>.<col>" WITHOUT the ".header" suffix.
    string base = string(file_name) + "." + col_pos;
    string hdr = base + ".header";
    fstream binary_file(hdr.c_str(), ios::out|ios::binary|ios::app);
    binary_file.write((char *)&total_count, 8);
    binary_file.write((char *)&total_segments, 4);
    binary_file.write((char *)&total_max, 4);
    binary_file.write((char *)&cnt_counts[base], 4);
    binary_file.close();
};
// Append the list of presorted column indices to "<file>.sort":
// a count followed by one 4-byte index per sort column.
void CudaSet::writeSortHeader(char* file_name)
{
    string fname = string(file_name) + ".sort";
    fstream binary_file(fname.c_str(), ios::out|ios::binary|ios::app);
    unsigned int idx = op_sort.size();
    binary_file.write((char *)&idx, 4);
    // Walk a copy of the queue so op_sort itself is left untouched.
    for (queue<string> os(op_sort); !os.empty(); os.pop()) {
        idx = columnNames[os.front()];
        binary_file.write((char *)&idx, 4);
    }
    binary_file.close();
}
// Persist this set to disk. Three modes:
//  * binary == 0       : dump rows as delimited text to file_name;
//  * binary && text_source : compress columns loaded from a text file;
//  * binary && !text_source: re-compress columns that came from binary files.
// `limit` caps the number of rows written (0 = all).
void CudaSet::Store(char* file_name, char* sep, unsigned int limit, bool binary )
{
if (mRecCount == 0 && binary == 1) { // write tails
for(unsigned int i = 0; i< mColumnCount; i++) {
writeHeader(file_name, cols[i]);
};
return;
};
unsigned int mCount, cnt;
// mCount = number of rows actually written.
if(limit != 0 && limit < mRecCount)
mCount = limit;
else
mCount = mRecCount;
if(binary == 0) {
// ---- text output: stream segments to the host and print row by row ----
char buffer [33];
queue<string> op_vx;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
op_vx.push((*it).first);
curr_segment = 1000000;
FILE *file_pr = fopen(file_name, "w");
if (file_pr == NULL)
cout << "Could not open file " << file_name << endl;
// When rows are filtered (prm) or backed by another set (source),
// columns must first be materialized on the device, then copied to host.
if(prm.size() || source)
allocColumns(this, op_vx);
unsigned int curr_seg = 0, cnt = 0;
unsigned curr_count, sum_printed = 0;
while(sum_printed < mCount) {
if(prm.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
// if host arrays are empty
unsigned int olRecs = mRecCount;
resize(mRecCount);
mRecCount = olRecs;
CopyToHost(0,mRecCount);
if(sum_printed + mRecCount <= mCount)
curr_count = mRecCount;
else {
curr_count = mCount - sum_printed;
};
}
else
curr_count = mCount;
sum_printed = sum_printed + mRecCount;
string ss;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < mColumnCount; j++) {
if (type[j] == 0) {
sprintf(buffer, "%lld", (h_columns_int[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else if (type[j] == 1) {
sprintf(buffer, "%.2f", (h_columns_float[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else {
// Fixed-width char field: copy out and strip padding.
ss.assign(h_columns_char[type_index[j]] + (i*char_size[type_index[j]]), char_size[type_index[j]]);
trim(ss);
fputs(ss.c_str(), file_pr);
fputs(sep, file_pr);
};
};
// NOTE(review): compares i against mCount but the loop runs to
// curr_count — across multiple segments this looks off; confirm
// the intended "no newline after last row" behavior.
if (i != mCount -1)
fputs("\n",file_pr);
};
curr_seg++;
};
fclose(file_pr);
}
else if(text_source) { //writing a binary file using a text file as a source
//char str[100];
//char col_pos[3];
// Scratch device buffer shared by all columns during compression.
void* d;
CUDA_SAFE_CALL(hipMalloc((void **) &d, mCount*float_size));
compress(file_name, 0, 1, 0, d, mCount);
writeSortHeader(file_name);
/*for(unsigned int i = 0; i< mColumnCount; i++) {
strcpy(str, file_name);
strcat(str,".");
itoaa(cols[i],col_pos);
strcat(str,col_pos);
curr_file = str;
strcat(str,".");
itoaa(total_segments-1,col_pos);
strcat(str,col_pos);
if(type[i] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::copy(h_columns_int[type_index[i]].begin(), h_columns_int[type_index[i]].begin() + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[type_index[i]], 0, 0);
}
else if(type[i] == 1) {
if(decimal[i]) {
thrust::device_ptr<float_type> d_col((float_type*)d);
thrust::copy(h_columns_float[type_index[i]].begin(), h_columns_float[type_index[i]].begin() + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[type_index[i]], 1, 0);
}
else { // do not compress -- float
fstream binary_file(str,ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[type_index[i]].data()),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else { //char
compress_char(str, i, mCount, 0);
};
if(fact_file_loaded) {
writeHeader(file_name, cols[i]);
};
};
*/
for(unsigned int i = 0; i< mColumnCount; i++)
if(type[i] == 2)
deAllocColumnOnDevice(i);
hipFree(d);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
unsigned int offset = 0;
void* d;
// Size the scratch buffer to one processing chunk at most.
if(mRecCount < process_count) {
CUDA_SAFE_CALL(hipMalloc((void **) &d, mRecCount*float_size));
}
else {
CUDA_SAFE_CALL(hipMalloc((void **) &d, process_count*float_size));
};
if(!not_compressed) { // records are compressed, for example after filter op.
//decompress to host
queue<string> op_vx;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it ) {
op_vx.push((*it).first);
};
allocColumns(this, op_vx);
unsigned int oldCnt = mRecCount;
mRecCount = 0;
resize(oldCnt);
mRecCount = oldCnt;
// Decompress segment by segment, then re-compress to the target file.
for(unsigned int i = 0; i < segCount; i++) {
cnt = 0;
copyColumns(this, op_vx, i, cnt);
reset_offsets();
CopyToHost(0, mRecCount);
offset = offset + mRecCount;
compress(file_name, 0, 0, i - (segCount-1), d, mRecCount);
};
//mRecCount = offset;
}
else {
// now we have decompressed records on the host
//call setSegments and compress columns in every segment
segCount = mRecCount/process_count + 1;
offset = 0;
for(unsigned int z = 0; z < segCount; z++) {
// All segments but the last hold process_count rows (or fewer
// if the whole set fits in one chunk).
if(z < segCount-1) {
if(mRecCount < process_count) {
mCount = mRecCount;
}
else {
mCount = process_count;
}
}
else
mCount = mRecCount - (segCount-1)*process_count;
compress(file_name, offset, 0, z - (segCount-1), d, mCount);
offset = offset + mCount;
};
// NOTE(review): `d` is freed only on this branch; the !not_compressed
// branch above appears to leak it — confirm.
hipFree(d);
};
};
}
// Dictionary-encode a fixed-width char column and write it to file_name:
// [dict size][dict entries, len bytes each][fit_count][bits_encoded]
// [packed word count][value count][packed 64-bit words, MSB-first].
void CudaSet::compress_char(string file_name, unsigned int index, unsigned int mCount, unsigned int offset)
{
    std::map<string,unsigned int> dict;        // field text -> dictionary id
    std::vector<string> dict_ordered;          // entries in first-seen order
    std::vector<unsigned int> dict_val;        // per-row dictionary ids
    map<string,unsigned int>::iterator iter;
    unsigned int bits_encoded;
    char* field;
    unsigned int len = char_size[type_index[index]];
    // len+1 and an explicit terminator: strncpy does NOT NUL-terminate when the
    // source fills all `len` bytes, so `dict.find(field)` read past the buffer.
    field = new char[len+1];
    field[len] = '\0';
    for (unsigned int i = 0 ; i < mCount; i++) {
        strncpy(field, h_columns_char[type_index[index]] + (i+offset)*len, len);
        if((iter = dict.find(field)) != dict.end()) {
            dict_val.push_back(iter->second);
        }
        else {
            string f = field;
            // Take the size BEFORE inserting: in `dict[f] = dict.size()` the
            // evaluation order of the two sides was unspecified pre-C++17, so
            // the stored id could be off by one versus dict_val/dict_ordered.
            unsigned int new_id = dict.size();
            dict[f] = new_id;
            dict_val.push_back(new_id);
            dict_ordered.push_back(f);
        };
    };
    delete [] field;
    // Bits needed per id (+1 so the largest id is representable).
    bits_encoded = (unsigned int)ceil(log2(double(dict.size()+1)));
    if (bits_encoded == 0)
        bits_encoded = 1;   // empty input: avoid 64/0 below
    char *cc = new char[len+1];
    unsigned int sz = dict_ordered.size();
    // write to a file
    fstream binary_file(file_name.c_str(),ios::out|ios::binary);
    binary_file.write((char *)&sz, 4);
    for(unsigned int i = 0; i < dict_ordered.size(); i++) {
        memset(&cc[0], 0, len);
        strcpy(cc,dict_ordered[i].c_str());
        binary_file.write(cc, len);
    };
    delete [] cc;
    unsigned int fit_count = 64/bits_encoded;   // ids packed per 64-bit word
    unsigned long long int val = 0;
    binary_file.write((char *)&fit_count, 4);
    binary_file.write((char *)&bits_encoded, 4);
    unsigned int curr_cnt = 1;
    unsigned int vals_count = dict_val.size()/fit_count;
    if(!vals_count || dict_val.size()%fit_count)
        vals_count++;
    binary_file.write((char *)&vals_count, 4);
    unsigned int real_count = dict_val.size();
    binary_file.write((char *)&real_count, 4);
    for(unsigned int i = 0; i < dict_val.size(); i++) {
        val = val | dict_val[i];
        if(curr_cnt < fit_count)
            val = val << bits_encoded;
        if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
            if (curr_cnt < fit_count) {
                // NOTE(review): shifts (fit_count-curr_cnt)-1 extra slots because
                // one shift already happened above — verify this lines up with
                // the decompressor's expected bit layout.
                val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
            };
            curr_cnt = 1;
            binary_file.write((char *)&val, 8);
            val = 0;
        }
        else
            curr_cnt = curr_cnt + 1;
    };
    binary_file.close();
};
// Read up to process_count delimited rows from file_name into the host column
// buffers. Returns 1 when the whole file was consumed (file closed), else 0.
// NOTE(review): the `sep` parameter is currently ignored — the delimiter is
// hard-coded to '|'; confirm whether any caller passes something else.
int CudaSet::LoadBigFile(const char* file_name, const char* sep )
{
    char line[1000];   // rows longer than 999 chars are split by fgets
    unsigned int current_column, count = 0, index;
    char *p,*t;
    if (file_p == NULL)
        file_p = fopen(file_name, "r");
    if (file_p == NULL) {
        cout << "Could not open file " << file_name << endl;
        exit(0);
    };
    // Map 1-based file column positions to our internal column indices.
    map<unsigned int,unsigned int> col_map;
    for(unsigned int i = 0; i < mColumnCount; i++) {
        col_map[cols[i]] = i;
    };
    while (count < process_count && fgets(line, 1000, file_p) != NULL) {
        strtok(line, "\n");   // strip the trailing newline
        current_column = 0;
        for(t=mystrtok(&p,line,'|');t;t=mystrtok(&p,0,'|')) {
            current_column++;
            if(col_map.find(current_column) == col_map.end()) {
                continue;   // this file column is not being loaded
            };
            index = col_map[current_column];
            if (type[index] == 0) {
                if (strchr(t,'-') == NULL) {
                    (h_columns_int[type_index[index]])[count] = atoll(t);
                }
                else { // handling possible dates: squeeze "YYYY-MM-DD" to YYYYMMDD
                    // memmove, not strncpy: source and destination overlap here,
                    // which is undefined behavior for strncpy.
                    memmove(t+4,t+5,2);
                    memmove(t+6,t+8,2);
                    t[8] = '\0';
                    (h_columns_int[type_index[index]])[count] = atoll(t);
                };
            }
            else if (type[index] == 1) {
                (h_columns_float[type_index[index]])[count] = atoff(t);
            }
            else {//char: fixed-width slot of char_size bytes per row
                strcpy(h_columns_char[type_index[index]] + count*char_size[type_index[index]], t);
            }
        };
        count++;
    };
    mRecCount = count;
    if(count < process_count) {
        fclose(file_p);
        return 1;
    }
    else
        return 0;
};
// Release host-side column storage and per-column metadata for this set.
void CudaSet::free() {
    // was `if (!seq)`: the inverted test deleted only when seq was NULL
    // (a no-op) and leaked the sequence buffer otherwise.
    if (seq)
        delete seq;
    for(unsigned int i = 0; i < mColumnCount; i++ ) {
        // Raw char buffers are freed outright; vector-backed columns are
        // shrunk so pinned memory is returned immediately.
        if(type[i] == 2 && h_columns_char[type_index[i]] && prm.empty()) {
            delete [] h_columns_char[type_index[i]];
            h_columns_char[type_index[i]] = NULL;
        }
        else {
            if(type[i] == 0 ) {
                h_columns_int[type_index[i]].resize(0);
                h_columns_int[type_index[i]].shrink_to_fit();
            }
            else if(type[i] == 1) {
                h_columns_float[type_index[i]].resize(0);
                h_columns_float[type_index[i]].shrink_to_fit();
            };
        }
    }
    if(!prm.empty()) { // free the sources
        string some_field;
        map<string,int>::iterator it=columnNames.begin();
        some_field = (*it).first;
        CudaSet* t = varNames[setMap[some_field]];
        t->deAllocOnDevice();
    };
    // new[] allocations require array delete (was plain `delete`, UB).
    delete [] type;
    delete [] cols;
    // NOTE(review): `decimal` is also new[]'d in initialize() but never freed
    // here — confirm ownership before adding `delete [] decimal;`.
    if(!columnGroups.empty() && mRecCount !=0 && grp != NULL)
        hipFree(grp);
    for(unsigned int i = 0; i < prm.size(); i++)
        delete [] prm[i];
};
// Element-wise AND of two device bool columns, stored back into column1.
// column2's device buffer is released; the caller keeps column1.
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
    thrust::device_ptr<bool> lhs(column1);
    thrust::device_ptr<bool> rhs(column2);
    thrust::transform(lhs, lhs + mRecCount, rhs, lhs, thrust::logical_and<bool>());
    thrust::device_free(rhs);
    return column1;
}
// Element-wise OR of two device bool columns, stored back into column1.
// column2's device buffer is released; the caller keeps column1.
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
    thrust::device_ptr<bool> lhs(column1);
    thrust::device_ptr<bool> rhs(column2);
    thrust::transform(lhs, lhs + mRecCount, rhs, lhs, thrust::logical_or<bool>());
    thrust::device_free(rhs);
    return column1;
}
// Scalar-vs-scalar integer comparison: evaluate once on the host and
// broadcast the resulting boolean into a fresh device column (caller owns it).
// op_type codes: 2:'>' 1:'<' 6:'>=' 5:'<=' 4:'=' otherwise '!='.
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
    bool res;
    switch (op_type) {
    case 2:  res = (d > s);  break;   // >
    case 1:  res = (d < s);  break;   // <
    case 6:  res = (d >= s); break;   // >=
    case 5:  res = (d <= s); break;   // <=
    case 4:  res = (d == s); break;   // =
    default: res = (d != s); break;   // !=
    };
    thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
    // Step of 0 fills the whole column with `res`.
    thrust::sequence(p, p+mRecCount,res,(bool)0);
    return thrust::raw_pointer_cast(p);
};
// Scalar-vs-scalar float comparison with EPSILON tolerance (floats are never
// compared exactly); the single result is broadcast into a device column.
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
    bool gt = (d - s) > EPSILON;                              // d strictly above s
    bool lt = (s - d) > EPSILON;                              // d strictly below s
    bool eq = ((d - s) < EPSILON) && ((d - s) > -EPSILON);    // |d - s| within EPSILON
    bool res;
    switch (op_type) {
    case 2:  res = gt;       break;   // >
    case 1:  res = lt;       break;   // <
    case 6:  res = gt || eq; break;   // >=
    case 5:  res = lt || eq; break;   // <=
    case 4:  res = eq;       break;   // =
    default: res = !eq;      break;   // !=
    };
    thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
    thrust::sequence(p, p+mRecCount,res,(bool)0);
    return thrust::raw_pointer_cast(p);
}
// Element-wise compare of a device int column against the constant d.
// Returns a freshly device_malloc'd bool column owned by the caller.
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type)
{
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    thrust::device_ptr<int_type> col(column1);
    switch (op_type) {
    case 2:  // >
        thrust::transform(col, col+mRecCount, thrust::make_constant_iterator(d), out, thrust::greater<int_type>());
        break;
    case 1:  // <
        thrust::transform(col, col+mRecCount, thrust::make_constant_iterator(d), out, thrust::less<int_type>());
        break;
    case 6:  // >=
        thrust::transform(col, col+mRecCount, thrust::make_constant_iterator(d), out, thrust::greater_equal<int_type>());
        break;
    case 5:  // <=
        thrust::transform(col, col+mRecCount, thrust::make_constant_iterator(d), out, thrust::less_equal<int_type>());
        break;
    case 4:  // =
        thrust::transform(col, col+mRecCount, thrust::make_constant_iterator(d), out, thrust::equal_to<int_type>());
        break;
    default: // !=
        thrust::transform(col, col+mRecCount, thrust::make_constant_iterator(d), out, thrust::not_equal_to<int_type>());
        break;
    }
    return thrust::raw_pointer_cast(out);
}
// Element-wise compare of a device float column against the constant d,
// using the epsilon-aware functors (f_greater etc.) for float semantics.
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    thrust::device_ptr<float_type> col(column1);
    switch (op_type) {
    case 2:  // >
        thrust::transform(col, col+mRecCount, thrust::make_constant_iterator(d), out, f_greater());
        break;
    case 1:  // <
        thrust::transform(col, col+mRecCount, thrust::make_constant_iterator(d), out, f_less());
        break;
    case 6:  // >=
        thrust::transform(col, col+mRecCount, thrust::make_constant_iterator(d), out, f_greater_equal_to());
        break;
    case 5:  // <=
        thrust::transform(col, col+mRecCount, thrust::make_constant_iterator(d), out, f_less_equal());
        break;
    case 4:  // =
        thrust::transform(col, col+mRecCount, thrust::make_constant_iterator(d), out, f_equal_to());
        break;
    default: // !=
        thrust::transform(col, col+mRecCount, thrust::make_constant_iterator(d), out, f_not_equal_to());
        break;
    }
    return thrust::raw_pointer_cast(out);
}
// Element-wise compare of two device int columns; result is a fresh
// device bool column owned by the caller.
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type)
{
    thrust::device_ptr<int_type> lhs(column1);
    thrust::device_ptr<int_type> rhs(column2);
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    switch (op_type) {
    case 2:  // >
        thrust::transform(lhs, lhs+mRecCount, rhs, out, thrust::greater<int_type>());
        break;
    case 1:  // <
        thrust::transform(lhs, lhs+mRecCount, rhs, out, thrust::less<int_type>());
        break;
    case 6:  // >=
        thrust::transform(lhs, lhs+mRecCount, rhs, out, thrust::greater_equal<int_type>());
        break;
    case 5:  // <=
        thrust::transform(lhs, lhs+mRecCount, rhs, out, thrust::less_equal<int_type>());
        break;
    case 4:  // =
        thrust::transform(lhs, lhs+mRecCount, rhs, out, thrust::equal_to<int_type>());
        break;
    default: // !=
        thrust::transform(lhs, lhs+mRecCount, rhs, out, thrust::not_equal_to<int_type>());
        break;
    }
    return thrust::raw_pointer_cast(out);
}
// Element-wise compare of two device float columns using the epsilon-aware
// functors; result is a fresh device bool column owned by the caller.
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
    thrust::device_ptr<float_type> lhs(column1);
    thrust::device_ptr<float_type> rhs(column2);
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    switch (op_type) {
    case 2:  // >
        thrust::transform(lhs, lhs+mRecCount, rhs, out, f_greater());
        break;
    case 1:  // <
        thrust::transform(lhs, lhs+mRecCount, rhs, out, f_less());
        break;
    case 6:  // >=
        thrust::transform(lhs, lhs+mRecCount, rhs, out, f_greater_equal_to());
        break;
    case 5:  // <=
        thrust::transform(lhs, lhs+mRecCount, rhs, out, f_less_equal());
        break;
    case 4:  // =
        thrust::transform(lhs, lhs+mRecCount, rhs, out, f_equal_to());
        break;
    default: // !=
        thrust::transform(lhs, lhs+mRecCount, rhs, out, f_not_equal_to());
        break;
    }
    return thrust::raw_pointer_cast(out);
}
// Mixed-type compare: promote the int column to float into a scratch buffer,
// compare element-wise with float semantics, then free the scratch buffer.
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
    thrust::device_ptr<float_type> lhs(column1);
    thrust::device_ptr<int_type> icol(column2);
    thrust::device_ptr<float_type> rhs = thrust::device_malloc<float_type>(mRecCount);
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    thrust::transform(icol, icol + mRecCount, rhs, long_to_float_type());
    switch (op_type) {
    case 2:  // >
        thrust::transform(lhs, lhs+mRecCount, rhs, out, f_greater());
        break;
    case 1:  // <
        thrust::transform(lhs, lhs+mRecCount, rhs, out, f_less());
        break;
    case 6:  // >=
        thrust::transform(lhs, lhs+mRecCount, rhs, out, f_greater_equal_to());
        break;
    case 5:  // <=
        thrust::transform(lhs, lhs+mRecCount, rhs, out, f_less_equal());
        break;
    case 4:  // =
        thrust::transform(lhs, lhs+mRecCount, rhs, out, f_equal_to());
        break;
    default: // !=
        thrust::transform(lhs, lhs+mRecCount, rhs, out, f_not_equal_to());
        break;
    }
    thrust::device_free(rhs);
    return thrust::raw_pointer_cast(out);
}
// Arithmetic between an int column and a float column. The int column is
// promoted to float into the result buffer, which is then combined in place
// with the float column. reverse != 0 swaps operand order (matters for
// MINUS and DIV). Caller owns the returned device buffer.
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, int reverse)
{
    thrust::device_ptr<float_type> out = thrust::device_malloc<float_type>(mRecCount);
    thrust::device_ptr<int_type> icol(column1);
    thrust::transform(icol, icol + mRecCount, out, long_to_float_type());  // promote into result
    thrust::device_ptr<float_type> fcol(column2);
    thrust::device_ptr<float_type> lhs = (reverse == 0) ? fcol : out;
    thrust::device_ptr<float_type> rhs = (reverse == 0) ? out : fcol;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::multiplies<float_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::plus<float_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::minus<float_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::divides<float_type>());
    return thrust::raw_pointer_cast(out);
}
// Arithmetic between two int columns into a fresh device buffer.
// reverse != 0 swaps operand order (only observable for MINUS and DIV).
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, int reverse)
{
    thrust::device_ptr<int_type> out = thrust::device_malloc<int_type>(mRecCount);
    thrust::device_ptr<int_type> a(column1);
    thrust::device_ptr<int_type> b(column2);
    thrust::device_ptr<int_type> lhs = (reverse == 0) ? a : b;
    thrust::device_ptr<int_type> rhs = (reverse == 0) ? b : a;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::multiplies<int_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::plus<int_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::minus<int_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::divides<int_type>());
    return thrust::raw_pointer_cast(out);
}
// Arithmetic between two float columns into a fresh device buffer.
// reverse != 0 swaps operand order (only observable for MINUS and DIV).
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, int reverse)
{
    thrust::device_ptr<float_type> out = thrust::device_malloc<float_type>(mRecCount);
    thrust::device_ptr<float_type> a(column1);
    thrust::device_ptr<float_type> b(column2);
    thrust::device_ptr<float_type> lhs = (reverse == 0) ? a : b;
    thrust::device_ptr<float_type> rhs = (reverse == 0) ? b : a;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::multiplies<float_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::plus<float_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::minus<float_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::divides<float_type>());
    return thrust::raw_pointer_cast(out);
}
// Arithmetic between an int column and the constant d. The result buffer is
// pre-filled with d and then combined in place with the column; reverse != 0
// makes d the left operand (matters for MINUS and DIV).
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, int reverse)
{
    thrust::device_ptr<int_type> out = thrust::device_malloc<int_type>(mRecCount);
    thrust::fill(out, out + mRecCount, d);   // out initially holds the constant
    thrust::device_ptr<int_type> col(column1);
    thrust::device_ptr<int_type> lhs = (reverse == 0) ? col : out;
    thrust::device_ptr<int_type> rhs = (reverse == 0) ? out : col;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::multiplies<int_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::plus<int_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::minus<int_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::divides<int_type>());
    return thrust::raw_pointer_cast(out);
}
// Arithmetic between an int column and the float constant d. The int column
// is promoted to float in a scratch buffer; the result buffer is pre-filled
// with d and combined in place. reverse != 0 makes d the left operand.
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, int reverse)
{
    thrust::device_ptr<float_type> out = thrust::device_malloc<float_type>(mRecCount);
    thrust::fill(out, out + mRecCount, d);   // out initially holds the constant
    thrust::device_ptr<int_type> icol(column1);
    thrust::device_ptr<float_type> conv = thrust::device_malloc<float_type>(mRecCount);
    thrust::transform(icol, icol + mRecCount, conv, long_to_float_type());
    thrust::device_ptr<float_type> lhs = (reverse == 0) ? conv : out;
    thrust::device_ptr<float_type> rhs = (reverse == 0) ? out : conv;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::multiplies<float_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::plus<float_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::minus<float_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::divides<float_type>());
    thrust::device_free(conv);
    return thrust::raw_pointer_cast(out);
}
// Arithmetic between a float column and the constant d. The result buffer
// is pre-filled with d (equivalent to the constant-iterator form) and then
// combined in place with the column; reverse != 0 makes d the left operand.
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,int reverse)
{
    thrust::device_ptr<float_type> out = thrust::device_malloc<float_type>(mRecCount);
    thrust::fill(out, out + mRecCount, d);   // out initially holds the constant
    thrust::device_ptr<float_type> col(column1);
    thrust::device_ptr<float_type> lhs = (reverse == 0) ? col : out;
    thrust::device_ptr<float_type> rhs = (reverse == 0) ? out : col;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::multiplies<float_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::plus<float_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::minus<float_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::divides<float_type>());
    return thrust::raw_pointer_cast(out);
}
// Initialize a CudaSet from on-disk compressed column files (DIM tables).
// Reads "<file>.sort" for the presorted-column list and each
// "<file>.<col>.header" for the per-segment element count used to size the
// pinned host buffers.
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs, char* file_name)
{
    mColumnCount = nameRef.size();
    type = new unsigned int[mColumnCount];
    cols = new unsigned int[mColumnCount];
    decimal = new bool[mColumnCount];
    unsigned int cnt;
    file_p = NULL;
    FILE* f;
    char f1[100];   // NOTE(review): fixed buffer — very long file names overflow; confirm inputs
    prealloc_char_size = 0;
    not_compressed = 0;
    mRecCount = Recs;
    oldRecCount = Recs;
    load_file_name = file_name;
    strcpy(f1, file_name);
    strcat(f1, ".sort");
    cout << "opening " << f1 << endl;
    f = fopen (f1 , "rb" );
    if(f != NULL) {   // the .sort file is optional
        unsigned int sz, idx;
        fread((char *)&sz, 4, 1, f);
        for(unsigned int j = 0; j < sz; j++) {
            fread((char *)&idx, 4, 1, f);
            sorted_fields.push(idx);
            //cout << "presorted on " << idx << endl;
        };
        fclose(f);
    };
    tmp_table = 0;
    for(unsigned int i=0; i < mColumnCount; i++) {
        columnNames[nameRef.front()] = i;
        cols[i] = colsRef.front();
        seq = 0;
        strcpy(f1, file_name);
        strcat(f1,".");
        char col_pos[3];
        itoaa(colsRef.front(),col_pos);
        strcat(f1,col_pos); // read the size of a segment
        strcat(f1, ".header");
        f = fopen (f1 , "rb" );
        if (f == NULL) {
            // was unchecked: a missing header file crashed inside fread/fclose
            cout << "Could not open file " << f1 << endl;
            exit(0);
        };
        // After five 4-byte reads cnt holds the last header word — the
        // per-column count written by writeHeader (cnt_counts entry).
        for(unsigned int j = 0; j < 5; j++)
            fread((char *)&cnt, 4, 1, f);
        fclose(f);
        //cout << "creating " << f1 << " " << cnt << endl;
        if ((typeRef.front()).compare("int") == 0) {
            type[i] = 0;
            decimal[i] = 0;
            // "+ 9" slack mirrors the original sizing of the pinned buffer.
            h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >(cnt + 9));
            d_columns_int.push_back(thrust::device_vector<int_type>());
            type_index[i] = h_columns_int.size()-1;
        }
        else if ((typeRef.front()).compare("float") == 0) {
            type[i] = 1;
            decimal[i] = 0;
            h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >(cnt + 9));
            d_columns_float.push_back(thrust::device_vector<float_type >());
            type_index[i] = h_columns_float.size()-1;
        }
        else if ((typeRef.front()).compare("decimal") == 0) {
            type[i] = 1;
            decimal[i] = 1;
            h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >(cnt + 9));
            d_columns_float.push_back(thrust::device_vector<float_type>());
            type_index[i] = h_columns_float.size()-1;
        }
        else {   // fixed-width char column; width comes from sizeRef
            type[i] = 2;
            decimal[i] = 0;
            h_columns_char.push_back(NULL);
            d_columns_char.push_back(NULL);
            char_size.push_back(sizeRef.front());
            type_index[i] = h_columns_char.size()-1;
        };
        nameRef.pop();
        typeRef.pop();
        sizeRef.pop();
        colsRef.pop();
    };
};
// Describe an in-memory table: Recs rows across the columns named in nameRef,
// typed by typeRef ("int" / "float" / "decimal" / anything else = char of
// width sizeRef).
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs)
{
    mColumnCount = nameRef.size();
    type = new unsigned int[mColumnCount];
    cols = new unsigned int[mColumnCount];
    decimal = new bool[mColumnCount];
    prealloc_char_size = 0;
    file_p = NULL;
    tmp_table = 0;
    mRecCount = Recs;
    oldRecCount = Recs;
    segCount = 1;
    for (unsigned int i = 0; i < mColumnCount; i++) {
        const string col_type = typeRef.front();
        columnNames[nameRef.front()] = i;
        cols[i] = colsRef.front();
        seq = 0;
        if (col_type == "int") {
            type[i] = 0;
            decimal[i] = 0;
            h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
            d_columns_int.push_back(thrust::device_vector<int_type>());
            type_index[i] = h_columns_int.size()-1;
        }
        else if (col_type == "float" || col_type == "decimal") {
            // "decimal" columns share float storage but carry the decimal flag.
            type[i] = 1;
            decimal[i] = (col_type == "decimal");
            h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
            d_columns_float.push_back(thrust::device_vector<float_type>());
            type_index[i] = h_columns_float.size()-1;
        }
        else {   // fixed-width char column
            type[i] = 2;
            decimal[i] = 0;
            h_columns_char.push_back(NULL);
            d_columns_char.push_back(NULL);
            char_size.push_back(sizeRef.front());
            type_index[i] = h_columns_char.size()-1;
        }
        nameRef.pop();
        typeRef.pop();
        sizeRef.pop();
        colsRef.pop();
    }
};
// Bare-bones init: record/column counts and per-column metadata arrays only;
// column storage is attached elsewhere.
void CudaSet::initialize(unsigned int RecordCount, unsigned int ColumnCount)
{
    mRecCount = RecordCount;
    oldRecCount = RecordCount;
    mColumnCount = ColumnCount;
    prealloc_char_size = 0;
    type = new unsigned int[mColumnCount];
    cols = new unsigned int[mColumnCount];
    decimal = new bool[mColumnCount];
    seq = 0;
    // Default column ordering is the identity mapping.
    for (unsigned int j = 0; j < mColumnCount; j++)
        cols[j] = j;
};
// Initialize this set as the shape of a SELECT list: one column per entry in
// op_sel, with type/size metadata copied from the source sets registered in
// setMap/varNames. Storage starts empty (mRecCount = 0).
void CudaSet::initialize(queue<string> op_sel, queue<string> op_sel_as)
{
    mRecCount = 0;
    mColumnCount = op_sel.size();
    type = new unsigned int[mColumnCount];
    cols = new unsigned int[mColumnCount];
    decimal = new bool[mColumnCount];
    seq = 0;
    segCount = 1;
    not_compressed = 1;
    col_aliases = op_sel_as;
    prealloc_char_size = 0;
    unsigned int index;
    unsigned int i = 0;
    while(!op_sel.empty()) {
        if(!setMap.count(op_sel.front())) {
            cout << "couldn't find column " << op_sel.front() << endl;
            exit(0);
        };
        CudaSet* a = varNames[setMap[op_sel.front()]];
        if(i == 0)
            maxRecs = a->maxRecs;   // inherit segment sizing from the first source
        index = a->columnNames[op_sel.front()];
        cols[i] = i;
        // was `a->decimal[i]`: the flag must come from the SOURCE column's own
        // index, matching the sibling initialize(a, b, ...) overload which
        // uses `b->decimal[index]`.
        decimal[i] = a->decimal[index];
        columnNames[op_sel.front()] = i;
        if (a->type[index] == 0) {
            d_columns_int.push_back(thrust::device_vector<int_type>());
            h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
            type[i] = 0;
            type_index[i] = h_columns_int.size()-1;
        }
        else if ((a->type)[index] == 1) {
            d_columns_float.push_back(thrust::device_vector<float_type>());
            h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
            type[i] = 1;
            type_index[i] = h_columns_float.size()-1;
        }
        else {   // char column: copy the source's fixed width
            h_columns_char.push_back(NULL);
            d_columns_char.push_back(NULL);
            type[i] = 2;
            type_index[i] = h_columns_char.size()-1;
            char_size.push_back(a->char_size[a->type_index[index]]);
        };
        i++;
        op_sel.pop();
    };
}
// Initialize this set as the result schema of a join between a and b: one
// column per op_sel entry that exists in either input, metadata copied from
// whichever side owns the column (a is checked first).
// NOTE(review): the duplicate-name guard in the while condition stops the
// whole loop on the first already-added name instead of skipping it --
// preserved as-is, confirm against callers before changing.
void CudaSet::initialize(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
    mRecCount = 0;
    mColumnCount = 0;
    // count the distinct requested names that exist in either input
    queue<string> q_cnt(op_sel);
    unsigned int i = 0;
    set<string> field_names;
    while(!q_cnt.empty()) {
        if(a->columnNames.find(q_cnt.front()) != a->columnNames.end() || b->columnNames.find(q_cnt.front()) != b->columnNames.end()) {
            field_names.insert(q_cnt.front());
        };
        q_cnt.pop();
    }
    mColumnCount = field_names.size();
    type = new unsigned int[mColumnCount];
    cols = new unsigned int[mColumnCount];
    decimal = new bool[mColumnCount];
    maxRecs = b->maxRecs;
    map<string,int>::iterator it;
    seq = 0;
    segCount = 1;
    not_compressed = 1;
    col_aliases = op_sel_as;
    prealloc_char_size = 0;
    unsigned int index;
    i = 0;
    while(!op_sel.empty() && (columnNames.find(op_sel.front()) == columnNames.end())) {
        if((it = a->columnNames.find(op_sel.front())) != a->columnNames.end()) {
            index = it->second;
            cols[i] = i;
            // BUG FIX: read the decimal flag at the source column's position
            // (index), mirroring the b-branch below; the old code used i.
            decimal[i] = a->decimal[index];
            columnNames[op_sel.front()] = i;
            if (a->type[index] == 0) {
                d_columns_int.push_back(thrust::device_vector<int_type>());
                h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
                type[i] = 0;
                type_index[i] = h_columns_int.size()-1;
            }
            else if ((a->type)[index] == 1) {
                d_columns_float.push_back(thrust::device_vector<float_type>());
                h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
                type[i] = 1;
                type_index[i] = h_columns_float.size()-1;
            }
            else {
                h_columns_char.push_back(NULL);
                d_columns_char.push_back(NULL);
                type[i] = 2;
                type_index[i] = h_columns_char.size()-1;
                char_size.push_back(a->char_size[a->type_index[index]]);
            };
            i++;
        }
        else if((it = b->columnNames.find(op_sel.front())) != b->columnNames.end()) {
            index = it->second;
            columnNames[op_sel.front()] = i;
            cols[i] = i;
            decimal[i] = b->decimal[index];
            if ((b->type)[index] == 0) {
                d_columns_int.push_back(thrust::device_vector<int_type>());
                h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
                type[i] = 0;
                type_index[i] = h_columns_int.size()-1;
            }
            else if ((b->type)[index] == 1) {
                d_columns_float.push_back(thrust::device_vector<float_type>());
                h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
                type[i] = 1;
                type_index[i] = h_columns_float.size()-1;
            }
            else {
                h_columns_char.push_back(NULL);
                d_columns_char.push_back(NULL);
                type[i] = 2;
                type_index[i] = h_columns_char.size()-1;
                char_size.push_back(b->char_size[b->type_index[index]]);
            };
            i++;
        }
        op_sel.pop();
    };
};
// Map a comparison operator code to its counterpart when operand order is
// flipped. Codes: 1 '<', 2 '>', 5 '<=', 6 '>='; any other code is returned
// unchanged.
int_type reverse_op(int_type op_type)
{
    switch(op_type) {
    case 2:  return 5;      // '>'  -> '<='
    case 1:  return 6;      // '<'  -> '>='
    case 6:  return 1;      // '>=' -> '<'
    case 5:  return 2;      // '<=' -> '>'
    default: return op_type;
    }
}
// Number of bytes of GPU memory currently free on the active device.
size_t getFreeMem()
{
    size_t free_bytes = 0, total_bytes = 0;
    hipMemGetInfo(&free_bytes, &total_bytes);
    return free_bytes;
}
// Make sure every column named in fields has device storage available for a.
// When a carries a row permutation (a->prm non-empty), a single shared scratch
// buffer (global alloced_tmp) is grown to hold the widest column of one
// source segment; otherwise each not-yet-resident column gets its own device
// allocation sized to a->maxRecs.
void allocColumns(CudaSet* a, queue<string> fields)
{
if(!a->prm.empty()) {
// widest element size (bytes) among a's columns
unsigned int max_sz = max_tmp(a) ;
CudaSet* t = varNames[setMap[fields.front()]];
// grow the shared scratch buffer only when the current one is too small
if(max_sz*t->maxRecs > alloced_sz) {
if(alloced_sz) {
hipFree(alloced_tmp);
};
hipMalloc((void **) &alloced_tmp, max_sz*t->maxRecs);
alloced_sz = max_sz*t->maxRecs;
}
}
else {
while(!fields.empty()) {
if(setMap.count(fields.front()) > 0) {
unsigned int idx = a->columnNames[fields.front()];
// check whether this column already has device-side storage
bool onDevice = 0;
if(a->type[idx] == 0) {
if(a->d_columns_int[a->type_index[idx]].size() > 0) {
onDevice = 1;
}
}
else if(a->type[idx] == 1) {
if(a->d_columns_float[a->type_index[idx]].size() > 0) {
onDevice = 1;
};
}
else {
if((a->d_columns_char[a->type_index[idx]]) != NULL) {
onDevice = 1;
};
};
if (!onDevice) {
// NOTE(review): a->prm is always empty on this branch (outer else),
// so the largest_prm() path below looks unreachable -- confirm.
if(a->prm.empty()) {
a->allocColumnOnDevice(idx, a->maxRecs);
}
else {
a->allocColumnOnDevice(idx, largest_prm(a));
};
}
}
fields.pop();
};
};
}
// Largest per-segment record count recorded in a->prm_count; falls back to
// a->maxRecs when every count is zero (or there are no segments at all).
unsigned long long int largest_prm(CudaSet* a)
{
    unsigned long long int best = 0;
    for(unsigned int seg = 0; seg < a->prm_count.size(); seg++) {
        if(a->prm_count[seg] > best)
            best = a->prm_count[seg];
    };
    return best ? best : a->maxRecs;
}
// Materialize one segment of column `field` from source set t into set a,
// applying a's row permutation for that segment when it is of kind 'R'
// (explicit index list); otherwise the staged segment is copied wholesale.
// count is the running write offset into a's destination column.
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, unsigned int& count)
{
unsigned int tindex = t->columnNames[field];
unsigned int idx = a->columnNames[field];
//find the largest possible size of a gathered segment
if(!a->onDevice(idx)) {
unsigned int max_count = 0;
for(unsigned int i = 0; i < a->prm.size(); i++)
if (a->prm_count[i] > max_count)
max_count = a->prm_count[i];
a->allocColumnOnDevice(idx, max_count);
};
unsigned int g_size = a->prm_count[segment];
if(a->prm_index[segment] == 'R') {
if(a->prm_d.size() == 0) // find the largest prm segment
a->prm_d.resize(largest_prm(a));
// upload this segment's host-side index list once; curr_segment caches
// which segment is currently resident in prm_d
if(curr_segment != segment) {
hipMemcpy((void**)(thrust::raw_pointer_cast(a->prm_d.data())), (void**)a->prm[segment],
4*g_size, hipMemcpyHostToDevice);
curr_segment = segment;
};
mygather(tindex, idx, a, t, count, g_size);
}
else {
mycopy(tindex, idx, a, t, count, g_size);
};
a->mRecCount = g_size;
}
// Record count of one segment of a: every segment holds a->maxRecs records
// except the last, which holds the remainder relative to the global oldCount.
unsigned int getSegmentRecCount(CudaSet* a, unsigned int segment) {
    if (segment != a->segCount-1)
        return a->maxRecs;
    return oldCount - a->maxRecs*segment;
}
// Bring segment `segment` of every requested column onto the device for a.
// With a permutation present, the source segment is staged into the shared
// scratch buffer (alloced_switch) and gathered through the permutation;
// otherwise the column segment is copied directly. Duplicate field names are
// processed once. count is the running output offset passed to gatherColumns.
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, unsigned int& count)
{
set<string> uniques;
CudaSet *t;
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && setMap.count(fields.front()) > 0) {
if(!a->prm.empty()) {
t = varNames[setMap[fields.front()]];
if(a->prm_count[segment]) {
// route the staged segment through the shared scratch buffer
alloced_switch = 1;
t->CopyColumnToGpu(t->columnNames[fields.front()], segment);
gatherColumns(a, t, fields.front(), segment, count);
alloced_switch = 0;
}
else
a->mRecCount = 0;
}
else {
a->CopyColumnToGpu(a->columnNames[fields.front()], segment);
};
uniques.insert(fields.front());
};
fields.pop();
};
}
// Append one permutation-segment descriptor to b. val is the segment kind:
// 'A' takes the whole corresponding segment of a (record count added to b),
// any other kind contributes no records here.
void setPrm(CudaSet* a, CudaSet* b, char val, unsigned int segment) {
    b->prm.push_back(NULL);
    b->prm_index.push_back(val);
    if (val != 'A') {
        b->prm_count.push_back(0);
    }
    else {
        unsigned int seg_recs = getSegmentRecCount(a, segment);
        b->mRecCount = b->mRecCount + seg_recs;
        b->prm_count.push_back(seg_recs);
    };
}
// Gather g_size rows of source column tindex (set t) into destination column
// idx of set a, writing at position `offset`, using the device-resident
// permutation a->prm_d. When the global alloced_switch is set, the source
// data sits in the shared scratch buffer alloced_tmp instead of t's own
// device column.
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int offset, unsigned int g_size)
{
if(t->type[tindex] == 0) {
// integer column
if(!alloced_switch) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_int[t->type_index[tindex]].begin(), a->d_columns_int[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
d_col, a->d_columns_int[a->type_index[idx]].begin() + offset);
};
}
else if(t->type[tindex] == 1) {
// float column
if(!alloced_switch) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_float[t->type_index[tindex]].begin(), a->d_columns_float[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
d_col, a->d_columns_float[a->type_index[idx]].begin() + offset);
};
}
else {
// fixed-width char column: gather whole strings by index
if(!alloced_switch) {
str_gather((void*)thrust::raw_pointer_cast(a->prm_d.data()), g_size,
(void*)t->d_columns_char[t->type_index[tindex]], (void*)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), a->char_size[a->type_index[idx]] );
}
else {
str_gather((void*)thrust::raw_pointer_cast(a->prm_d.data()), g_size,
alloced_tmp, (void*)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), a->char_size[a->type_index[idx]] );
};
}
};
// Copy g_size rows of source column tindex (set t) into destination column
// idx of set a at position `offset`, without any permutation. When the global
// alloced_switch is set, the source data sits in the shared scratch buffer
// alloced_tmp instead of t's own device column.
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int offset, unsigned int g_size)
{
if(t->type[tindex] == 0) {
// integer column
if(!alloced_switch) {
thrust::copy(t->d_columns_int[t->type_index[tindex]].begin(), t->d_columns_int[t->type_index[tindex]].begin() + g_size,
a->d_columns_int[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_int[a->type_index[idx]].begin() + offset);
};
}
else if(t->type[tindex] == 1) {
// float column
if(!alloced_switch) {
thrust::copy(t->d_columns_float[t->type_index[tindex]].begin(), t->d_columns_float[t->type_index[tindex]].begin() + g_size,
a->d_columns_float[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_float[a->type_index[idx]].begin() + offset);
};
}
else {
// fixed-width char column: raw device-to-device byte copy
if(!alloced_switch) {
hipMemcpy((void**)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), (void**)t->d_columns_char[t->type_index[tindex]],
g_size*t->char_size[t->type_index[tindex]], hipMemcpyDeviceToDevice);
}
else {
hipMemcpy((void**)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), alloced_tmp,
g_size*t->char_size[t->type_index[tindex]], hipMemcpyDeviceToDevice);
};
};
};
// Load the join-relevant columns of `right` onto the device: every c1 column
// that exists in right (the join key f2 is excluded from c1 and re-appended
// last unless this is a string join). Returns the number of records copied;
// rcount receives the (post-permutation) record count used for allocation.
unsigned int load_queue(queue<string> c1, CudaSet* right, bool str_join, string f2, unsigned int &rcount)
{
queue<string> cc;
while(!c1.empty()) {
if(right->columnNames.find(c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() || str_join) {
cc.push(c1.front());
};
};
c1.pop();
};
if(!str_join && right->columnNames.find(f2) != right->columnNames.end()) {
cc.push(f2);
};
unsigned int cnt_r = 0;
if(!right->prm.empty()) {
allocColumns(right, cc);
// NOTE(review): accumulate's init value 0 makes the sum an int -- could
// overflow for very large permutations; confirm acceptable.
rcount = std::accumulate(right->prm_count.begin(), right->prm_count.end(), 0 );
}
else
rcount = right->mRecCount;
queue<string> ct(cc);
reset_offsets();
while(!ct.empty()) {
right->allocColumnOnDevice(right->columnNames[ct.front()], rcount);
ct.pop();
};
ct = cc;
if(right->prm.empty()) {
//copy all records
while(!ct.empty()) {
right->CopyColumnToGpu(right->columnNames[ct.front()]);
ct.pop();
};
cnt_r = right->mRecCount;
}
else {
//copy and gather all records
for(unsigned int i = 0; i < right->segCount; i++) {
reset_offsets();
copyColumns(right, cc, i, cnt_r);
cnt_r = cnt_r + right->prm_count[i];
};
};
return cnt_r;
}
// Widest fixed-size string column of a, in bytes (0 when a has none).
unsigned int max_char(CudaSet* a)
{
    unsigned int widest = 0;
    for(unsigned int i = 0; i < a->char_size.size(); i++) {
        if (a->char_size[i] > widest)
            widest = a->char_size[i];
    };
    return widest;
}
// Widest string column of a among the given field names, in bytes
// (0 when none of them is a char column).
unsigned int max_char(CudaSet* a, set<string> field_names)
{
    unsigned int widest = 0;
    for (set<string>::iterator it = field_names.begin(); it != field_names.end(); ++it) {
        unsigned int col = a->columnNames[*it];
        if (a->type[col] == 2 && a->char_size[a->type_index[col]] > widest)
            widest = a->char_size[a->type_index[col]];
    };
    return widest;
}
// Widest string column of a among the queued field names, in bytes
// (0 when none of them is a char column). Consumes its copy of the queue.
unsigned int max_char(CudaSet* a, queue<string> field_names)
{
    unsigned int widest = 0;
    while (!field_names.empty()) {
        unsigned int col = a->columnNames[field_names.front()];
        if (a->type[col] == 2 && a->char_size[a->type_index[col]] > widest)
            widest = a->char_size[a->type_index[col]];
        field_names.pop();
    };
    return widest;
}
// Size in bytes of the widest element type among a's columns: int_size for
// integer columns, float_size for floats, and the widest char column width.
unsigned int max_tmp(CudaSet* a)
{
    unsigned int widest = 0;
    for(unsigned int i = 0; i < a->mColumnCount; i++) {
        if(a->type[i] == 0) {
            if(int_size > widest)
                widest = int_size;
        }
        else if(a->type[i] == 1) {
            if(float_size > widest)
                widest = float_size;
        };
    };
    unsigned int widest_char = max_char(a);
    return (widest_char > widest) ? widest_char : widest;
}
// Zero every per-column string write offset accumulated by decompression.
void reset_offsets() {
    for (map<unsigned int, unsigned int>::iterator it = str_offset.begin();
         it != str_offset.end(); ++it)
        it->second = 0;
}
// Decide how many segments a must be split into so the selected columns fit
// comfortably in GPU memory, and size a->maxRecs accordingly. Non-char
// columns are costed at int_size bytes per record, char columns at their
// fixed width.
void setSegments(CudaSet* a, queue<string> cols)
{
    size_t mem_available = getFreeMem();
    size_t tot_sz = 0;
    unsigned int idx;
    while(!cols.empty()) {
        idx = a->columnNames[cols.front()];
        if(a->type[idx] != 2)
            tot_sz = tot_sz + int_size;
        else
            tot_sz = tot_sz + a->char_size[a->type_index[idx]];
        cols.pop();
    };
    // BUG FIX: do the size arithmetic in size_t. The old unsigned int product
    // mRecCount*tot_sz overflowed for tables over 4 GB, which silently
    // skipped segmentation and caused device allocation failures later.
    size_t total_bytes = (size_t)a->mRecCount * tot_sz;
    if(total_bytes > mem_available/3) { //default is 3
        a->segCount = total_bytes/(mem_available/5) + 1;
        a->maxRecs = (a->mRecCount/a->segCount)+1;
    };
}
// Gather the device-side keys through the current permutation into tmp, then
// stable-sort tmp (ascending unless SortType is "DESC") and update the
// permutation to reflect the new order.
void update_permutation_char(char* key, unsigned int* permutation, unsigned int RecCount, string SortType, char* tmp, unsigned int len)
{
    str_gather((void*)permutation, RecCount, (void*)key, (void*)tmp, len);
    const bool descending = (SortType.compare("DESC") == 0);
    str_sort(tmp, RecCount, permutation, descending ? 1 : 0, len);
}
// Host-side counterpart of update_permutation_char: gather keys through the
// permutation into tmp, then sort tmp and update the permutation
// (descending when SortType is "DESC").
void update_permutation_char_host(char* key, unsigned int* permutation, unsigned int RecCount, string SortType, char* tmp, unsigned int len)
{
    str_gather_host(permutation, RecCount, (void*)key, (void*)tmp, len);
    const bool descending = (SortType.compare("DESC") == 0);
    str_sort_host(tmp, RecCount, permutation, descending ? 1 : 0, len);
}
// Reorder a device-resident fixed-width char column in place according to
// the permutation. tmp is caller-provided scratch of RecCount*len bytes.
void apply_permutation_char(char* key, unsigned int* permutation, unsigned int RecCount, char* tmp, unsigned int len)
{
    // stage the keys, then gather them back in permuted order
    hipMemcpy((void*)tmp, (void*)key, RecCount*len, hipMemcpyDeviceToDevice);
    str_gather((void*)permutation, RecCount, (void*)tmp, (void*)key, len);
}
// Gather a host-side fixed-width char column through the permutation into
// res: res[i] = key[permutation[i]] (len bytes per record).
void apply_permutation_char_host(char* key, unsigned int* permutation, unsigned int RecCount, char* res, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)res, len);
}
| 4700e37fea5b10cc808c42c9262a750c74d22acb.cu | /*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <algorithm>
#include <functional>
#include <numeric>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#include "sorts.cu"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#endif
using namespace std;
using namespace thrust::placeholders;
std::clock_t tot; // cumulative time spent in readSegmentsFromFile
unsigned long long int total_count = 0;
unsigned int total_segments = 0;
unsigned int total_max;
unsigned int process_count;
// per-column device write offsets for decompressed string data
map <unsigned int, unsigned int> str_offset;
long long int totalRecs = 0;
bool fact_file_loaded = 1;
char map_check;
void* d_v = NULL;
void* s_v = NULL;
unsigned int oldCount; // pre-segmentation record count (see getSegmentRecCount)
// parsed-query state queues (filled by the parser, drained by the engine)
queue<string> op_sort;
queue<string> op_type;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<string> col_aliases;
// shared device scratch buffer reused across segment copies/gathers
void* alloced_tmp;
unsigned int alloced_sz = 0;
bool alloced_switch = 0; // when set, column staging goes through alloced_tmp
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string,string> setMap; //map to keep track of column names and set names
struct is_match
{
// true for any index except the 4294967295 (0xFFFFFFFF) "no match" sentinel
__host__ __device__
bool operator()(unsigned int x)
{
return x != 4294967295;
}
};
struct f_equal_to
{
// approximate float equality: |x - y| < EPSILON
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
// x < y with an EPSILON tolerance band
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
// x > y with an EPSILON tolerance band
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
// x > y (with EPSILON slack) or approximately equal
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
// x < y (with EPSILON slack) or approximately equal
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
// negation of the approximate-equality test
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return !(((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct long_to_float_type
{
// plain int_type -> float_type cast
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
struct l_to_ui
{
// truncates x to unsigned int; NOTE(review): the declared return type is
// float_type, so the truncated value is converted back -- confirm intended.
__host__ __device__
float_type operator()(const int_type x)
{
return (unsigned int)x;
}
};
struct float_to_decimal
{
// scale by 100 and truncate -- presumably a 2-decimal fixed-point encode;
// NOTE(review): the result is an int_type returned through float_type.
__host__ __device__
float_type operator()(const float_type x)
{
return (int_type)(x*100);
}
};
struct to_zero
{
// maps the -1 sentinel to false, every other value to true
__host__ __device__
bool operator()(const int_type x)
{
if(x == -1)
return 0;
else
return 1;
}
};
struct div_long_to_float_type
{
// x / y performed in floating point
__host__ __device__
float_type operator()(const int_type x, const float_type y)
{
return (float_type)x/y;
}
};
struct long_to_float
{
// decode a 2-decimal fixed-point integer back to a float (divide by 100)
__host__ __device__
float_type operator()(const long long int x)
{
return (((float_type)x)/100.0);
}
};
// Trim leading whitespace in place; returns the same string for chaining.
// BUG FIX: the function name had been mangled by HTML unescaping
// ("&ltrim" -> "<rim"), which broke compilation. Also replaces
// std::not1/std::ptr_fun (removed in C++17) with a lambda, and casts to
// unsigned char before std::isspace to avoid UB on negative char values.
static inline std::string &ltrim(std::string &s) {
    s.erase(s.begin(), std::find_if(s.begin(), s.end(),
            [](unsigned char c) { return !std::isspace(c); }));
    return s;
}
// Trim trailing whitespace in place; returns the same string for chaining.
// Replaces std::not1/std::ptr_fun (removed in C++17) with a lambda, and
// casts to unsigned char before std::isspace to avoid UB on negative chars.
static inline std::string &rtrim(std::string &s) {
    s.erase(std::find_if(s.rbegin(), s.rend(),
            [](unsigned char c) { return !std::isspace(c); }).base(), s.end());
    return s;
}
// Trim whitespace from both ends in place; returns the string for chaining.
static inline std::string &trim(std::string &s) {
    rtrim(s);
    return ltrim(s);
}
// Destructive single-character tokenizer. Pass the string on the first call
// and NULL afterwards; *m keeps the resume position between calls. Each
// separator found is overwritten with '\0'. Returns NULL once exhausted.
char *mystrtok(char **m, char *s, char c)
{
    char *token = (s != 0) ? s : *m;
    if (*token == '\0')
        return 0;
    char *sep = strchr(token, c);
    if (sep != 0) {
        *sep = '\0';
        *m = sep + 1;
    }
    else {
        *m = token + strlen(token);
    }
    return token;
}
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, unsigned int& count);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int count, unsigned int g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int count, unsigned int g_size);
void write_compressed_char(string file_name, unsigned int index, unsigned int mCount);
unsigned long long int largest_prm(CudaSet* a);
unsigned int max_tmp(CudaSet* a);
unsigned int curr_segment = 10000000;
size_t getFreeMem();
char zone_map_check(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, CudaSet* a, unsigned int segment);
float total_time1 = 0;
// Construct a set whose schema comes from parsed column metadata and whose
// data will come from a text source (source = 1, text_source = 1).
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
keep = false;
partial_load = 0;
source = 1;
text_source = 1;
grp = NULL;
};
// Construct a set backed by a binary (compressed) file; segments are loaded
// on demand (partial_load = 1, text_source = 0).
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs, char* file_name)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
keep = false;
partial_load = 1;
source = 1;
text_source = 0;
grp = NULL;
};
// Construct an in-memory intermediate set with the given dimensions
// (no external source: source = 0, text_source = 0).
CudaSet::CudaSet(unsigned int RecordCount, unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
partial_load = 0;
source = 0;
text_source = 0;
grp = NULL;
};
// Construct an in-memory projection result over the columns named in op_sel
// (aliased by op_sel_as); metadata is copied from the registered source sets.
CudaSet::CudaSet(queue<string> op_sel, queue<string> op_sel_as)
{
initialize(op_sel, op_sel_as);
keep = false;
partial_load = 0;
source = 0;
text_source = 0;
grp = NULL;
};
// Construct an in-memory join result of sets a and b over the columns named
// in op_sel (aliased by op_sel_as).
CudaSet::CudaSet(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b, op_sel, op_sel_as);
keep = false;
partial_load = 0;
source = 0;
text_source = 0;
grp = NULL;
};
// Release all host and device resources owned by this set (see free()).
CudaSet::~CudaSet()
{
free();
};
// Allocate device storage for RecordCount values of column colIndex.
// Int/float columns grow their thrust device vectors; char columns get a raw
// cudaMalloc'ed buffer of RecordCount * fixed string width bytes. Exits the
// process when the char allocation fails.
void CudaSet::allocColumnOnDevice(unsigned int colIndex, unsigned long long int RecordCount)
{
if (type[colIndex] == 0) {
d_columns_int[type_index[colIndex]].resize(RecordCount);
}
else if (type[colIndex] == 1)
d_columns_float[type_index[colIndex]].resize(RecordCount);
else {
void* d;
unsigned long long int sz = (unsigned long long int)RecordCount*char_size[type_index[colIndex]];
cudaError_t cudaStatus = cudaMalloc(&d, sz);
if(cudaStatus != cudaSuccess) {
cout << "Could not allocate " << sz << " bytes of GPU memory for " << RecordCount << " records " << endl;
exit(0);
};
d_columns_char[type_index[colIndex]] = (char*)d;
};
};
// Read the dictionary-compressed string segment of colIndex from disk
// ("<load_file_name>.<col>.<segment>"), hash each dictionary entry with
// MurmurHash64A, expand the bit-packed value indices on the GPU, and gather
// the resulting hashes into d_columns_int[i_cnt] -- applying this set's row
// permutation for the segment when present ('R' = index list, 'A' = all rows).
void CudaSet::decompress_char_hash(unsigned int colIndex, unsigned int segment, unsigned int i_cnt)
{
unsigned int bits_encoded, fit_count, sz, vals_count, real_count, old_count;
const unsigned int len = char_size[type_index[colIndex]];
// build the segment file name: <load_file_name>.<column>.<segment>
char f1[100];
strcpy(f1, load_file_name);
strcat(f1,".");
char col_pos[3];
itoaa(cols[colIndex],col_pos);
strcat(f1,col_pos);
strcat(f1,".");
itoaa(segment,col_pos);
strcat(f1,col_pos);
FILE* f;
f = fopen (f1 , "rb" );
// dictionary: sz fixed-width strings of len bytes each
fread(&sz, 4, 1, f);
char* d_array = new char[sz*len];
fread((void*)d_array, sz*len, 1, f);
unsigned long long int* hashes = new unsigned long long int[sz];
for(unsigned int i = 0; i < sz ; i++) {
hashes[i] = MurmurHash64A(&d_array[i*len], len, hash_seed); // divide by 2 so it will fit into a signed long long
};
void* d;
cudaMalloc((void **) &d, sz*int_size);
cudaMemcpy( d, (void *) hashes, sz*8, cudaMemcpyHostToDevice);
thrust::device_ptr<unsigned long long int> dd_int((unsigned long long int*)d);
delete[] d_array;
delete[] hashes;
// bit-packed value stream header
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
unsigned long long int* int_array = new unsigned long long int[vals_count];
fread((void*)int_array, 1, vals_count*8, f);
fclose(f);
void* d_val;
cudaMalloc((void **) &d_val, vals_count*8);
cudaMemcpy(d_val, (void *) int_array, vals_count*8, cudaMemcpyHostToDevice);
thrust::device_ptr<unsigned long long int> mval((unsigned long long int*)d_val);
delete[] int_array;
void* d_int;
cudaMalloc((void **) &d_int, real_count*4);
// convert bits to ints and then do gather
// NOTE(review): this local d_v shadows the file-level global d_v.
void* d_v;
cudaMalloc((void **) &d_v, 8);
thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v);
dd_v[1] = fit_count;
dd_v[0] = bits_encoded;
thrust::counting_iterator<unsigned int> begin(0);
decompress_functor_str ff((unsigned long long int*)d_val,(unsigned int*)d_int, (unsigned int*)d_v);
thrust::for_each(begin, begin + real_count, ff);
//thrust::device_ptr<long long int> dd_int((long long int*)d);
thrust::device_ptr<unsigned int> dd_val((unsigned int*)d_int);
if(!prm.empty()) {
if(prm_index[segment] == 'R') {
// expand dictionary hashes for the full segment, then gather the
// permuted subset into the destination column
thrust::device_ptr<int_type> d_tmp = thrust::device_malloc<int_type>(real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_tmp);
if(prm_d.size() == 0) // find the largest prm segment
prm_d.resize(largest_prm(this));
cudaMemcpy((void**)(thrust::raw_pointer_cast(prm_d.data())), (void**)prm[segment],
4*prm_count[segment], cudaMemcpyHostToDevice);
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + prm_count[segment]);
thrust::gather(prm_d.begin(), prm_d.begin() + prm_count[segment], d_tmp, d_columns_int[i_cnt].begin() + old_count);
thrust::device_free(d_tmp);
}
else if(prm_index[segment] == 'A') {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_columns_int[i_cnt].begin() + old_count);
}
}
else {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_columns_int[i_cnt].begin() + old_count);
};
cudaFree(d);
cudaFree(d_val);
cudaFree(d_v);
cudaFree(d_int);
};
// Hash one segment of a string column and append the 64-bit hashes to
// d_columns_int[i_cnt]. For uncompressed data the strings are hashed on the
// host and copied (optionally gathered through this set's row permutation);
// for compressed data the dictionary itself is hashed on the fly.
void CudaSet::add_hashed_strings(string field, unsigned int segment, unsigned int i_cnt)
{
unsigned int colInd2 = columnNames.find(field)->second;
CudaSet *t = varNames[setMap[field]];
if(not_compressed) { // decompressed strings on a host
unsigned int old_count;
unsigned long long int* hashes = new unsigned long long int[t->mRecCount];
// hash each fixed-width string of this segment on the host
for(unsigned int i = 0; i < t->mRecCount ; i++) {
hashes[i] = MurmurHash64A(t->h_columns_char[t->type_index[colInd2]] + i*t->char_size[t->type_index[colInd2]] + segment*t->maxRecs*t->char_size[t->type_index[colInd2]], t->char_size[t->type_index[colInd2]], hash_seed);
};
if(!prm.empty()) {
if(prm_index[segment] == 'R') {
// stage hashes on the device, then gather the permuted subset
thrust::device_ptr<unsigned long long int> d_tmp = thrust::device_malloc<unsigned long long int>(t->mRecCount);
thrust::copy(hashes, hashes+mRecCount, d_tmp);
if(prm_d.size() == 0) // find the largest prm segment
prm_d.resize(largest_prm(this));
cudaMemcpy((void**)(thrust::raw_pointer_cast(prm_d.data())), (void**)prm[segment],
4*prm_count[segment], cudaMemcpyHostToDevice);
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + prm_count[segment]);
thrust::gather(prm_d.begin(), prm_d.begin() + prm_count[segment], d_tmp, d_columns_int[i_cnt].begin() + old_count);
thrust::device_free(d_tmp);
}
else if(prm_index[segment] == 'A') {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + mRecCount);
thrust::copy(hashes, hashes + mRecCount, d_columns_int[i_cnt].begin() + old_count);
}
}
else {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + mRecCount);
thrust::copy(hashes, hashes + mRecCount, d_columns_int[i_cnt].begin() + old_count);
}
delete [] hashes;
}
else { // hash the dictionary
decompress_char_hash(colInd2, segment, i_cnt);
};
};
// Grow every host column by addRecs records for join output. Char columns are
// only reallocated when the new count exceeds the preallocated capacity
// (prealloc_char_size), which is then updated.
// NOTE(review): the realloc return value is not checked for NULL -- confirm
// whether allocation failure should abort like allocColumnOnDevice does.
void CudaSet::resize_join(unsigned int addRecs)
{
mRecCount = mRecCount + addRecs;
bool prealloc = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
if(type[i] == 0) {
h_columns_int[type_index[i]].resize(mRecCount);
}
else if(type[i] == 1) {
h_columns_float[type_index[i]].resize(mRecCount);
}
else {
if (h_columns_char[type_index[i]]) {
if (mRecCount > prealloc_char_size) {
h_columns_char[type_index[i]] = (char*)realloc(h_columns_char[type_index[i]], (unsigned long long int)mRecCount*(unsigned long long int)char_size[type_index[i]]);
prealloc = 1;
};
}
else {
h_columns_char[type_index[i]] = new char[(unsigned long long int)mRecCount*(unsigned long long int)char_size[type_index[i]]];
};
};
};
if(prealloc)
prealloc_char_size = mRecCount;
};
// Grow every host column by addRecs records.
// NOTE(review): unlike resize_join this always reallocs char columns and the
// realloc return value is unchecked -- confirm failure handling.
void CudaSet::resize(unsigned int addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i <mColumnCount; i++) {
if(type[i] == 0) {
h_columns_int[type_index[i]].resize(mRecCount);
}
else if(type[i] == 1) {
h_columns_float[type_index[i]].resize(mRecCount);
}
else {
if (h_columns_char[type_index[i]]) {
h_columns_char[type_index[i]] = (char*)realloc(h_columns_char[type_index[i]], (unsigned long long int)mRecCount*(unsigned long long int)char_size[type_index[i]]);
}
else {
h_columns_char[type_index[i]] = new char[(unsigned long long int)mRecCount*(unsigned long long int)char_size[type_index[i]]];
};
};
};
};
// Preallocate host storage for Recs records in every column; records the char
// preallocation in prealloc_char_size (used by resize_join).
// NOTE(review): plain `new` throws std::bad_alloc rather than returning NULL,
// so the NULL check below is effectively dead -- confirm intended.
void CudaSet::reserve(unsigned int Recs)
{
for(unsigned int i=0; i <mColumnCount; i++) {
if(type[i] == 0)
h_columns_int[type_index[i]].reserve(Recs);
else if(type[i] == 1)
h_columns_float[type_index[i]].reserve(Recs);
else {
h_columns_char[type_index[i]] = new char[(unsigned long long int)Recs*(unsigned long long int)char_size[type_index[i]]];
if(h_columns_char[type_index[i]] == NULL) {
cout << "Could not allocate on a host " << Recs << " records of size " << char_size[type_index[i]] << endl;
exit(0);
};
prealloc_char_size = Recs;
};
};
};
// Release the device storage of column colIndex. Int/float vectors are
// resized to zero and shrunk so the memory is actually returned; char
// buffers are cudaFree'd and nulled.
void CudaSet::deAllocColumnOnDevice(unsigned int colIndex)
{
if (type[colIndex] == 0 && !d_columns_int.empty()) {
d_columns_int[type_index[colIndex]].resize(0);
d_columns_int[type_index[colIndex]].shrink_to_fit();
}
else if (type[colIndex] == 1 && !d_columns_float.empty()) {
d_columns_float[type_index[colIndex]].resize(0);
d_columns_float[type_index[colIndex]].shrink_to_fit();
}
else if (type[colIndex] == 2 && d_columns_char[type_index[colIndex]] != NULL) {
cudaFree(d_columns_char[type_index[colIndex]]);
d_columns_char[type_index[colIndex]] = NULL;
};
};
// Allocate device storage for RecordCount records in every column.
void CudaSet::allocOnDevice(unsigned long long int RecordCount)
{
    for(unsigned int col = 0; col < mColumnCount; col++)
        allocColumnOnDevice(col, RecordCount);
}
// Release all device storage of this set: every column, the grouping buffer,
// and -- when this set is a permutation view over another set -- the device
// columns of the underlying source set as well.
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i <mColumnCount; i++)
deAllocColumnOnDevice(i);
if(!columnGroups.empty() && mRecCount !=0) {
cudaFree(grp);
grp = NULL;
};
if(!prm.empty()) { // free the sources
string some_field;
map<string,int>::iterator it=columnNames.begin();
some_field = (*it).first;
// only when the source set is a different set than this one
if(setMap[some_field].compare(name)) {
CudaSet* t = varNames[setMap[some_field]];
t->deAllocOnDevice();
};
};
};
// Grow the device storage of column colIndex to mRecCount + RecCount records.
// NOTE(review): for char columns the old buffer is freed and a fresh one is
// allocated, so existing device data is NOT preserved (unlike the int/float
// vector resizes) -- confirm callers rely on that.
void CudaSet::resizeDeviceColumn(unsigned int RecCount, unsigned int colIndex)
{
if (RecCount) {
if (type[colIndex] == 0)
d_columns_int[type_index[colIndex]].resize(mRecCount+RecCount);
else if (type[colIndex] == 1)
d_columns_float[type_index[colIndex]].resize(mRecCount+RecCount);
else {
if (d_columns_char[type_index[colIndex]] != NULL)
cudaFree(d_columns_char[type_index[colIndex]]);
void *d;
cudaMalloc((void **) &d, (mRecCount+RecCount)*char_size[type_index[colIndex]]);
d_columns_char[type_index[colIndex]] = (char*)d;
};
};
};
// Grow every device column by RecCount records (no-op when RecCount is 0).
void CudaSet::resizeDevice(unsigned int RecCount)
{
    if (!RecCount)
        return;
    for(unsigned int col = 0; col < mColumnCount; col++)
        resizeDeviceColumn(RecCount, col);
}
// True when column i currently has storage resident on the device.
// Unknown type codes report true, matching the original fall-through.
bool CudaSet::onDevice(unsigned int i)
{
    unsigned j = type_index[i];
    switch (type[i]) {
    case 0:
        if (d_columns_int.empty() || d_columns_int[j].size() == 0)
            return 0;
        break;
    case 1:
        if (d_columns_float.empty() || d_columns_float[j].size() == 0)
            return 0;
        break;
    case 2:
        if (d_columns_char.empty() || d_columns_char[j] == NULL)
            return 0;
        break;
    };
    return 1;
}
// Create a new empty CudaSet with the same schema as this one (column names,
// types, widths, segment layout); no row data is copied (mRecCount = 0).
CudaSet* CudaSet::copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->not_compressed = not_compressed;
a->segCount = segCount;
a->maxRecs = maxRecs;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
a->columnNames[(*it).first] = (*it).second;
for(unsigned int i=0; i < mColumnCount; i++) {
a->cols[i] = cols[i];
a->type[i] = type[i];
if(a->type[i] == 0) {
a->d_columns_int.push_back(thrust::device_vector<int_type>());
a->h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >());
a->type_index[i] = a->d_columns_int.size()-1;
}
else if(a->type[i] == 1) {
a->d_columns_float.push_back(thrust::device_vector<float_type>());
a->h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >());
a->type_index[i] = a->d_columns_float.size()-1;
a->decimal[i] = decimal[i];
}
else {
// char column buffers are allocated lazily
a->h_columns_char.push_back(NULL);
a->d_columns_char.push_back(NULL);
a->type_index[i] = a->d_columns_char.size()-1;
a->char_size.push_back(char_size[type_index[i]]);
};
};
a->load_file_name = load_file_name;
a->mRecCount = 0;
return a;
}
// Read the on-disk segment segNum of column colIndex into the column's host
// buffer ("<load_file_name>.<col>.<segment>"): int/float segments are read as
// a 4-byte count followed by the compressed payload; char segments go through
// decompress_char. Accumulates elapsed time in the global `tot`.
// NOTE(review): the return value is always 0 and the header fread results are
// unchecked -- confirm callers ignore the return.
unsigned long long int CudaSet::readSegmentsFromFile(unsigned int segNum, unsigned int colIndex)
{
char f1[100];
strcpy(f1, load_file_name);
strcat(f1,".");
char col_pos[3];
itoaa(cols[colIndex],col_pos);
strcat(f1,col_pos);
unsigned int cnt;
strcat(f1,".");
itoaa(segNum,col_pos);
strcat(f1,col_pos);
std::clock_t start1 = std::clock();
FILE* f;
f = fopen(f1, "rb" );
if(f == NULL) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
size_t rr;
if(type[colIndex] == 0) {
// first 4 bytes hold the compressed value count; the rest is payload
fread(h_columns_int[type_index[colIndex]].data(), 4, 1, f);
cnt = ((unsigned int*)(h_columns_int[type_index[colIndex]].data()))[0];
//cout << "start fread " << f1 << " " << (cnt+8)*8 - 4 << endl;
rr = fread((unsigned int*)(h_columns_int[type_index[colIndex]].data()) + 1, 1, (cnt+8)*8 - 4, f);
if(rr != (cnt+8)*8 - 4) {
cout << "Couldn't read " << (cnt+8)*8 - 4 << " bytes from " << f1 << endl;
exit(0);
};
//cout << "end fread " << rr << endl;
}
else if(type[colIndex] == 1) {
fread(h_columns_float[type_index[colIndex]].data(), 4, 1, f);
cnt = ((unsigned int*)(h_columns_float[type_index[colIndex]].data()))[0];
//cout << "start fread " << f1 << " " << (cnt+8)*8 - 4 << endl;
rr = fread((unsigned int*)(h_columns_float[type_index[colIndex]].data()) + 1, 1, (cnt+8)*8 - 4, f);
if(rr != (cnt+8)*8 - 4) {
cout << "Couldn't read " << (cnt+8)*8 - 4 << " bytes from " << f1 << endl;
exit(0);
};
//cout << "end fread " << rr << endl;
}
else {
decompress_char(f, colIndex, segNum);
};
tot = tot + ( std::clock() - start1 );
fclose(f);
return 0;
};
// Decompress one dictionary-encoded string segment from the open file f:
// upload the dictionary, expand the bit-packed value indices on the GPU, and
// gather the strings either into this column's device buffer (advancing the
// per-column str_offset) or into the shared scratch buffer when
// alloced_switch is set. Closes f and sets mRecCount to the segment's rows.
void CudaSet::decompress_char(FILE* f, unsigned int colIndex, unsigned int segNum)
{
unsigned int bits_encoded, fit_count, sz, vals_count, real_count;
const unsigned int len = char_size[type_index[colIndex]];
// dictionary: sz fixed-width strings of len bytes each
fread(&sz, 4, 1, f);
char* d_array = new char[sz*len];
fread((void*)d_array, sz*len, 1, f);
void* d;
cudaMalloc((void **) &d, sz*len);
cudaMemcpy( d, (void *) d_array, sz*len, cudaMemcpyHostToDevice);
delete[] d_array;
// bit-packed value stream header
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
thrust::device_ptr<unsigned int> param = thrust::device_malloc<unsigned int>(2);
param[1] = fit_count;
param[0] = bits_encoded;
unsigned long long int* int_array = new unsigned long long int[vals_count];
fread((void*)int_array, 1, vals_count*8, f);
fclose(f);
void* d_val;
cudaMalloc((void **) &d_val, vals_count*8);
cudaMemcpy(d_val, (void *) int_array, vals_count*8, cudaMemcpyHostToDevice);
delete[] int_array;
void* d_int;
cudaMalloc((void **) &d_int, real_count*4);
// convert bits to ints and then do gather
thrust::counting_iterator<unsigned int> begin(0);
decompress_functor_str ff((unsigned long long int*)d_val,(unsigned int*)d_int, (unsigned int*)thrust::raw_pointer_cast(param));
thrust::for_each(begin, begin + real_count, ff);
//thrust::device_ptr<unsigned int> dd_r((unsigned int*)d_int);
//for(int z = 0 ; z < 3; z++)
//cout << "DD " << dd_r[z] << endl;
//void* d_char;
//cudaMalloc((void **) &d_char, real_count*len);
//cudaMemset(d_char, 0, real_count*len);
//str_gather(d_int, real_count, d, d_char, len);
if(str_offset.count(colIndex) == 0)
str_offset[colIndex] = 0;
//cout << "str off " << str_offset[colIndex] << endl;
//cout << "prm cnt of seg " << segNum << " is " << prm.empty() << endl;
if(!alloced_switch)
str_gather(d_int, real_count, d, d_columns_char[type_index[colIndex]] + str_offset[colIndex]*len, len);
else
str_gather(d_int, real_count, d, alloced_tmp, len);
// advance the write offset by what will actually be kept
if(!prm.empty()) {
str_offset[colIndex] = str_offset[colIndex] + prm_count[segNum];
}
else {
str_offset[colIndex] = str_offset[colIndex] + real_count;
};
//if(d_columns_char[type_index[colIndex]])
// cudaFree(d_columns_char[type_index[colIndex]]);
//d_columns_char[type_index[colIndex]] = (char*)d_char;
mRecCount = real_count;
cudaFree(d);
cudaFree(d_val);
thrust::device_free(param);
cudaFree(d_int);
}
// Copy one segment of column 'colIndex' to the GPU.
//
// Uncompressed sets: the segment's slice of the host vector is copied into
// either the managed device vector or the 'alloced_tmp' scratch buffer, and
// mRecCount is set to the segment's record count.
// Compressed sets: the segment is (optionally) read from disk first, then
// pfor-decompressed on the device; decimal float columns are stored as
// scaled integers on disk and converted back after decompression.
void CudaSet::CopyColumnToGpu(unsigned int colIndex, unsigned int segment)
{
    if(not_compressed) {
        // calculate how many records we need to copy: all segments are
        // maxRecs long except the (possibly shorter) last one
        if(segment < segCount-1) {
            mRecCount = maxRecs;
        }
        else {
            mRecCount = oldRecCount - maxRecs*(segCount-1);
        };
        switch(type[colIndex]) {
        case 0 :
            if(!alloced_switch)
                thrust::copy(h_columns_int[type_index[colIndex]].begin() + maxRecs*segment, h_columns_int[type_index[colIndex]].begin() + maxRecs*segment + mRecCount, d_columns_int[type_index[colIndex]].begin());
            else {
                thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
                thrust::copy(h_columns_int[type_index[colIndex]].begin() + maxRecs*segment, h_columns_int[type_index[colIndex]].begin() + maxRecs*segment + mRecCount, d_col);
            };
            break;
        case 1 :
            if(!alloced_switch)
                thrust::copy(h_columns_float[type_index[colIndex]].begin() + maxRecs*segment, h_columns_float[type_index[colIndex]].begin() + maxRecs*segment + mRecCount, d_columns_float[type_index[colIndex]].begin());
            else {
                thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
                thrust::copy(h_columns_float[type_index[colIndex]].begin() + maxRecs*segment, h_columns_float[type_index[colIndex]].begin() + maxRecs*segment + mRecCount, d_col);
            };
            break;
        default :
            // char columns are raw fixed-width bytes, copied with cudaMemcpy
            if(!alloced_switch)
                cudaMemcpy(d_columns_char[type_index[colIndex]], h_columns_char[type_index[colIndex]] + maxRecs*segment*char_size[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, cudaMemcpyHostToDevice);
            else
                cudaMemcpy(alloced_tmp, h_columns_char[type_index[colIndex]] + maxRecs*segment*char_size[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, cudaMemcpyHostToDevice);
        };
    }
    else {
        // BUG FIX: initialize to 0 — previously left uninitialized (and then
        // used below) whenever partial_load was false.
        unsigned long long int data_offset = 0;
        if (partial_load)
            data_offset = readSegmentsFromFile(segment,colIndex);
        if(type[colIndex] != 2) {
            // lazily allocate the scratch buffers shared by pfor_decompress
            if(d_v == NULL)
                CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
            // BUG FIX: a stray ';' after this condition made the allocation
            // unconditional, leaking the previous s_v on every call
            if(s_v == NULL)
                CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
        };
        if(type[colIndex] == 0) {
            if(!alloced_switch) {
                mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data()), h_columns_int[type_index[colIndex]].data() + data_offset, d_v, s_v);
            }
            else {
                mRecCount = pfor_decompress(alloced_tmp, h_columns_int[type_index[colIndex]].data() + data_offset, d_v, s_v);
            };
        }
        else if(type[colIndex] == 1) {
            if(decimal[colIndex]) {
                // decimals are stored as 64-bit ints; decompress in place,
                // then convert the integer view back to float_type
                if(!alloced_switch) {
                    mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data()) , h_columns_float[type_index[colIndex]].data() + data_offset, d_v, s_v);
                    thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data()));
                    thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin(), long_to_float());
                }
                else {
                    mRecCount = pfor_decompress(alloced_tmp, h_columns_float[type_index[colIndex]].data() + data_offset, d_v, s_v);
                    thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
                    thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
                    thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
                };
            }
            //else // uncompressed float
            //cudaMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, cudaMemcpyHostToDevice);
            // will have to fix it later so uncompressed data will be written by segments too
        }
    };
}
// Copy ALL segments of column 'colIndex' to the GPU, concatenating them into
// the device column.  For compressed sets each segment is (optionally) read
// from disk and pfor-decompressed at the running offset 'totalRecs'; decimal
// float columns are converted back from their on-disk integer form.  Sets
// mRecCount to the total number of records copied.
void CudaSet::CopyColumnToGpu(unsigned int colIndex) // copy all segments
{
    if(not_compressed) {
        switch(type[colIndex]) {
        case 0 :
            thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_columns_int[type_index[colIndex]].begin());
            break;
        case 1 :
            thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_columns_float[type_index[colIndex]].begin());
            break;
        default :
            cudaMemcpy(d_columns_char[type_index[colIndex]], h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, cudaMemcpyHostToDevice);
        };
    }
    else {
        // BUG FIX: initialize to 0 — previously left uninitialized (and then
        // used below) whenever partial_load was false.
        long long int data_offset = 0;
        unsigned long long int totalRecs = 0;
        // lazily allocate the scratch buffers shared by pfor_decompress
        if(d_v == NULL)
            CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
        // BUG FIX: a stray ';' after this condition made the allocation
        // unconditional, leaking the previous s_v on every call
        if(s_v == NULL)
            CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
        str_offset[colIndex] = 0;
        for(unsigned int i = 0; i < segCount; i++) {
            if (partial_load)
                data_offset = readSegmentsFromFile(i,colIndex);
            if(type[colIndex] == 0) {
                mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data() + totalRecs), h_columns_int[type_index[colIndex]].data() + data_offset, d_v, s_v);
            }
            else if(type[colIndex] == 1) {
                if(decimal[colIndex]) {
                    // decimals are stored as 64-bit ints; decompress in
                    // place, then convert the integer view back to float_type
                    mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totalRecs) , h_columns_float[type_index[colIndex]].data() + data_offset, d_v, s_v);
                    thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totalRecs));
                    thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin() + totalRecs, long_to_float());
                }
                // else uncompressed float
                //cudaMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, cudaMemcpyHostToDevice);
                // will have to fix it later so uncompressed data will be written by segments too
            };
            totalRecs = totalRecs + mRecCount;
        };
        mRecCount = totalRecs;
    };
}
// Copy the first 'RecCount' device records of one column back to the host,
// writing them at host position 'offset'.
void CudaSet::CopyColumnToHost(int colIndex, unsigned int offset, unsigned int RecCount)
{
    const unsigned int ti = type_index[colIndex];
    if (type[colIndex] == 0) {
        // 64-bit integer column
        thrust::copy(d_columns_int[ti].begin(), d_columns_int[ti].begin() + RecCount, h_columns_int[ti].begin() + offset);
    }
    else if (type[colIndex] == 1) {
        // float column
        thrust::copy(d_columns_float[ti].begin(), d_columns_float[ti].begin() + RecCount, h_columns_float[ti].begin() + offset);
    }
    else {
        // fixed-width char column: raw byte copy
        cudaMemcpy(h_columns_char[ti] + offset*char_size[ti], d_columns_char[ti], char_size[ti]*RecCount, cudaMemcpyDeviceToHost);
    }
}
// Convenience overload: copy the entire column (mRecCount records) back to
// the start of the host arrays.
void CudaSet::CopyColumnToHost(int colIndex)
{
    CopyColumnToHost(colIndex, 0, mRecCount);
}
// Copy the first 'count' device records of every column back to the host,
// landing at host position 'offset'.
void CudaSet::CopyToHost(unsigned int offset, unsigned int count)
{
    for(unsigned int col = 0; col < mColumnCount; ++col)
        CopyColumnToHost(col, offset, count);
}
// Raw device pointer to a float column, looked up by column name.
// NOTE(review): assumes 'name' exists in columnNames — an unknown name
// dereferences end(); confirm callers validate first.
float_type* CudaSet::get_float_type_by_name(string name)
{
    const unsigned int idx = columnNames.find(name)->second;
    return thrust::raw_pointer_cast(d_columns_float[type_index[idx]].data());
}
// Raw device pointer to an int column, looked up by column name.
// NOTE(review): assumes 'name' exists in columnNames — an unknown name
// dereferences end(); confirm callers validate first.
int_type* CudaSet::get_int_by_name(string name)
{
    const unsigned int idx = columnNames.find(name)->second;
    return thrust::raw_pointer_cast(d_columns_int[type_index[idx]].data());
}
// Raw host pointer to a float column, looked up by column name.
// NOTE(review): assumes 'name' exists in columnNames — an unknown name
// dereferences end(); confirm callers validate first.
float_type* CudaSet::get_host_float_by_name(string name)
{
    const unsigned int idx = columnNames.find(name)->second;
    return thrust::raw_pointer_cast(h_columns_float[type_index[idx]].data());
}
// Raw host pointer to an int column, looked up by column name.
// NOTE(review): assumes 'name' exists in columnNames — an unknown name
// dereferences end(); confirm callers validate first.
int_type* CudaSet::get_host_int_by_name(string name)
{
    const unsigned int idx = columnNames.find(name)->second;
    return thrust::raw_pointer_cast(h_columns_int[type_index[idx]].data());
}
// Mark group boundaries over the (already sorted) key columns in 'columnRef'.
// Builds the device bool mask 'grp' where grp[i]==1 means row i is the last
// row of a group (adjacent-rows-differ on at least one key), and sets
// grp_count to the number of groups.  For char keys, comparison is done on
// int_type surrogate columns stored after the first 'int_col_count' int
// columns (see the i_count bookkeeping below).
// NOTE(review): appears to assume the data is sorted on the key columns so
// that "adjacent rows differ" implies a group boundary — confirm callers
// sort first.
void CudaSet::GroupBy(stack<string> columnRef, unsigned int int_col_count)
{
int grpInd, colIndex;
// rebuild the boundary mask from scratch
if(grp)
cudaFree(grp);
CUDA_SAFE_CALL(cudaMalloc((void **) &grp, mRecCount * sizeof(bool)));
thrust::device_ptr<bool> d_grp(grp);
// sequence with init 0 and step 0 == fill with false
thrust::sequence(d_grp, d_grp+mRecCount, 0, 0);
thrust::device_ptr<bool> d_group = thrust::device_malloc<bool>(mRecCount);
// the last row always terminates a group
d_group[mRecCount-1] = 1;
unsigned int i_count = 0;
// note: 'i' is never incremented — the loop ends when columnRef drains to empty
for(int i = 0; i < columnRef.size(); columnRef.pop()) {
columnGroups.push(columnRef.top()); // save for future references
colIndex = columnNames[columnRef.top()];
// pull the key column onto the device if it is not already there,
// and remember to release it afterwards (grpInd)
if(!onDevice(colIndex)) {
allocColumnOnDevice(colIndex,mRecCount);
CopyColumnToGpu(colIndex, mRecCount);
grpInd = 1;
}
else
grpInd = 0;
// d_group[i] = (key[i] != key[i+1]) for this key column
if (type[colIndex] == 0) { // int_type
thrust::transform(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_int[type_index[colIndex]].begin()+1, d_group, thrust::not_equal_to<int_type>());
}
else if (type[colIndex] == 1) { // float_type
thrust::transform(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_float[type_index[colIndex]].begin()+1, d_group, f_not_equal_to());
}
else { // Char
//str_grp(d_columns_char[type_index[colIndex]], mRecCount, d_group, char_size[type_index[colIndex]]);
//use int_type
thrust::transform(d_columns_int[int_col_count+i_count].begin(), d_columns_int[int_col_count+i_count].begin() + mRecCount - 1,
d_columns_int[int_col_count+i_count].begin()+1, d_group, thrust::not_equal_to<int_type>());
i_count++;
};
// accumulate: a row is a boundary if ANY key column differs
thrust::transform(d_group, d_group+mRecCount, d_grp, d_grp, thrust::logical_or<bool>());
if (grpInd == 1)
deAllocColumnOnDevice(colIndex);
};
thrust::device_free(d_group);
grp_count = thrust::count(d_grp, d_grp+mRecCount,1);
};
// Register (or resize) an int column named 'colName' at slot 'colIndex' and
// fill its device vector from the raw device buffer 'col'.
void CudaSet::addDeviceColumn(int_type* col, int colIndex, string colName, unsigned int recCount)
{
    const bool is_new = (columnNames.find(colName) == columnNames.end());
    if (is_new) {
        // first time we see this column: register it and create its storage
        columnNames[colName] = colIndex;
        type[colIndex] = 0;
        d_columns_int.push_back(thrust::device_vector<int_type>(recCount));
        h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >());
        type_index[colIndex] = d_columns_int.size()-1;
    }
    else if (d_columns_int[type_index[colIndex]].size() < recCount) {
        // already exists — just make sure it can hold recCount records
        d_columns_int[type_index[colIndex]].resize(recCount);
    }
    // copy the raw device data into the managed device vector
    thrust::device_ptr<int_type> src((int_type*)col);
    thrust::copy(src, src + recCount, d_columns_int[type_index[colIndex]].begin());
};
// Register (or resize) a float column named 'colName' at slot 'colIndex' and
// fill its device vector from the raw device buffer 'col'.
void CudaSet::addDeviceColumn(float_type* col, int colIndex, string colName, unsigned int recCount)
{
    const bool is_new = (columnNames.find(colName) == columnNames.end());
    if (is_new) {
        // first time we see this column: register it and create its storage
        columnNames[colName] = colIndex;
        type[colIndex] = 1;
        d_columns_float.push_back(thrust::device_vector<float_type>(recCount));
        h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >());
        type_index[colIndex] = d_columns_float.size()-1;
    }
    else if (d_columns_float[type_index[colIndex]].size() < recCount) {
        // already exists — just make sure it can hold recCount records
        d_columns_float[type_index[colIndex]].resize(recCount);
    }
    // copy the raw device data into the managed device vector
    thrust::device_ptr<float_type> src((float_type*)col);
    thrust::copy(src, src + recCount, d_columns_float[type_index[colIndex]].begin());
};
// Compress and append one segment of every column to its per-column file
// "<file_name>.<col>.<segment>", updating the running totals (total_count,
// total_segments, total_max).  If op_sort is non-empty the segment is first
// sorted on those keys via a device permutation.  'd' is a caller-provided
// device scratch buffer large enough for mCount values; 'offset'/'mCount'
// select the slice of the host columns to write.  Headers are rewritten
// depending on check_type/check_val (see the bottom of the function).
void CudaSet::compress(char* file_name, unsigned int offset, unsigned int check_type, unsigned int check_val, void* d, unsigned int mCount)
{
char str[100];
char col_pos[3];
thrust::device_vector<unsigned int> permutation;
// maintain file-level totals used by writeHeader
total_count = total_count + mCount;
total_segments = total_segments + 1;
if (mCount > total_max)
total_max = mCount;
if(!op_sort.empty()) { //sort the segment
//copy the key columns to device
queue<string> sf(op_sort);
// identity permutation, refined once per sort key (last key first)
permutation.resize(mRecCount);
thrust::sequence(permutation.begin(), permutation.begin() + mRecCount,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation.data());
void* temp;
cout << "sorting " << getFreeMem() << endl;
// scratch sized for the widest key type (char width vs float width)
unsigned int max_c = max_char(this, sf);
if(max_c > float_size)
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, mRecCount*max_c));
else
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, mRecCount*float_size));
string sort_type = "ASC";
while(!sf.empty()) {
int colInd = columnNames[sf.front()];
allocColumnOnDevice(colInd, maxRecs);
CopyColumnToGpu(colInd);
if (type[colInd] == 0)
update_permutation(d_columns_int[type_index[colInd]], raw_ptr, mRecCount, sort_type, (int_type*)temp);
else if (type[colInd] == 1)
update_permutation(d_columns_float[type_index[colInd]], raw_ptr, mRecCount, sort_type, (float_type*)temp);
else {
update_permutation_char(d_columns_char[type_index[colInd]], raw_ptr, mRecCount, sort_type, (char*)temp, char_size[type_index[colInd]]);
};
deAllocColumnOnDevice(colInd);
sf.pop();
};
cudaFree(temp);
};
// write each column's segment file: "<file_name>.<col>.<segment_index>"
for(unsigned int i = 0; i< mColumnCount; i++) {
strcpy(str, file_name);
strcat(str,".");
itoaa(cols[i],col_pos);
strcat(str,col_pos);
curr_file = str;
strcat(str,".");
itoaa(total_segments-1,col_pos);
strcat(str,col_pos);
if(!op_sort.empty()) {
allocColumnOnDevice(i, maxRecs);
CopyColumnToGpu(i);
};
if(type[i] == 0) {
// int column: stage into 'd' (sorted via gather if needed), pfor-compress
thrust::device_ptr<int_type> d_col((int_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_int[type_index[i]].begin(), d_col);
}
else {
thrust::copy(h_columns_int[type_index[i]].begin() + offset, h_columns_int[type_index[i]].begin() + offset + mCount, d_col);
};
pfor_compress( d, mCount*int_size, str, h_columns_int[type_index[i]], 0, 0);
}
else if(type[i] == 1) {
if(decimal[i]) {
// decimal float: convert to scaled 64-bit ints, then pfor-compress
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[type_index[i]].begin(), d_col);
}
else {
thrust::copy(h_columns_float[type_index[i]].begin() + offset, h_columns_float[type_index[i]].begin() + offset + mCount, d_col);
};
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[type_index[i]], 1, 0);
}
else { // do not compress -- float
// raw floats: count, values, then comp_type marker 3 (uncompressed)
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[type_index[i]].begin(), d_col);
thrust::copy(d_col, d_col+mRecCount, h_columns_float[type_index[i]].begin());
};
fstream binary_file(str,ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[type_index[i]].data() + offset),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else { //char
// char column: apply the sort permutation on the host, then
// dictionary-encode via compress_char
if(!op_sort.empty()) {
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[type_index[i]]*mRecCount];
apply_permutation_char_host(h_columns_char[type_index[i]], h_permutation, mRecCount, t, char_size[type_index[i]]);
thrust::copy(t, t+ char_size[type_index[i]]*mRecCount, h_columns_char[type_index[i]]);
delete [] t;
};
compress_char(str, i, mCount, offset);
};
// header policy: check_type==1 writes headers once the fact file is
// fully loaded; otherwise headers are written when check_val==0
if(check_type == 1) {
if(fact_file_loaded) {
writeHeader(file_name, cols[i]);
}
}
else {
if(check_val == 0) {
writeHeader(file_name, cols[i]);
};
};
};
permutation.resize(0);
permutation.shrink_to_fit();
}
// Append the running totals for one column to "<file_name>.<col>.header":
// total record count (8B), segment count (4B), largest segment (4B), and the
// cnt_counts entry keyed by the column's base file name (4B).
void CudaSet::writeHeader(char* file_name, unsigned int col) {
    char col_pos[3];
    itoaa(col, col_pos);
    // base name ("<file_name>.<col>") doubles as the cnt_counts key
    string base = string(file_name) + "." + col_pos;
    string header_name = base + ".header";
    fstream binary_file(header_name.c_str(), ios::out|ios::binary|ios::app);
    binary_file.write((char *)&total_count, 8);
    binary_file.write((char *)&total_segments, 4);
    binary_file.write((char *)&total_max, 4);
    binary_file.write((char *)&cnt_counts[base], 4);
    binary_file.close();
};
// Append the sort-key description to "<file_name>.sort": the number of sort
// columns (4B) followed by each key's column index (4B each, in sort order).
void CudaSet::writeSortHeader(char* file_name)
{
    string sort_name = string(file_name) + ".sort";
    fstream binary_file(sort_name.c_str(), ios::out|ios::binary|ios::app);
    unsigned int idx = op_sort.size();
    binary_file.write((char *)&idx, 4);
    // walk a copy so the member queue is left untouched
    queue<string> keys(op_sort);
    while(!keys.empty()) {
        idx = columnNames[keys.front()];
        binary_file.write((char *)&idx, 4);
        keys.pop();
    };
    binary_file.close();
}
// Write this set to disk, either as delimited text (binary==0) or in the
// compressed binary segment format (binary==1).  'limit' caps the number of
// records written (0 = all).  Three paths:
//   * text output: decompress/copy segments to the host and print row by row;
//   * binary from a text source: compress the already-loaded host columns;
//   * binary from a binary source: re-segment by process_count and compress,
//     decompressing first when the records are currently compressed.
// A call with mRecCount==0 and binary==1 only (re)writes the headers.
void CudaSet::Store(char* file_name, char* sep, unsigned int limit, bool binary )
{
if (mRecCount == 0 && binary == 1) { // write tails
for(unsigned int i = 0; i< mColumnCount; i++) {
writeHeader(file_name, cols[i]);
};
return;
};
unsigned int mCount, cnt;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else
mCount = mRecCount;
if(binary == 0) {
// ---- text output path ----
char buffer [33];
queue<string> op_vx;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
op_vx.push((*it).first);
curr_segment = 1000000;
FILE *file_pr = fopen(file_name, "w");
if (file_pr == NULL)
cout << "Could not open file " << file_name << endl;
// projected/compressed sets must be materialized segment by segment
if(prm.size() || source)
allocColumns(this, op_vx);
unsigned int curr_seg = 0, cnt = 0;
unsigned curr_count, sum_printed = 0;
while(sum_printed < mCount) {
if(prm.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
// if host arrays are empty
unsigned int olRecs = mRecCount;
resize(mRecCount);
mRecCount = olRecs;
CopyToHost(0,mRecCount);
if(sum_printed + mRecCount <= mCount)
curr_count = mRecCount;
else {
curr_count = mCount - sum_printed;
};
}
else
curr_count = mCount;
sum_printed = sum_printed + mRecCount;
string ss;
// print each row, 'sep'-delimited; char fields are trimmed
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < mColumnCount; j++) {
if (type[j] == 0) {
sprintf(buffer, "%lld", (h_columns_int[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else if (type[j] == 1) {
sprintf(buffer, "%.2f", (h_columns_float[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else {
ss.assign(h_columns_char[type_index[j]] + (i*char_size[type_index[j]]), char_size[type_index[j]]);
trim(ss);
fputs(ss.c_str(), file_pr);
fputs(sep, file_pr);
};
};
if (i != mCount -1)
fputs("\n",file_pr);
};
curr_seg++;
};
fclose(file_pr);
}
else if(text_source) { //writing a binary file using a text file as a source
// ---- binary output, text-loaded source: one compress() call ----
//char str[100];
//char col_pos[3];
void* d;
CUDA_SAFE_CALL(cudaMalloc((void **) &d, mCount*float_size));
compress(file_name, 0, 1, 0, d, mCount);
writeSortHeader(file_name);
/*for(unsigned int i = 0; i< mColumnCount; i++) {
strcpy(str, file_name);
strcat(str,".");
itoaa(cols[i],col_pos);
strcat(str,col_pos);
curr_file = str;
strcat(str,".");
itoaa(total_segments-1,col_pos);
strcat(str,col_pos);
if(type[i] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::copy(h_columns_int[type_index[i]].begin(), h_columns_int[type_index[i]].begin() + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[type_index[i]], 0, 0);
}
else if(type[i] == 1) {
if(decimal[i]) {
thrust::device_ptr<float_type> d_col((float_type*)d);
thrust::copy(h_columns_float[type_index[i]].begin(), h_columns_float[type_index[i]].begin() + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[type_index[i]], 1, 0);
}
else { // do not compress -- float
fstream binary_file(str,ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[type_index[i]].data()),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else { //char
compress_char(str, i, mCount, 0);
};
if(fact_file_loaded) {
writeHeader(file_name, cols[i]);
};
};
*/
for(unsigned int i = 0; i< mColumnCount; i++)
if(type[i] == 2)
deAllocColumnOnDevice(i);
cudaFree(d);
}
else { //writing a binary file using a binary file as a source
// ---- binary output, binary-loaded source: re-segment and compress ----
fact_file_loaded = 1;
unsigned int offset = 0;
void* d;
// scratch buffer sized for one processing batch
if(mRecCount < process_count) {
CUDA_SAFE_CALL(cudaMalloc((void **) &d, mRecCount*float_size));
}
else {
CUDA_SAFE_CALL(cudaMalloc((void **) &d, process_count*float_size));
};
if(!not_compressed) { // records are compressed, for example after filter op.
//decompress to host
queue<string> op_vx;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it ) {
op_vx.push((*it).first);
};
allocColumns(this, op_vx);
unsigned int oldCnt = mRecCount;
mRecCount = 0;
resize(oldCnt);
mRecCount = oldCnt;
for(unsigned int i = 0; i < segCount; i++) {
cnt = 0;
copyColumns(this, op_vx, i, cnt);
reset_offsets();
CopyToHost(0, mRecCount);
offset = offset + mRecCount;
compress(file_name, 0, 0, i - (segCount-1), d, mRecCount);
};
//mRecCount = offset;
}
else {
// now we have decompressed records on the host
//call setSegments and compress columns in every segment
segCount = mRecCount/process_count + 1;
offset = 0;
for(unsigned int z = 0; z < segCount; z++) {
if(z < segCount-1) {
if(mRecCount < process_count) {
mCount = mRecCount;
}
else {
mCount = process_count;
}
}
else
mCount = mRecCount - (segCount-1)*process_count;
compress(file_name, offset, 0, z - (segCount-1), d, mCount);
offset = offset + mCount;
};
cudaFree(d);
};
};
}
// Dictionary-encode a fixed-width char column slice and write it to
// 'file_name'.  Layout written (and read back by decompress_char):
//   dictionary size (4B), the entries in first-seen order (len bytes each),
//   fit_count (4B), bits_encoded (4B), packed-word count (4B), record count
//   (4B), then the codes bit-packed left-aligned into 64-bit words.
void CudaSet::compress_char(string file_name, unsigned int index, unsigned int mCount, unsigned int offset)
{
    std::map<string,unsigned int> dict;           // entry -> code
    std::vector<string> dict_ordered;             // entries in first-seen order
    std::vector<unsigned int> dict_val;           // one code per record
    map<string,unsigned int>::iterator iter;
    unsigned int bits_encoded;
    unsigned int len = char_size[type_index[index]];
    // BUG FIX: allocate len+1 and pin a terminator.  A field that fills all
    // 'len' bytes is NOT NUL-terminated by strncpy, so the string lookups
    // below read past the buffer.
    char* field = new char[len+1];
    field[len] = '\0';
    for (unsigned int i = 0 ; i < mCount; i++) {
        strncpy(field, h_columns_char[type_index[index]] + (i+offset)*len, len);
        if((iter = dict.find(field)) != dict.end()) {
            dict_val.push_back(iter->second);
        }
        else {
            // BUG FIX: take the size BEFORE inserting.  'dict[f] = dict.size()'
            // has unspecified evaluation order (operator[] may insert first),
            // which could record an off-by-one code for later duplicates.
            unsigned int new_code = (unsigned int)dict.size();
            string f = field;
            dict[f] = new_code;
            dict_val.push_back(new_code);
            dict_ordered.push_back(f);
        };
    };
    delete [] field;
    // +1 keeps at least one bit even for a single-entry dictionary
    bits_encoded = (unsigned int)ceil(log2(double(dict.size()+1)));
    char *cc = new char[len+1];
    unsigned int sz = dict_ordered.size();
    // write to a file
    fstream binary_file(file_name.c_str(),ios::out|ios::binary);
    binary_file.write((char *)&sz, 4);
    for(unsigned int i = 0; i < dict_ordered.size(); i++) {
        memset(&cc[0], 0, len);
        strcpy(cc,dict_ordered[i].c_str());
        binary_file.write(cc, len);
    };
    delete [] cc;
    unsigned int fit_count = 64/bits_encoded;
    unsigned long long int val = 0;
    binary_file.write((char *)&fit_count, 4);
    binary_file.write((char *)&bits_encoded, 4);
    unsigned int curr_cnt = 1;
    unsigned int vals_count = dict_val.size()/fit_count;
    if(!vals_count || dict_val.size()%fit_count)
        vals_count++;
    binary_file.write((char *)&vals_count, 4);
    unsigned int real_count = dict_val.size();
    binary_file.write((char *)&real_count, 4);
    // pack fit_count codes per 64-bit word, most significant first
    for(unsigned int i = 0; i < dict_val.size(); i++) {
        val = val | dict_val[i];
        if(curr_cnt < fit_count)
            val = val << bits_encoded;
        if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
            // a partial last word gets an extra shift so its codes stay
            // left-aligned like a full word's
            if (curr_cnt < fit_count) {
                val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
            };
            curr_cnt = 1;
            binary_file.write((char *)&val, 8);
            val = 0;
        }
        else
            curr_cnt = curr_cnt + 1;
    };
    binary_file.close();
};
// Read up to process_count '|'-delimited rows from 'file_name' into the host
// columns, keeping the FILE* open across calls in 'file_p'.  Returns 1 when
// the file is exhausted (and closes it), 0 when a full batch was read and
// more data remains.  Sets mRecCount to the rows read this call.
// NOTE(review): 'sep' is accepted but the tokenizer is hard-coded to '|'.
int CudaSet::LoadBigFile(const char* file_name, const char* sep )
{
    char line[1000];
    unsigned int current_column, count = 0, index;
    char *p,*t;
    if (file_p == NULL)
        file_p = fopen(file_name, "r");
    if (file_p == NULL) {
        cout << "Could not open file " << file_name << endl;
        exit(0);
    };
    // map a 1-based file column position to our column slot
    map<unsigned int,unsigned int> col_map;
    for(unsigned int i = 0; i < mColumnCount; i++) {
        col_map[cols[i]] = i;
    };
    while (count < process_count && fgets(line, 1000, file_p) != NULL) {
        strtok(line, "\n");   // drop the trailing newline
        current_column = 0;
        for(t=mystrtok(&p,line,'|');t;t=mystrtok(&p,0,'|')) {
            current_column++;
            if(col_map.find(current_column) == col_map.end()) {
                continue;   // column not selected for loading
            };
            index = col_map[current_column];
            if (type[index] == 0) {
                if (strchr(t,'-') == NULL) {
                    (h_columns_int[type_index[index]])[count] = atoll(t);
                }
                else { // handling possible dates: "YYYY-MM-DD" -> YYYYMMDD
                    // BUG FIX: use memmove — the first copy's source and
                    // destination overlap, and strncpy on overlapping
                    // regions is undefined behavior
                    memmove(t+4,t+5,2);
                    memmove(t+6,t+8,2);
                    t[8] = '\0';
                    (h_columns_int[type_index[index]])[count] = atoll(t);
                };
            }
            else if (type[index] == 1) {
                (h_columns_float[type_index[index]])[count] = atoff(t);
            }
            else {//char
                strcpy(h_columns_char[type_index[index]] + count*char_size[type_index[index]], t);
            }
        };
        count++;
    };
    mRecCount = count;
    if(count < process_count) {
        fclose(file_p);
        return 1;
    }
    else
        return 0;
};
// Release host-side resources owned by this set.  Char columns that own
// their buffers (no projection maps active) are deleted outright; int/float
// host vectors are shrunk to give their memory back.  When projection maps
// exist, the source set's device memory is released too.
void CudaSet::free() {
    // BUG FIX: was 'if (!seq) delete seq;', which only "deleted" a NULL
    // pointer and leaked seq in every other case.
    if (seq) {
        delete seq;
        seq = NULL;   // guard against double-free on a second free() call
    };
    for(unsigned int i = 0; i < mColumnCount; i++ ) {
        if(type[i] == 2 && h_columns_char[type_index[i]] && prm.empty()) {
            delete [] h_columns_char[type_index[i]];
            h_columns_char[type_index[i]] = NULL;
        }
        else {
            if(type[i] == 0 ) {
                // resize+shrink_to_fit actually releases the allocation
                h_columns_int[type_index[i]].resize(0);
                h_columns_int[type_index[i]].shrink_to_fit();
            }
            else if(type[i] == 1) {
                h_columns_float[type_index[i]].resize(0);
                h_columns_float[type_index[i]].shrink_to_fit();
            };
        }
    }
    if(!prm.empty()) { // free the device memory of the source set
        map<string,int>::iterator it = columnNames.begin();
        CudaSet* t = varNames[setMap[(*it).first]];
        t->deAllocOnDevice();
    };
    // NOTE(review): 'type' and 'cols' are indexed as arrays elsewhere; if they
    // were allocated with new[], these should be delete[] — confirm at the
    // allocation site.
    delete type;
    delete cols;
    if(!columnGroups.empty() && mRecCount !=0 && grp != NULL)
        cudaFree(grp);
    for(unsigned int i = 0; i < prm.size(); i++)
        delete [] prm[i];
};
// In-place AND of two device bool columns: column1[i] = column1[i] && column2[i].
// column2's device memory is freed; the reused column1 buffer is returned.
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
    thrust::device_ptr<bool> lhs(column1);
    thrust::device_ptr<bool> rhs(column2);
    thrust::transform(lhs, lhs + mRecCount, rhs, lhs, thrust::logical_and<bool>());
    thrust::device_free(rhs);
    return column1;
}
// In-place OR of two device bool columns: column1[i] = column1[i] || column2[i].
// column2's device memory is freed; the reused column1 buffer is returned.
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
    thrust::device_ptr<bool> lhs(column1);
    thrust::device_ptr<bool> rhs(column2);
    thrust::transform(lhs, lhs + mRecCount, rhs, lhs, thrust::logical_or<bool>());
    thrust::device_free(rhs);
    return column1;
}
// Constant-folded comparison of two int scalars: evaluate 'd <op> s' once on
// the host, then fill a new device bool column of mRecCount copies of the
// result (sequence with step 0 == fill).  Caller owns the returned buffer.
// op_type codes: 2 '>', 1 '<', 6 '>=', 5 '<=', 4 '=', anything else '!='.
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
    bool res;
    switch(op_type) {
    case 2:
        res = (d > s);
        break;
    case 1:
        res = (d < s);
        break;
    case 6:
        res = (d >= s);
        break;
    case 5:
        res = (d <= s);
        break;
    case 4:
        res = (d == s);
        break;
    default:
        res = (d != s);
        break;
    };
    thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
    thrust::sequence(p, p+mRecCount,res,(bool)0);
    return thrust::raw_pointer_cast(p);
};
// Constant-folded EPSILON-tolerant comparison of two float scalars; fills a
// new device bool column of mRecCount copies of the host-side result
// (sequence with step 0 == fill).  Caller owns the returned buffer.
// op_type codes: 2 '>', 1 '<', 6 '>=', 5 '<=', 4 '=', anything else '!='.
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
    // factor the three primitive EPSILON tests once
    const bool eq = ((d-s) < EPSILON) && ((d-s) > -EPSILON);
    const bool gt = (d-s) > EPSILON;
    const bool lt = (s-d) > EPSILON;
    bool res;
    if (op_type == 2)       // >
        res = gt;
    else if (op_type == 1)  // <
        res = lt;
    else if (op_type == 6)  // >=
        res = gt || eq;
    else if (op_type == 5)  // <=
        res = lt || eq;
    else if (op_type == 4)  // =
        res = eq;
    else                    // !=
        res = !eq;
    thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
    thrust::sequence(p, p+mRecCount,res,(bool)0);
    return thrust::raw_pointer_cast(p);
}
// Element-wise comparison of an int column against a constant.
// op codes: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', other '!='.
// Returns a freshly device_malloc'ed boolean column of mRecCount entries.
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type)
{
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    thrust::device_ptr<int_type> col(column1);
    switch (op_type) {
    case 2: // >
        thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, thrust::greater<int_type>());
        break;
    case 1: // <
        thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, thrust::less<int_type>());
        break;
    case 6: // >=
        thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, thrust::greater_equal<int_type>());
        break;
    case 5: // <=
        thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, thrust::less_equal<int_type>());
        break;
    case 4: // =
        thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, thrust::equal_to<int_type>());
        break;
    default: // !=
        thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, thrust::not_equal_to<int_type>());
    }
    return thrust::raw_pointer_cast(out);
}
// Element-wise comparison of a float column against a constant, using the
// project's float comparison functors (f_greater, f_less, ...).
// op codes: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', other '!='.
// Returns a freshly device_malloc'ed boolean column of mRecCount entries.
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    thrust::device_ptr<float_type> col(column1);
    switch (op_type) {
    case 2: // >
        thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, f_greater());
        break;
    case 1: // <
        thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, f_less());
        break;
    case 6: // >=
        thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, f_greater_equal_to());
        break;
    case 5: // <=
        thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, f_less_equal());
        break;
    case 4: // =
        thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, f_equal_to());
        break;
    default: // !=
        thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, f_not_equal_to());
    }
    return thrust::raw_pointer_cast(out);
}
// Element-wise comparison of two int columns.
// op codes: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', other '!='.
// Returns a freshly device_malloc'ed boolean column of mRecCount entries.
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type)
{
    thrust::device_ptr<int_type> lhs(column1);
    thrust::device_ptr<int_type> rhs(column2);
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    switch (op_type) {
    case 2: // >
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::greater<int_type>());
        break;
    case 1: // <
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::less<int_type>());
        break;
    case 6: // >=
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::greater_equal<int_type>());
        break;
    case 5: // <=
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::less_equal<int_type>());
        break;
    case 4: // =
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::equal_to<int_type>());
        break;
    default: // !=
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::not_equal_to<int_type>());
    }
    return thrust::raw_pointer_cast(out);
}
// Element-wise comparison of two float columns, using the project's float
// comparison functors (f_greater, f_less, ...).
// op codes: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', other '!='.
// Returns a freshly device_malloc'ed boolean column of mRecCount entries.
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
    thrust::device_ptr<float_type> lhs(column1);
    thrust::device_ptr<float_type> rhs(column2);
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    switch (op_type) {
    case 2: // >
        thrust::transform(lhs, lhs + mRecCount, rhs, out, f_greater());
        break;
    case 1: // <
        thrust::transform(lhs, lhs + mRecCount, rhs, out, f_less());
        break;
    case 6: // >=
        thrust::transform(lhs, lhs + mRecCount, rhs, out, f_greater_equal_to());
        break;
    case 5: // <=
        thrust::transform(lhs, lhs + mRecCount, rhs, out, f_less_equal());
        break;
    case 4: // =
        thrust::transform(lhs, lhs + mRecCount, rhs, out, f_equal_to());
        break;
    default: // !=
        thrust::transform(lhs, lhs + mRecCount, rhs, out, f_not_equal_to());
    }
    return thrust::raw_pointer_cast(out);
}
// Mixed-type comparison: the int column is first promoted to float_type
// (via the long_to_float_type functor) into a temporary device buffer, then
// compared element-wise with the float column.
// op codes: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', other '!='.
// Returns a freshly device_malloc'ed boolean column; the temporary float
// buffer is freed before returning.
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
    thrust::device_ptr<float_type> lhs(column1);
    thrust::device_ptr<int_type> ints(column2);
    thrust::device_ptr<float_type> rhs = thrust::device_malloc<float_type>(mRecCount);
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    thrust::transform(ints, ints + mRecCount, rhs, long_to_float_type());
    switch (op_type) {
    case 2: // >
        thrust::transform(lhs, lhs + mRecCount, rhs, out, f_greater());
        break;
    case 1: // <
        thrust::transform(lhs, lhs + mRecCount, rhs, out, f_less());
        break;
    case 6: // >=
        thrust::transform(lhs, lhs + mRecCount, rhs, out, f_greater_equal_to());
        break;
    case 5: // <=
        thrust::transform(lhs, lhs + mRecCount, rhs, out, f_less_equal());
        break;
    case 4: // =
        thrust::transform(lhs, lhs + mRecCount, rhs, out, f_equal_to());
        break;
    default: // !=
        thrust::transform(lhs, lhs + mRecCount, rhs, out, f_not_equal_to());
    }
    thrust::device_free(rhs);
    return thrust::raw_pointer_cast(out);
}
// Element-wise arithmetic between an int column and a float column.
// The int column is first promoted to float_type into the result buffer,
// which then doubles as one operand and as the output (in-place transform).
// 'reverse' swaps operand order; it only matters for MINUS and the default
// DIV case. Supported ops: "MUL", "ADD", "MINUS", anything else divides.
// Returns a freshly device_malloc'ed float column owned by the caller.
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, int reverse)
{
    thrust::device_ptr<float_type> res = thrust::device_malloc<float_type>(mRecCount);
    thrust::device_ptr<int_type> ints(column1);
    thrust::transform(ints, ints + mRecCount, res, long_to_float_type());
    thrust::device_ptr<float_type> flt(column2);
    thrust::device_ptr<float_type> lhs = (reverse == 0) ? flt : res;
    thrust::device_ptr<float_type> rhs = (reverse == 0) ? res : flt;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::multiplies<float_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::plus<float_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::minus<float_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::divides<float_type>());
    return thrust::raw_pointer_cast(res);
}
// Element-wise arithmetic over two int columns into a new device column.
// 'reverse' swaps operand order (significant for MINUS and the default DIV).
// Supported ops: "MUL", "ADD", "MINUS", anything else divides.
// Returns a freshly device_malloc'ed column owned by the caller.
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, int reverse)
{
    thrust::device_ptr<int_type> res = thrust::device_malloc<int_type>(mRecCount);
    thrust::device_ptr<int_type> c1(column1);
    thrust::device_ptr<int_type> c2(column2);
    thrust::device_ptr<int_type> lhs = (reverse == 0) ? c1 : c2;
    thrust::device_ptr<int_type> rhs = (reverse == 0) ? c2 : c1;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::multiplies<int_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::plus<int_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::minus<int_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::divides<int_type>());
    return thrust::raw_pointer_cast(res);
}
// Element-wise arithmetic over two float columns into a new device column.
// 'reverse' swaps operand order (significant for MINUS and the default DIV).
// Supported ops: "MUL", "ADD", "MINUS", anything else divides.
// Returns a freshly device_malloc'ed column owned by the caller.
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, int reverse)
{
    thrust::device_ptr<float_type> res = thrust::device_malloc<float_type>(mRecCount);
    thrust::device_ptr<float_type> c1(column1);
    thrust::device_ptr<float_type> c2(column2);
    thrust::device_ptr<float_type> lhs = (reverse == 0) ? c1 : c2;
    thrust::device_ptr<float_type> rhs = (reverse == 0) ? c2 : c1;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::multiplies<float_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::plus<float_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::minus<float_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::divides<float_type>());
    return thrust::raw_pointer_cast(res);
}
// Element-wise arithmetic between an int column and an int constant.
// A device buffer is filled with the constant and doubles as the second
// operand and the output (in-place transform), matching the original
// allocation pattern. 'reverse' swaps operand order (matters for MINUS and
// the default DIV). Returns a freshly device_malloc'ed column.
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, int reverse)
{
    thrust::device_ptr<int_type> buf = thrust::device_malloc<int_type>(mRecCount);
    thrust::fill(buf, buf + mRecCount, d);
    thrust::device_ptr<int_type> col(column1);
    thrust::device_ptr<int_type> lhs = (reverse == 0) ? col : buf;
    thrust::device_ptr<int_type> rhs = (reverse == 0) ? buf : col;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, buf, thrust::multiplies<int_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, buf, thrust::plus<int_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, buf, thrust::minus<int_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, buf, thrust::divides<int_type>());
    return thrust::raw_pointer_cast(buf);
}
// Element-wise arithmetic between an int column and a float constant.
// The constant is broadcast into the result buffer; the int column is
// promoted to float_type into a temporary that is freed before returning.
// 'reverse' swaps operand order (matters for MINUS and the default DIV).
// Returns a freshly device_malloc'ed float column owned by the caller.
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, int reverse)
{
    thrust::device_ptr<float_type> buf = thrust::device_malloc<float_type>(mRecCount);
    thrust::fill(buf, buf + mRecCount, d);
    thrust::device_ptr<int_type> ints(column1);
    thrust::device_ptr<float_type> conv = thrust::device_malloc<float_type>(mRecCount);
    thrust::transform(ints, ints + mRecCount, conv, long_to_float_type());
    thrust::device_ptr<float_type> lhs = (reverse == 0) ? conv : buf;
    thrust::device_ptr<float_type> rhs = (reverse == 0) ? buf : conv;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, buf, thrust::multiplies<float_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, buf, thrust::plus<float_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, buf, thrust::minus<float_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, buf, thrust::divides<float_type>());
    thrust::device_free(conv);
    return thrust::raw_pointer_cast(buf);
}
// Element-wise arithmetic between a float column and a float constant,
// using a constant iterator instead of materializing the constant.
// Grouped by operator; 'reverse' selects operand order within each branch.
// Supported ops: "MUL", "ADD", "MINUS", anything else divides.
// Returns a freshly device_malloc'ed float column owned by the caller.
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,int reverse)
{
    thrust::device_ptr<float_type> out = thrust::device_malloc<float_type>(mRecCount);
    thrust::device_ptr<float_type> col(column1);
    if (op_type.compare("MUL") == 0) {
        if (reverse == 0)
            thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, thrust::multiplies<float_type>());
        else
            thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, col, out, thrust::multiplies<float_type>());
    }
    else if (op_type.compare("ADD") == 0) {
        if (reverse == 0)
            thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, thrust::plus<float_type>());
        else
            thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, col, out, thrust::plus<float_type>());
    }
    else if (op_type.compare("MINUS") == 0) {
        if (reverse == 0)
            thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, thrust::minus<float_type>());
        else
            thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, col, out, thrust::minus<float_type>());
    }
    else {
        if (reverse == 0)
            thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, thrust::divides<float_type>());
        else
            thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, col, out, thrust::divides<float_type>());
    }
    return thrust::raw_pointer_cast(out);
}
// Initializes the set from compressed on-disk data (DIM tables): consumes
// the four metadata queues in lock-step, reads the optional "<file>.sort"
// companion (list of presorted column indexes) and each column's
// "<file>.<col>.header" to size the host-side buffers.
// Type codes: 0 = int, 1 = float (decimal[] marks decimals stored as
// floats), 2 = fixed-width char.
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs, char* file_name) // compressed data for DIM tables
{
    mColumnCount = nameRef.size();
    type = new unsigned int[mColumnCount];
    cols = new unsigned int[mColumnCount];
    decimal = new bool[mColumnCount];
    unsigned int cnt;
    file_p = NULL;
    FILE* f;
    char f1[100];
    prealloc_char_size = 0;
    not_compressed = 0;
    mRecCount = Recs;
    oldRecCount = Recs;
    load_file_name = file_name;
    // Optional ".sort" file: a count followed by the column indexes the
    // table is presorted on. Its absence is not an error.
    strcpy(f1, file_name);
    strcat(f1, ".sort");
    cout << "opening " << f1 << endl;
    f = fopen (f1 , "rb" );
    if(f != NULL) {
        unsigned int sz, idx;
        fread((char *)&sz, 4, 1, f);
        for(unsigned int j = 0; j < sz; j++) {
            fread((char *)&idx, 4, 1, f);
            sorted_fields.push(idx);
            //cout << "presorted on " << idx << endl;
        };
        fclose(f);
    };
    tmp_table = 0;
    for(unsigned int i=0; i < mColumnCount; i++) {
        columnNames[nameRef.front()] = i;
        cols[i] = colsRef.front();
        seq = 0;
        // Build "<file>.<col>.header"; the 5th 4-byte word read below is the
        // segment size used to size the pinned host buffers.
        strcpy(f1, file_name);
        strcat(f1,".");
        char col_pos[3];
        itoaa(colsRef.front(),col_pos);
        strcat(f1,col_pos); // read the size of a segment
        strcat(f1, ".header");
        f = fopen (f1 , "rb" );
        if(f == NULL) {
            // BUG FIX: the original dereferenced a NULL FILE* (segfault)
            // when a header file was missing; fail with a diagnostic instead.
            cout << "cannot open " << f1 << endl;
            exit(1);
        };
        for(unsigned int j = 0; j < 5; j++)
            fread((char *)&cnt, 4, 1, f);
        fclose(f);
        //cout << "creating " << f1 << " " << cnt << endl;
        if ((typeRef.front()).compare("int") == 0) {
            type[i] = 0;
            decimal[i] = 0;
            h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >(cnt + 9));
            d_columns_int.push_back(thrust::device_vector<int_type>());
            type_index[i] = h_columns_int.size()-1;
        }
        else if ((typeRef.front()).compare("float") == 0) {
            type[i] = 1;
            decimal[i] = 0;
            h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >(cnt + 9));
            d_columns_float.push_back(thrust::device_vector<float_type >());
            type_index[i] = h_columns_float.size()-1;
        }
        else if ((typeRef.front()).compare("decimal") == 0) {
            // decimals are stored in the float columns, flagged via decimal[]
            type[i] = 1;
            decimal[i] = 1;
            h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >(cnt + 9));
            d_columns_float.push_back(thrust::device_vector<float_type>());
            type_index[i] = h_columns_float.size()-1;
        }
        else {
            // fixed-width char column; buffers are allocated lazily
            type[i] = 2;
            decimal[i] = 0;
            h_columns_char.push_back(NULL);
            d_columns_char.push_back(NULL);
            char_size.push_back(sizeRef.front());
            type_index[i] = h_columns_char.size()-1;
        };
        nameRef.pop();
        typeRef.pop();
        sizeRef.pop();
        colsRef.pop();
    };
};
// Sets up an in-memory, single-segment table: one slot per entry of the
// metadata queues (consumed in lock-step), with the host/device vectors
// created empty so they can be filled later. Recs is the row count.
// Type codes: 0 = int, 1 = float (decimal[] marks decimals stored as
// floats), 2 = fixed-width char (buffers allocated lazily).
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs)
{
mColumnCount = nameRef.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
prealloc_char_size = 0;
file_p = NULL;
tmp_table = 0;
mRecCount = Recs;
oldRecCount = Recs;
segCount = 1;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames[nameRef.front()] = i;
cols[i] = colsRef.front();
seq = 0;
if ((typeRef.front()).compare("int") == 0) {
type[i] = 0;
decimal[i] = 0;
h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
d_columns_int.push_back(thrust::device_vector<int_type>());
// type_index maps the column slot to its position in the per-type vectors
type_index[i] = h_columns_int.size()-1;
}
else if ((typeRef.front()).compare("float") == 0) {
type[i] = 1;
decimal[i] = 0;
h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else if ((typeRef.front()).compare("decimal") == 0) {
// decimals share the float storage, distinguished only by decimal[i]
type[i] = 1;
decimal[i] = 1;
h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else {
// fixed-width char column; width comes from sizeRef
type[i] = 2;
decimal[i] = 0;
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
char_size.push_back(sizeRef.front());
type_index[i] = h_columns_char.size()-1;
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
// Minimal initializer: records the row/column counts, allocates the
// per-column metadata arrays and assigns an identity column order.
// No per-type storage is created here.
void CudaSet::initialize(unsigned int RecordCount, unsigned int ColumnCount)
{
    mRecCount = RecordCount;
    oldRecCount = RecordCount;
    mColumnCount = ColumnCount;
    prealloc_char_size = 0;
    type = new unsigned int[mColumnCount];
    cols = new unsigned int[mColumnCount];
    decimal = new bool[mColumnCount];
    seq = 0;
    // identity mapping: column c sits at position c
    for(unsigned int c = 0; c < mColumnCount; c++)
        cols[c] = c;
};
// Builds an empty, uncompressed result set whose columns mirror the source
// columns named in op_sel (aliased by op_sel_as). Each name is resolved via
// the global setMap/varNames registries; a missing column aborts.
void CudaSet::initialize(queue<string> op_sel, queue<string> op_sel_as)
{
    mRecCount = 0;
    mColumnCount = op_sel.size();
    type = new unsigned int[mColumnCount];
    cols = new unsigned int[mColumnCount];
    decimal = new bool[mColumnCount];
    seq = 0;
    segCount = 1;
    not_compressed = 1;
    col_aliases = op_sel_as;
    prealloc_char_size = 0;
    unsigned int index;
    unsigned int i = 0;
    while(!op_sel.empty()) {
        if(!setMap.count(op_sel.front())) {
            cout << "coudn't find column " << op_sel.front() << endl;
            exit(0);
        };
        CudaSet* a = varNames[setMap[op_sel.front()]];
        if(i == 0)
            maxRecs = a->maxRecs;
        index = a->columnNames[op_sel.front()];
        cols[i] = i;
        // BUG FIX: the decimal flag must be read at the source column's own
        // position ('index'), not at our output position 'i' — the two only
        // coincide when the selected columns happen to be in source order
        // (cf. the b-branch of the two-table initialize, which uses 'index').
        decimal[i] = a->decimal[index];
        columnNames[op_sel.front()] = i;
        if (a->type[index] == 0) {
            d_columns_int.push_back(thrust::device_vector<int_type>());
            h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
            type[i] = 0;
            type_index[i] = h_columns_int.size()-1;
        }
        else if ((a->type)[index] == 1) {
            d_columns_float.push_back(thrust::device_vector<float_type>());
            h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
            type[i] = 1;
            type_index[i] = h_columns_float.size()-1;
        }
        else {
            // fixed-width char column: inherit the source's width
            h_columns_char.push_back(NULL);
            d_columns_char.push_back(NULL);
            type[i] = 2;
            type_index[i] = h_columns_char.size()-1;
            char_size.push_back(a->char_size[a->type_index[index]]);
        };
        i++;
        op_sel.pop();
    };
}
// Builds an empty result set for a two-table (join-style) operation: each
// name in op_sel is resolved against table 'a' first, then table 'b', and a
// matching column slot is created here with the source's type and width.
// Names found in neither table are silently dropped (they were excluded
// from mColumnCount up front).
void CudaSet::initialize(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
    mRecCount = 0;
    mColumnCount = 0;
    // Count only the selected names that exist in a or b.
    queue<string> q_cnt(op_sel);
    unsigned int i = 0;
    set<string> field_names;
    while(!q_cnt.empty()) {
        if(a->columnNames.find(q_cnt.front()) != a->columnNames.end() || b->columnNames.find(q_cnt.front()) != b->columnNames.end()) {
            field_names.insert(q_cnt.front());
        };
        q_cnt.pop();
    }
    mColumnCount = field_names.size();
    type = new unsigned int[mColumnCount];
    cols = new unsigned int[mColumnCount];
    decimal = new bool[mColumnCount];
    maxRecs = b->maxRecs;
    map<string,int>::iterator it;
    seq = 0;
    segCount = 1;
    not_compressed = 1;
    col_aliases = op_sel_as;
    prealloc_char_size = 0;
    unsigned int index;
    i = 0;
    // NOTE(review): this loop stops at the first name already present in
    // columnNames instead of skipping it — confirm that op_sel never
    // contains duplicates before a new name.
    while(!op_sel.empty() && (columnNames.find(op_sel.front()) == columnNames.end())) {
        if((it = a->columnNames.find(op_sel.front())) != a->columnNames.end()) {
            index = it->second;
            cols[i] = i;
            // BUG FIX: read the decimal flag at the source column's position
            // ('index'), not at our output position 'i' — the b-branch below
            // already does this correctly.
            decimal[i] = a->decimal[index];
            columnNames[op_sel.front()] = i;
            if (a->type[index] == 0) {
                d_columns_int.push_back(thrust::device_vector<int_type>());
                h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
                type[i] = 0;
                type_index[i] = h_columns_int.size()-1;
            }
            else if ((a->type)[index] == 1) {
                d_columns_float.push_back(thrust::device_vector<float_type>());
                h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
                type[i] = 1;
                type_index[i] = h_columns_float.size()-1;
            }
            else {
                h_columns_char.push_back(NULL);
                d_columns_char.push_back(NULL);
                type[i] = 2;
                type_index[i] = h_columns_char.size()-1;
                char_size.push_back(a->char_size[a->type_index[index]]);
            };
            i++;
        }
        else if((it = b->columnNames.find(op_sel.front())) != b->columnNames.end()) {
            index = it->second;
            columnNames[op_sel.front()] = i;
            cols[i] = i;
            decimal[i] = b->decimal[index];
            if ((b->type)[index] == 0) {
                d_columns_int.push_back(thrust::device_vector<int_type>());
                h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
                type[i] = 0;
                type_index[i] = h_columns_int.size()-1;
            }
            else if ((b->type)[index] == 1) {
                d_columns_float.push_back(thrust::device_vector<float_type>());
                h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
                type[i] = 1;
                type_index[i] = h_columns_float.size()-1;
            }
            else {
                h_columns_char.push_back(NULL);
                d_columns_char.push_back(NULL);
                type[i] = 2;
                type_index[i] = h_columns_char.size()-1;
                char_size.push_back(b->char_size[b->type_index[index]]);
            };
            i++;
        }
        op_sel.pop();
    };
};
// Maps a relational opcode to its counterpart used when the operands are
// flipped: 2 '>' <-> 5 '<=' and 1 '<' <-> 6 '>='; equality-style opcodes
// (4 '=', anything else '!=') pass through unchanged.
int_type reverse_op(int_type op_type)
{
    switch (op_type) {
    case 2:  return 5;        // >  -> <=
    case 1:  return 6;        // <  -> >=
    case 6:  return 1;        // >= -> <
    case 5:  return 2;        // <= -> >
    default: return op_type;  // = / != are unaffected
    }
}
// Returns the number of bytes of free device memory, or 0 when the query
// fails. (The original returned an uninitialized value if cudaMemGetInfo
// reported an error.)
size_t getFreeMem()
{
    size_t available = 0, total = 0;
    if (cudaMemGetInfo(&available, &total) != cudaSuccess)
        return 0;
    return available;
} ;
// Prepares device-side storage for the columns named in 'fields'.
// Filtered sets (a->prm non-empty) share one global scratch buffer:
// alloced_tmp is (re)grown to hold the widest column of one full segment of
// the source table. Unfiltered sets get a per-column device allocation
// sized to one segment, skipping columns already resident on the device.
void allocColumns(CudaSet* a, queue<string> fields)
{
if(!a->prm.empty()) {
// widest per-record byte size across all of a's columns
unsigned int max_sz = max_tmp(a) ;
CudaSet* t = varNames[setMap[fields.front()]];
// grow-only reuse of the global scratch buffer
if(max_sz*t->maxRecs > alloced_sz) {
if(alloced_sz) {
cudaFree(alloced_tmp);
};
cudaMalloc((void **) &alloced_tmp, max_sz*t->maxRecs);
alloced_sz = max_sz*t->maxRecs;
}
}
else {
while(!fields.empty()) {
if(setMap.count(fields.front()) > 0) {
unsigned int idx = a->columnNames[fields.front()];
bool onDevice = 0;
// a column is "on device" when its device vector/buffer is non-empty
if(a->type[idx] == 0) {
if(a->d_columns_int[a->type_index[idx]].size() > 0) {
onDevice = 1;
}
}
else if(a->type[idx] == 1) {
if(a->d_columns_float[a->type_index[idx]].size() > 0) {
onDevice = 1;
};
}
else {
if((a->d_columns_char[a->type_index[idx]]) != NULL) {
onDevice = 1;
};
};
if (!onDevice) {
// NOTE(review): a->prm is empty on this path, so the else branch
// below looks unreachable — confirm.
if(a->prm.empty()) {
a->allocColumnOnDevice(idx, a->maxRecs);
}
else {
a->allocColumnOnDevice(idx, largest_prm(a));
};
}
}
fields.pop();
};
};
}
// Size (in records) of the biggest filter segment of 'a'; falls back to
// a->maxRecs when every per-segment count is zero.
unsigned long long int largest_prm(CudaSet* a)
{
    unsigned long long int best = 0;
    for(unsigned int s = 0; s < a->prm_count.size(); s++) {
        if(a->prm_count[s] > best)
            best = a->prm_count[s];
    }
    return (best == 0) ? a->maxRecs : best;
};
// Materializes one segment of column 'field' into set 'a' by applying a's
// filter to the source column in 't'. Segments marked 'R' carry an explicit
// row-index list (a->prm[segment]) and are gathered; other segments are
// copied through unchanged. On return a->mRecCount holds the segment's
// record count.
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, unsigned int& count)
{
unsigned int tindex = t->columnNames[field];
unsigned int idx = a->columnNames[field];
//find the largest possible size of a gathered segment
if(!a->onDevice(idx)) {
unsigned int max_count = 0;
for(unsigned int i = 0; i < a->prm.size(); i++)
if (a->prm_count[i] > max_count)
max_count = a->prm_count[i];
a->allocColumnOnDevice(idx, max_count);
};
unsigned int g_size = a->prm_count[segment];
if(a->prm_index[segment] == 'R') {
if(a->prm_d.size() == 0) // find the largest prm segment
a->prm_d.resize(largest_prm(a));
// curr_segment caches the last uploaded index list so consecutive
// gathers on the same segment skip the host-to-device copy
if(curr_segment != segment) {
// NOTE(review): copies 4*g_size bytes — assumes prm entries are
// 4-byte values (unsigned int); confirm against prm's element type.
cudaMemcpy((void**)(thrust::raw_pointer_cast(a->prm_d.data())), (void**)a->prm[segment],
4*g_size, cudaMemcpyHostToDevice);
curr_segment = segment;
};
mygather(tindex, idx, a, t, count, g_size);
}
else {
mycopy(tindex, idx, a, t, count, g_size);
};
a->mRecCount = g_size;
}
// Number of records in the given segment of 'a': every segment holds
// a->maxRecs except possibly the last, which gets the remainder relative to
// the global oldCount total.
unsigned int getSegmentRecCount(CudaSet* a, unsigned int segment) {
    const bool is_last = (segment == a->segCount-1);
    return is_last ? (oldCount - a->maxRecs*segment) : a->maxRecs;
}
// Brings one segment of every distinct column named in 'fields' onto the
// device. Filtered sets (a->prm non-empty) copy the source segment into the
// shared scratch buffer (alloced_switch) and gather the filtered rows;
// unfiltered sets copy their own segment directly. 'count' is the running
// output offset passed through to gatherColumns.
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, unsigned int& count)
{
// 'uniques' prevents copying the same column twice when it appears more
// than once in the field list
set<string> uniques;
CudaSet *t;
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && setMap.count(fields.front()) > 0) {
if(!a->prm.empty()) {
t = varNames[setMap[fields.front()]];
if(a->prm_count[segment]) {
// route the source copy through the shared scratch buffer
alloced_switch = 1;
t->CopyColumnToGpu(t->columnNames[fields.front()], segment);
gatherColumns(a, t, fields.front(), segment, count);
alloced_switch = 0;
}
else
// an empty filter segment yields no rows
a->mRecCount = 0;
}
else {
a->CopyColumnToGpu(a->columnNames[fields.front()], segment);
};
uniques.insert(fields.front());
};
fields.pop();
};
}
// Registers one filter segment on 'b', derived from segment 'segment' of
// 'a'. val == 'A' means every row of the segment passes: the segment's full
// record count is added to b's total. Any other marker records an empty
// segment (no explicit index list is attached here — prm gets NULL).
void setPrm(CudaSet* a, CudaSet* b, char val, unsigned int segment) {
    b->prm.push_back(NULL);
    b->prm_index.push_back(val);
    if (val != 'A') {
        b->prm_count.push_back(0);
    }
    else {
        unsigned int recs = getSegmentRecCount(a, segment);
        b->mRecCount = b->mRecCount + recs;
        b->prm_count.push_back(recs);
    }
}
// Gathers g_size rows of t's column 'tindex' into a's device column 'idx'
// starting at record 'offset', using the row indexes previously uploaded to
// a->prm_d. When the global alloced_switch flag is set, the source data
// lives in the shared scratch buffer alloced_tmp instead of t's own device
// column.
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int offset, unsigned int g_size)
{
if(t->type[tindex] == 0) {
// integer column
if(!alloced_switch) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_int[t->type_index[tindex]].begin(), a->d_columns_int[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
d_col, a->d_columns_int[a->type_index[idx]].begin() + offset);
};
}
else if(t->type[tindex] == 1) {
// float column
if(!alloced_switch) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_float[t->type_index[tindex]].begin(), a->d_columns_float[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
d_col, a->d_columns_float[a->type_index[idx]].begin() + offset);
};
}
else {
// fixed-width char column: gathered by the str_gather kernel, with the
// destination offset scaled by the column's string width
if(!alloced_switch) {
str_gather((void*)thrust::raw_pointer_cast(a->prm_d.data()), g_size,
(void*)t->d_columns_char[t->type_index[tindex]], (void*)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), a->char_size[a->type_index[idx]] );
}
else {
str_gather((void*)thrust::raw_pointer_cast(a->prm_d.data()), g_size,
alloced_tmp, (void*)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), a->char_size[a->type_index[idx]] );
};
}
};
// Copies g_size rows of t's column 'tindex' into a's device column 'idx'
// starting at record 'offset', preserving row order (no permutation —
// contrast with mygather). When the global alloced_switch flag is set, the
// source data lives in the shared scratch buffer alloced_tmp instead of t's
// own device column.
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int offset, unsigned int g_size)
{
    if(t->type[tindex] == 0) {
        // integer column
        if(!alloced_switch) {
            thrust::copy(t->d_columns_int[t->type_index[tindex]].begin(), t->d_columns_int[t->type_index[tindex]].begin() + g_size,
                         a->d_columns_int[a->type_index[idx]].begin() + offset);
        }
        else {
            thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
            thrust::copy(d_col, d_col + g_size, a->d_columns_int[a->type_index[idx]].begin() + offset);
        };
    }
    else if(t->type[tindex] == 1) {
        // float column
        if(!alloced_switch) {
            thrust::copy(t->d_columns_float[t->type_index[tindex]].begin(), t->d_columns_float[t->type_index[tindex]].begin() + g_size,
                         a->d_columns_float[a->type_index[idx]].begin() + offset);
        }
        else {
            thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
            thrust::copy(d_col, d_col + g_size, a->d_columns_float[a->type_index[idx]].begin() + offset);
        };
    }
    else {
        // fixed-width char column: raw device-to-device byte copy.
        // FIX: the original cast the pointer arguments to (void**); the value
        // was unchanged so it worked, but cudaMemcpy takes void* — the casts
        // now say what they mean.
        if(!alloced_switch) {
            cudaMemcpy((void*)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), (void*)t->d_columns_char[t->type_index[tindex]],
                       g_size*t->char_size[t->type_index[tindex]], cudaMemcpyDeviceToDevice);
        }
        else {
            cudaMemcpy((void*)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), alloced_tmp,
                       g_size*t->char_size[t->type_index[tindex]], cudaMemcpyDeviceToDevice);
        };
    };
};
// Loads onto the device every column of 'right' that appears in c1 (plus
// the join column f2, pushed last unless this is a string join, in which
// case f2 already stays in place within c1). For filtered sets the rows are
// gathered segment by segment; otherwise whole columns are copied.
// Returns the number of records loaded; 'rcount' receives the allocation
// size used per column.
unsigned int load_queue(queue<string> c1, CudaSet* right, bool str_join, string f2, unsigned int &rcount)
{
queue<string> cc;
// keep only names that exist in 'right'; defer f2 so it ends up last
while(!c1.empty()) {
if(right->columnNames.find(c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() || str_join) {
cc.push(c1.front());
};
};
c1.pop();
};
if(!str_join && right->columnNames.find(f2) != right->columnNames.end()) {
cc.push(f2);
};
unsigned int cnt_r = 0;
if(!right->prm.empty()) {
allocColumns(right, cc);
// total filtered rows across all segments
// NOTE(review): std::accumulate with an int initial value — could
// overflow for very large tables; confirm expected table sizes.
rcount = std::accumulate(right->prm_count.begin(), right->prm_count.end(), 0 );
}
else
rcount = right->mRecCount;
queue<string> ct(cc);
reset_offsets();
while(!ct.empty()) {
right->allocColumnOnDevice(right->columnNames[ct.front()], rcount);
ct.pop();
};
ct = cc;
if(right->prm.empty()) {
//copy all records
while(!ct.empty()) {
right->CopyColumnToGpu(right->columnNames[ct.front()]);
ct.pop();
};
cnt_r = right->mRecCount;
}
else {
//copy and gather all records
for(unsigned int i = 0; i < right->segCount; i++) {
reset_offsets();
copyColumns(right, cc, i, cnt_r);
cnt_r = cnt_r + right->prm_count[i];
};
};
return cnt_r;
}
// Widest fixed-size string among all char columns of 'a' (0 if none).
unsigned int max_char(CudaSet* a)
{
    unsigned int widest = 0;
    for(unsigned int c = 0; c < a->char_size.size(); c++) {
        if (a->char_size[c] > widest)
            widest = a->char_size[c];
    }
    return widest;
};
// Widest fixed-size string among the named columns of 'a' that are char
// columns (type code 2); 0 if none of them are.
unsigned int max_char(CudaSet* a, set<string> field_names)
{
    unsigned int widest = 0;
    set<string>::iterator it;
    for (it = field_names.begin(); it != field_names.end(); ++it) {
        unsigned int col = a->columnNames[*it];
        if (a->type[col] == 2 && a->char_size[a->type_index[col]] > widest)
            widest = a->char_size[a->type_index[col]];
    };
    return widest;
};
// Widest fixed-size string among the queued column names of 'a' that are
// char columns (type code 2); 0 if none of them are. The queue is consumed
// (taken by value, so the caller's copy is untouched).
unsigned int max_char(CudaSet* a, queue<string> field_names)
{
    unsigned int widest = 0;
    for (; !field_names.empty(); field_names.pop()) {
        unsigned int col = a->columnNames[field_names.front()];
        if (a->type[col] == 2 && a->char_size[a->type_index[col]] > widest)
            widest = a->char_size[a->type_index[col]];
    };
    return widest;
};
// Largest per-record byte width across all columns of 'a': int_size for
// integer columns, float_size for floats, or the widest char column —
// whichever is biggest. Used to size the shared scratch buffer.
unsigned int max_tmp(CudaSet* a)
{
    unsigned int widest = 0;
    for(unsigned int c = 0; c < a->mColumnCount; c++) {
        if(a->type[c] == 0 && int_size > widest)
            widest = int_size;
        else if(a->type[c] == 1 && float_size > widest)
            widest = float_size;
    };
    unsigned int widest_char = max_char(a);
    return (widest_char > widest) ? widest_char : widest;
};
// Zeroes every entry of the global str_offset map (per-column running
// offsets used while staging string data), without removing the keys.
void reset_offsets() {
    for (map<unsigned int, unsigned int>::iterator it = str_offset.begin(); it != str_offset.end(); ++it)
        it->second = 0;
};
// Decides how many segments table 'a' needs so that the listed columns fit
// comfortably in device memory: if one copy of the columns would take more
// than a third of free memory, split into segments sized to a fifth of it.
void setSegments(CudaSet* a, queue<string> cols)
{
size_t mem_available = getFreeMem();
// per-record byte total over the listed columns
// NOTE(review): non-char columns are costed at int_size even when they are
// floats — presumably int_size == float_size here; confirm.
unsigned int tot_sz = 0, idx;
while(!cols.empty()) {
idx = a->columnNames[cols.front()];
if(a->type[idx] != 2)
tot_sz = tot_sz + int_size;
else
tot_sz = tot_sz + a->char_size[a->type_index[idx]];
cols.pop();
};
// NOTE(review): a->mRecCount*tot_sz is computed in unsigned int — could
// overflow for very large tables; confirm expected sizes.
if(a->mRecCount*tot_sz > mem_available/3) { //default is 3
a->segCount = (a->mRecCount*tot_sz)/(mem_available/5) + 1;
a->maxRecs = (a->mRecCount/a->segCount)+1;
};
};
// One pass of a multi-key sort over a fixed-width char key on the device:
// brings the keys into the current permutation order (via 'tmp'), then
// stable-sorts them and updates 'permutation' to match. 'len' is the fixed
// string width; SortType "DESC" selects descending order.
void update_permutation_char(char* key, unsigned int* permutation, unsigned int RecCount, string SortType, char* tmp, unsigned int len)
{
    str_gather((void*)permutation, RecCount, (void*)key, (void*)tmp, len);
    // stable_sort the permuted keys and update the permutation
    const bool descending = (SortType.compare("DESC") == 0);
    str_sort(tmp, RecCount, permutation, descending ? 1 : 0, len);
}
// Host-side counterpart of update_permutation_char: gather the string
// keys in permutation order, then stable-sort them on the CPU while
// permuting 'permutation' to match (1 = descending, 0 = ascending).
void update_permutation_char_host(char* key, unsigned int* permutation, unsigned int RecCount, string SortType, char* tmp, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)tmp, len);
if (SortType.compare("DESC") == 0 )
str_sort_host(tmp, RecCount, permutation, 1, len);
else
str_sort_host(tmp, RecCount, permutation, 0, len);
}
// Permute the device string column 'key' in place: stage a copy in 'tmp'
// (device-to-device), then gather it back into 'key' in permutation order.
// NOTE(review): the cudaMemcpy return value is ignored — a failed copy
// would silently corrupt the column; consider checking it.
void apply_permutation_char(char* key, unsigned int* permutation, unsigned int RecCount, char* tmp, unsigned int len)
{
// copy keys to temporary vector
cudaMemcpy( (void*)tmp, (void*) key, RecCount*len, cudaMemcpyDeviceToDevice);
// permute the keys
str_gather((void*)permutation, RecCount, (void*)tmp, (void*)key, len);
}
// Host-side variant: gather the fixed-width strings of 'key' into 'res'
// in permutation order. Unlike the device version, 'key' is left
// unmodified and the permuted data lives in 'res'.
void apply_permutation_char_host(char* key, unsigned int* permutation, unsigned int RecCount, char* res, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)res, len);
}
|
4884fb3e9e71ac37b0a1a67e712c6200bf01951a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef ENABLE_CURD
#include<curd_lib_host.h>
#endif
/*
* Copyright (c) 2016 University of Cordoba and University of Illinois
* All rights reserved.
*
* Developed by: IMPACT Research Group
* University of Cordoba and University of Illinois
* http://impact.crhc.illinois.edu/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* with the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* > Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
* > Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimers in the
* documentation and/or other materials provided with the distribution.
* > Neither the names of IMPACT Research Group, University of Cordoba,
* University of Illinois nor the names of its contributors may be used
* to endorse or promote products derived from this Software without
* specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
* THE SOFTWARE.
*
*/
#define _CUDA_COMPILER_
#include "support/common.h"
// CUDA kernel ------------------------------------------------------------------------------------------
// Persistent task-queue kernel: each block keeps drawing task tickets from the
// global 'consumed' counter until the queue is exhausted. For SIGNAL_WORK_KERNEL
// tasks it builds an n_bins histogram of one frame of 'data' (8-bit pixel
// values mapped onto [0, n_bins)) in shared memory, then flushes it to 'histo'.
// Dynamic shared memory layout: [ int next | task_t t | int l_histo[n_bins] ].
__global__ void TQHistogram_gpu(task_t *queue, int *data, int *histo, int offset,
    int gpuQueueSize, int *consumed, int frame_size, int n_bins) {
    extern __shared__ int l_mem[];
    int* next = l_mem;
    task_t* t = (task_t*)&next[1];
    int* l_histo = (int*)&t[1];
    const int tid = threadIdx.x;
    const int tile_size = blockDim.x;
    // Fetch the first task. Guard the queue read: every block eventually draws
    // a ticket >= gpuQueueSize, and an unguarded queue[*next] would read past
    // the end of the queue.
    if(tid == 0) {
        *next = atomicAdd(consumed, 1);
        if(*next < gpuQueueSize) {
            t->id = queue[*next].id;
            t->op = queue[*next].op;
        }
    }
    __syncthreads();
    while(*next < gpuQueueSize) {
        // Compute task
        if(t->op == SIGNAL_WORK_KERNEL) {
            // Reset local histogram
            for(int i = tid; i < n_bins; i += tile_size) {
                l_histo[i] = 0;
            }
            __syncthreads();
            // Accumulate this frame into the shared histogram.
            for(int i = tid; i < frame_size; i += tile_size) {
                int value = (data[(t->id - offset) * frame_size + i] * n_bins) >> 8;
                atomicAdd(&l_histo[value], 1);
            }
            __syncthreads();
            // Store in global memory
            for(int i = tid; i < n_bins; i += tile_size) {
                histo[(t->id - offset) * n_bins + i] = l_histo[i];
            }
        }
        // Barrier before refetching: without it, thread 0 can overwrite the
        // shared task descriptor while other threads are still reading t->id
        // in the store loop above.
        __syncthreads();
        if(tid == 0) {
            // Fetch the next task (same out-of-range guard as above).
            *next = atomicAdd(consumed, 1);
            if(*next < gpuQueueSize) {
                t->id = queue[*next].id;
                t->op = queue[*next].op;
            }
        }
        __syncthreads();
    }
}
// Host wrapper: launch TQHistogram_gpu on a 1-D grid of 'blocks' blocks of
// 'threads' threads with l_mem_size bytes of dynamic shared memory
// (must cover: 1 int + 1 task_t + n_bins ints). Returns the launch status.
hipError_t call_TQHistogram_gpu(int blocks, int threads, task_t *queue, int *data, int *histo,
int offset, int gpuQueueSize, int *consumed, int frame_size, int n_bins, int l_mem_size){
dim3 dimGrid(blocks);
dim3 dimBlock(threads);
#ifdef ENABLE_CURD
allocateReadWriteSets(dimGrid, dimBlock);
#endif
hipLaunchKernelGGL(( TQHistogram_gpu), dim3(dimGrid), dim3(dimBlock), l_mem_size, 0, queue, data, histo,
offset, gpuQueueSize, consumed, frame_size, n_bins);
#ifdef ENABLE_CURD
freeReadWriteSets(dimGrid, dimBlock);
#endif
hipError_t err = hipGetLastError();
return err;
}
| 4884fb3e9e71ac37b0a1a67e712c6200bf01951a.cu | #ifdef ENABLE_CURD
#include<curd_lib_host.h>
#endif
/*
* Copyright (c) 2016 University of Cordoba and University of Illinois
* All rights reserved.
*
* Developed by: IMPACT Research Group
* University of Cordoba and University of Illinois
* http://impact.crhc.illinois.edu/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* with the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* > Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
* > Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimers in the
* documentation and/or other materials provided with the distribution.
* > Neither the names of IMPACT Research Group, University of Cordoba,
* University of Illinois nor the names of its contributors may be used
* to endorse or promote products derived from this Software without
* specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
* THE SOFTWARE.
*
*/
#define _CUDA_COMPILER_
#include "support/common.h"
// CUDA kernel ------------------------------------------------------------------------------------------
// Persistent task-queue kernel: each block keeps drawing task tickets from the
// global 'consumed' counter until the queue is exhausted. For SIGNAL_WORK_KERNEL
// tasks it builds an n_bins histogram of one frame of 'data' (8-bit pixel
// values mapped onto [0, n_bins)) in shared memory, then flushes it to 'histo'.
// Dynamic shared memory layout: [ int next | task_t t | int l_histo[n_bins] ].
__global__ void TQHistogram_gpu(task_t *queue, int *data, int *histo, int offset,
    int gpuQueueSize, int *consumed, int frame_size, int n_bins) {
    extern __shared__ int l_mem[];
    int* next = l_mem;
    task_t* t = (task_t*)&next[1];
    int* l_histo = (int*)&t[1];
    const int tid = threadIdx.x;
    const int tile_size = blockDim.x;
    // Fetch the first task. Guard the queue read: every block eventually draws
    // a ticket >= gpuQueueSize, and an unguarded queue[*next] would read past
    // the end of the queue.
    if(tid == 0) {
        *next = atomicAdd(consumed, 1);
        if(*next < gpuQueueSize) {
            t->id = queue[*next].id;
            t->op = queue[*next].op;
        }
    }
    __syncthreads();
    while(*next < gpuQueueSize) {
        // Compute task
        if(t->op == SIGNAL_WORK_KERNEL) {
            // Reset local histogram
            for(int i = tid; i < n_bins; i += tile_size) {
                l_histo[i] = 0;
            }
            __syncthreads();
            // Accumulate this frame into the shared histogram.
            for(int i = tid; i < frame_size; i += tile_size) {
                int value = (data[(t->id - offset) * frame_size + i] * n_bins) >> 8;
                atomicAdd(&l_histo[value], 1);
            }
            __syncthreads();
            // Store in global memory
            for(int i = tid; i < n_bins; i += tile_size) {
                histo[(t->id - offset) * n_bins + i] = l_histo[i];
            }
        }
        // Barrier before refetching: without it, thread 0 can overwrite the
        // shared task descriptor while other threads are still reading t->id
        // in the store loop above.
        __syncthreads();
        if(tid == 0) {
            // Fetch the next task (same out-of-range guard as above).
            *next = atomicAdd(consumed, 1);
            if(*next < gpuQueueSize) {
                t->id = queue[*next].id;
                t->op = queue[*next].op;
            }
        }
        __syncthreads();
    }
}
// Host wrapper: launch TQHistogram_gpu on a 1-D grid of 'blocks' blocks of
// 'threads' threads with l_mem_size bytes of dynamic shared memory
// (must cover: 1 int + 1 task_t + n_bins ints). Returns the launch status.
cudaError_t call_TQHistogram_gpu(int blocks, int threads, task_t *queue, int *data, int *histo,
int offset, int gpuQueueSize, int *consumed, int frame_size, int n_bins, int l_mem_size){
dim3 dimGrid(blocks);
dim3 dimBlock(threads);
#ifdef ENABLE_CURD
allocateReadWriteSets(dimGrid, dimBlock);
#endif
TQHistogram_gpu<<<dimGrid, dimBlock, l_mem_size>>>(queue, data, histo,
offset, gpuQueueSize, consumed, frame_size, n_bins);
#ifdef ENABLE_CURD
freeReadWriteSets(dimGrid, dimBlock);
#endif
cudaError_t err = cudaGetLastError();
return err;
}
|
0ac691abc0fb033f476b5a85bff52015b49763e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include "yololayer.h"
#include "utils_yolov5.h"
using namespace Yolo;
namespace nvinfer1
{
// Build the plugin from explicit network parameters. mAnchor is a pinned
// host array holding one device pointer per YOLO scale; each scale's
// anchor values (CHECK_COUNT pairs of floats) are copied to the device.
YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel)
{
mClassCount = classCount;
mYoloV5NetWidth = netWidth;
mYoloV5NetHeight = netHeight;
mMaxOutObject = maxOut;
mYoloKernel = vYoloKernel;
mKernelCount = vYoloKernel.size();
CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
}
}
// Release the per-scale device anchor buffers and the pinned pointer array.
YoloLayerPlugin::~YoloLayerPlugin()
{
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(hipFree(mAnchor[ii]));
}
CUDA_CHECK(hipHostFree(mAnchor));
}
// create the plugin at runtime from a byte stream
// Deserializing constructor: reads the scalar members in the same order
// serialize() writes them, then the raw YoloKernel array, and rebuilds the
// per-scale device anchor buffers. The final assert verifies the stream
// was consumed exactly.
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
read(d, mYoloV5NetWidth);
read(d, mYoloV5NetHeight);
read(d, mMaxOutObject);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(mYoloKernel.data(), d, kernelSize);
d += kernelSize;
CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
}
assert(d == a + length);
}
// Write the scalar members then the raw YoloKernel array; must stay in
// sync with the deserializing constructor above.
void YoloLayerPlugin::serialize(void* buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
write(d, mYoloV5NetWidth);
write(d, mYoloV5NetHeight);
write(d, mMaxOutObject);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(d, mYoloKernel.data(), kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
// Total byte count produced by serialize().
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject);
}
// Nothing to set up beyond the constructors; 0 = success.
int YoloLayerPlugin::initialize()
{
return 0;
}
// Output shape: one float detection counter followed by mMaxOutObject
// Detection records, flattened to (totalsize + 1, 1, 1).
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
// No reconfiguration needed: all layout information comes from the constructor.
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
// Type/version pair identifies this plugin to the TensorRT plugin registry;
// must match the creator's getPluginName()/getPluginVersion().
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
// Deep copy via the parameter constructor (re-allocates device anchors).
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel);
p->setPluginNamespace(mPluginNamespace);
return p;
}
// Sigmoid activation used to decode raw network outputs.
__device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); };
// Decode one YOLO scale: each thread handles one grid cell of one batch
// image (noElements = yoloWidth * yoloHeight * batchSize) and walks its 3
// anchors. Surviving boxes are appended to the per-image output slot, whose
// first float is an atomically-incremented detection counter.
__global__ void CalDetection(const float *input, float *output, int noElements,
const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes, int outputElem)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
// Split the flat index into (batch image, cell-within-grid).
int total_grid = yoloWidth * yoloHeight;
int bnIdx = idx / total_grid;
idx = idx - total_grid * bnIdx;
// Per-anchor channel count: x, y, w, h, objectness + class scores.
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
for (int k = 0; k < 3; ++k) {
// Objectness; skip low-confidence anchors early.
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (box_prob < IGNORE_THRESH) continue;
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
// Claim a slot via atomicAdd on the float counter at the start of this
// image's output block. NOTE(review): the counter keeps incrementing
// past maxoutobject, so the host must clamp it when reading results.
float *res_count = output + bnIdx * outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= maxoutobject) return;
char* data = (char *)res_count + sizeof(float) + count * sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
//Location
// pytorch:
// y = x[i].sigmoid()
// y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy
// y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
// X: (sigmoid(tx) + cx)/FeaturemapW * netwidth
det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth;
det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight;
// W: (Pw * e^tw) / FeaturemapW * netwidth
// v5: https://github.com/ultralytics/yolov5/issues/471
det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]);
det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k];
det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]);
det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1];
det->conf = box_prob * max_cls_prob;
det->class_id = class_id;
}
}
// Launch CalDetection once per YOLO scale after zeroing each batch image's
// detection counter (the first float of its output slot). All device work
// is issued on 'stream'.
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, hipStream_t stream, int batchSize)
{
    int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float);
    // Stream-ordered memset so the zeroing is sequenced before the kernels
    // on the same stream (the original used a synchronous hipMemset).
    for (int idx = 0; idx < batchSize; ++idx) {
        CUDA_CHECK(hipMemsetAsync(output + idx * outputElem, 0, sizeof(float), stream));
    }
    for (unsigned int i = 0; i < mYoloKernel.size(); ++i)
    {
        const auto& yolo = mYoloKernel[i];
        int numElem = yolo.width * yolo.height * batchSize;
        if (numElem <= 0)
            continue;
        // Clamp a LOCAL copy of the block size: shrinking the member itself
        // (as the original did) permanently degraded the launch configuration
        // for every later call once a small scale was seen.
        int threads = mThreadCount;
        if (numElem < threads)
            threads = numElem;
        //printf("Net: %d %d \n", mYoloV5NetWidth, mYoloV5NetHeight);
        // Launch on the caller's stream: the original launched on the default
        // stream, ignoring the stream TensorRT passed to enqueue().
        CalDetection<<<(numElem + threads - 1) / threads, threads, 0, stream>>>
            (inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float *)mAnchor[i], mClassCount, outputElem);
    }
}
// TensorRT entry point: forwards the batch to forwardGpu; 0 = success.
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream)
{
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
// Creator statics: empty field collection (this plugin is configured via
// createPlugin's PluginFieldCollection argument, not registered fields).
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
// Must match YoloLayerPlugin::getPluginType()/getPluginVersion().
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
// Build a plugin from caller-supplied fields: "netdata" carries
// [classCount, inputW, inputH, maxOutput]; each "yolodataN" field carries
// [width, height, anchors...] for one scale, stored largest-stride first.
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
int class_count = 80;
int input_w = 416;
int input_h = 416;
int max_output_object_count = 1000;
std::vector<Yolo::YoloKernel> yolo_kernels(3);
const PluginField* fields = fc->fields;
for (int i = 0; i < fc->nbFields; i++) {
if (strcmp(fields[i].name, "netdata") == 0) {
assert(fields[i].type == PluginFieldType::kFLOAT32);
// NOTE(review): field is declared kFLOAT32 but the payload is read as
// int* — callers must actually pack ints here; verify against the
// network-building code.
int *tmp = (int*)(fields[i].data);
class_count = tmp[0];
input_w = tmp[1];
input_h = tmp[2];
max_output_object_count = tmp[3];
} else if (strstr(fields[i].name, "yolodata") != NULL) {
assert(fields[i].type == PluginFieldType::kFLOAT32);
int *tmp = (int*)(fields[i].data);
YoloKernel kernel;
kernel.width = tmp[0];
kernel.height = tmp[1];
for (int j = 0; j < fields[i].length - 2; j++) {
kernel.anchors[j] = tmp[j + 2];
}
// Field name's 9th char ('1'..'3') selects the scale, reversed.
yolo_kernels[2 - (fields[i].name[8] - '1')] = kernel;
}
}
YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, yolo_kernels);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
| 0ac691abc0fb033f476b5a85bff52015b49763e1.cu | #include <assert.h>
#include "yololayer.h"
#include "utils_yolov5.h"
using namespace Yolo;
namespace nvinfer1
{
// Build the plugin from explicit network parameters. mAnchor is a pinned
// host array holding one device pointer per YOLO scale; each scale's
// anchor values (CHECK_COUNT pairs of floats) are copied to the device.
YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel)
{
mClassCount = classCount;
mYoloV5NetWidth = netWidth;
mYoloV5NetHeight = netHeight;
mMaxOutObject = maxOut;
mYoloKernel = vYoloKernel;
mKernelCount = vYoloKernel.size();
CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
}
}
// Release the per-scale device anchor buffers and the pinned pointer array.
YoloLayerPlugin::~YoloLayerPlugin()
{
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(cudaFree(mAnchor[ii]));
}
CUDA_CHECK(cudaFreeHost(mAnchor));
}
// create the plugin at runtime from a byte stream
// Deserializing constructor: reads the scalar members in the same order
// serialize() writes them, then the raw YoloKernel array, and rebuilds the
// per-scale device anchor buffers. The final assert verifies the stream
// was consumed exactly.
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
read(d, mYoloV5NetWidth);
read(d, mYoloV5NetHeight);
read(d, mMaxOutObject);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(mYoloKernel.data(), d, kernelSize);
d += kernelSize;
CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
}
assert(d == a + length);
}
// Write the scalar members then the raw YoloKernel array; must stay in
// sync with the deserializing constructor above.
void YoloLayerPlugin::serialize(void* buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
write(d, mYoloV5NetWidth);
write(d, mYoloV5NetHeight);
write(d, mMaxOutObject);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(d, mYoloKernel.data(), kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
// Total byte count produced by serialize().
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject);
}
// Nothing to set up beyond the constructors; 0 = success.
int YoloLayerPlugin::initialize()
{
return 0;
}
// Output shape: one float detection counter followed by mMaxOutObject
// Detection records, flattened to (totalsize + 1, 1, 1).
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
// No reconfiguration needed: all layout information comes from the constructor.
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
// Type/version pair identifies this plugin to the TensorRT plugin registry;
// must match the creator's getPluginName()/getPluginVersion().
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
// Deep copy via the parameter constructor (re-allocates device anchors).
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel);
p->setPluginNamespace(mPluginNamespace);
return p;
}
// Sigmoid activation used to decode raw network outputs.
__device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); };
// Decode one YOLO scale: each thread handles one grid cell of one batch
// image (noElements = yoloWidth * yoloHeight * batchSize) and walks its 3
// anchors. Surviving boxes are appended to the per-image output slot, whose
// first float is an atomically-incremented detection counter.
__global__ void CalDetection(const float *input, float *output, int noElements,
const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes, int outputElem)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
// Split the flat index into (batch image, cell-within-grid).
int total_grid = yoloWidth * yoloHeight;
int bnIdx = idx / total_grid;
idx = idx - total_grid * bnIdx;
// Per-anchor channel count: x, y, w, h, objectness + class scores.
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
for (int k = 0; k < 3; ++k) {
// Objectness; skip low-confidence anchors early.
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (box_prob < IGNORE_THRESH) continue;
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
// Claim a slot via atomicAdd on the float counter at the start of this
// image's output block. NOTE(review): the counter keeps incrementing
// past maxoutobject, so the host must clamp it when reading results.
float *res_count = output + bnIdx * outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= maxoutobject) return;
char* data = (char *)res_count + sizeof(float) + count * sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
//Location
// pytorch:
// y = x[i].sigmoid()
// y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy
// y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
// X: (sigmoid(tx) + cx)/FeaturemapW * netwidth
det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth;
det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight;
// W: (Pw * e^tw) / FeaturemapW * netwidth
// v5: https://github.com/ultralytics/yolov5/issues/471
det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]);
det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k];
det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]);
det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1];
det->conf = box_prob * max_cls_prob;
det->class_id = class_id;
}
}
// Launch CalDetection once per YOLO scale after zeroing each batch image's
// detection counter (the first float of its output slot). All device work
// is issued on 'stream'.
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, cudaStream_t stream, int batchSize)
{
    int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float);
    // Stream-ordered memset so the zeroing is sequenced before the kernels
    // on the same stream (the original used a synchronous cudaMemset).
    for (int idx = 0; idx < batchSize; ++idx) {
        CUDA_CHECK(cudaMemsetAsync(output + idx * outputElem, 0, sizeof(float), stream));
    }
    for (unsigned int i = 0; i < mYoloKernel.size(); ++i)
    {
        const auto& yolo = mYoloKernel[i];
        int numElem = yolo.width * yolo.height * batchSize;
        if (numElem <= 0)
            continue;
        // Clamp a LOCAL copy of the block size: shrinking the member itself
        // (as the original did) permanently degraded the launch configuration
        // for every later call once a small scale was seen.
        int threads = mThreadCount;
        if (numElem < threads)
            threads = numElem;
        //printf("Net: %d %d \n", mYoloV5NetWidth, mYoloV5NetHeight);
        // Launch on the caller's stream: the original launched on the default
        // stream, ignoring the stream TensorRT passed to enqueue().
        CalDetection<<<(numElem + threads - 1) / threads, threads, 0, stream>>>
            (inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float *)mAnchor[i], mClassCount, outputElem);
    }
}
// TensorRT entry point: forwards the batch to forwardGpu; 0 = success.
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)
{
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
// Creator statics: empty field collection (this plugin is configured via
// createPlugin's PluginFieldCollection argument, not registered fields).
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
// Must match YoloLayerPlugin::getPluginType()/getPluginVersion().
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
// Build a plugin from caller-supplied fields: "netdata" carries
// [classCount, inputW, inputH, maxOutput]; each "yolodataN" field carries
// [width, height, anchors...] for one scale, stored largest-stride first.
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
int class_count = 80;
int input_w = 416;
int input_h = 416;
int max_output_object_count = 1000;
std::vector<Yolo::YoloKernel> yolo_kernels(3);
const PluginField* fields = fc->fields;
for (int i = 0; i < fc->nbFields; i++) {
if (strcmp(fields[i].name, "netdata") == 0) {
assert(fields[i].type == PluginFieldType::kFLOAT32);
// NOTE(review): field is declared kFLOAT32 but the payload is read as
// int* — callers must actually pack ints here; verify against the
// network-building code.
int *tmp = (int*)(fields[i].data);
class_count = tmp[0];
input_w = tmp[1];
input_h = tmp[2];
max_output_object_count = tmp[3];
} else if (strstr(fields[i].name, "yolodata") != NULL) {
assert(fields[i].type == PluginFieldType::kFLOAT32);
int *tmp = (int*)(fields[i].data);
YoloKernel kernel;
kernel.width = tmp[0];
kernel.height = tmp[1];
for (int j = 0; j < fields[i].length - 2; j++) {
kernel.anchors[j] = tmp[j + 2];
}
// Field name's 9th char ('1'..'3') selects the scale, reversed.
yolo_kernels[2 - (fields[i].name[8] - '1')] = kernel;
}
}
YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, yolo_kernels);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
|
5b52becc3dca46709aa6b2318d997f3fb2ec38d1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "nonMaxSuppressionDevice.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: sweeps image sizes (matrices_) and launch configurations
// (blocks_) for nonMaxSuppressionDevice, timing 1000 launches per config.
// argv[1] = number of matrix-size rows to sweep.
int main(int argc, char **argv) {
    hipSetDevice(0);
    // Guard against a missing argument (the original dereferenced argv[1]
    // unconditionally).
    if (argc < 2) {
        fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int width = XSIZE;
            int height = YSIZE;
            // One float per pixel. The original allocated XSIZE*YSIZE BYTES
            // (element count, not byte count) — a 4x under-allocation that let
            // the kernel write out of bounds.
            size_t bytes = (size_t)XSIZE * YSIZE * sizeof(float);
            float *d_gradientX = NULL;
            hipMalloc(&d_gradientX, bytes);
            float *d_gradientY = NULL;
            hipMalloc(&d_gradientY, bytes);
            float *d_gradientMag = NULL;
            hipMalloc(&d_gradientMag, bytes);
            float *d_nonMax = NULL;
            hipMalloc(&d_nonMax, bytes);
            // Round the grid up so it covers the whole image.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // force lazy context creation before timing
            hipLaunchKernelGGL((nonMaxSuppressionDevice), dim3(gridBlock), dim3(threadBlock), 0, 0, width, height, d_gradientX, d_gradientY, d_gradientMag, d_nonMax);
            hipDeviceSynchronize();
            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((nonMaxSuppressionDevice), dim3(gridBlock), dim3(threadBlock), 0, 0, width, height, d_gradientX, d_gradientY, d_gradientMag, d_nonMax);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((nonMaxSuppressionDevice), dim3(gridBlock), dim3(threadBlock), 0, 0, width, height, d_gradientX, d_gradientY, d_gradientMag, d_nonMax);
            }
            // Synchronize before stopping the clock: kernel launches are
            // asynchronous, so the original timed only enqueue overhead.
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // Free per-configuration buffers (the original leaked all four on
            // every iteration of the sweep).
            hipFree(d_gradientX);
            hipFree(d_gradientY);
            hipFree(d_gradientMag);
            hipFree(d_nonMax);
        }
    }
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "nonMaxSuppressionDevice.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: sweeps image sizes (matrices_) and launch configurations
// (blocks_) for nonMaxSuppressionDevice, timing 1000 launches per config.
// argv[1] = number of matrix-size rows to sweep.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    // Guard against a missing argument (the original dereferenced argv[1]
    // unconditionally).
    if (argc < 2) {
        fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int width = XSIZE;
            int height = YSIZE;
            // One float per pixel. The original allocated XSIZE*YSIZE BYTES
            // (element count, not byte count) — a 4x under-allocation that let
            // the kernel write out of bounds.
            size_t bytes = (size_t)XSIZE * YSIZE * sizeof(float);
            float *d_gradientX = NULL;
            cudaMalloc(&d_gradientX, bytes);
            float *d_gradientY = NULL;
            cudaMalloc(&d_gradientY, bytes);
            float *d_gradientMag = NULL;
            cudaMalloc(&d_gradientMag, bytes);
            float *d_nonMax = NULL;
            cudaMalloc(&d_nonMax, bytes);
            // Round the grid up so it covers the whole image.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force lazy context creation before timing
            nonMaxSuppressionDevice<<<gridBlock, threadBlock>>>(width, height, d_gradientX, d_gradientY, d_gradientMag, d_nonMax);
            cudaDeviceSynchronize();
            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                nonMaxSuppressionDevice<<<gridBlock, threadBlock>>>(width, height, d_gradientX, d_gradientY, d_gradientMag, d_nonMax);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                nonMaxSuppressionDevice<<<gridBlock, threadBlock>>>(width, height, d_gradientX, d_gradientY, d_gradientMag, d_nonMax);
            }
            // Synchronize before stopping the clock: kernel launches are
            // asynchronous, so the original timed only enqueue overhead.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // Free per-configuration buffers (the original leaked all four on
            // every iteration of the sweep).
            cudaFree(d_gradientX);
            cudaFree(d_gradientY);
            cudaFree(d_gradientMag);
            cudaFree(d_nonMax);
        }
    }
}
b129d07af2f1d2f2746790fcddd5976ca8eecaaa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 4
#include "utils.h"
#include <thrust/host_vector.h>
#include "stdio.h"
///////////////////////////////////////////////////////////////////////////////////
/*
 * GPU sort driver (Udacity HW4, hipified): sorts numElems (value, position)
 * pairs from d_inputVals/d_inputPos into d_outputVals/d_outputPos.
 * The heavy lifting is done by kernels defined elsewhere (move, check,
 * scan_collect, scan_second, scan_distr, place, test); their exact semantics
 * are not visible from this file.
 */
void your_sort(unsigned int* d_inputVals,
unsigned int* d_inputPos,
unsigned int* d_outputVals,
unsigned int* d_outputPos,
const size_t numElems)
{
// Padded working size: K segments of M elements each.
int SIZE = 128 * 2048;
int DELTA = SIZE - numElems;
int M = 2048;
int K = SIZE/M;
// Presumably a pivot used by check/place to split values into low/high
// buckets — TODO confirm against those kernels' sources.
int LIMIT = 1044000000;
// Device scratch: permuted positions/values plus low/high bucket flag arrays.
unsigned int * d_aP;
unsigned int * d_aV;
int * d_Bl;
int * d_Bh;
float *d_x;
// NOTE(review): m_x is never used, and the five device buffers allocated
// below are never freed — both worth fixing.
float m_x[8];
printf("* SIZE = %d, DELTA = %d \n", SIZE, DELTA);
checkCudaErrors( hipMalloc((void**) &d_aP, SIZE*sizeof(int)) );
checkCudaErrors( hipMalloc((void**) &d_aV, SIZE*sizeof(int)) );
checkCudaErrors( hipMalloc((void**) &d_Bl, SIZE*sizeof(int)) );
checkCudaErrors( hipMalloc((void**) &d_Bh, SIZE*sizeof(int)) );
checkCudaErrors( hipMalloc(&d_x, 8*sizeof(float)) );
// Stage 1: move the input pairs into the padded working arrays.
hipLaunchKernelGGL(( move), dim3(1), dim3(M/2), 0, 0, d_inputVals, d_inputPos, d_aV, d_aP, 2*K, numElems);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Stage 2: flag each element as below/above LIMIT into d_Bl/d_Bh.
hipLaunchKernelGGL(( check), dim3(K), dim3(M/2), 0, 0, d_aV, d_Bl, d_Bh, LIMIT, numElems );
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Stage 3: three-phase scan of both flag arrays (per-block collect, scan of
// block totals, distribution back to blocks).
hipLaunchKernelGGL(( scan_collect), dim3(K), dim3(M/2), 0, 0, (unsigned int *) d_Bl, M );
hipLaunchKernelGGL(( scan_collect), dim3(K), dim3(M/2), 0, 0, (unsigned int *) d_Bh, M );
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( scan_second), dim3(1), dim3(K/2), 0, 0, (unsigned int *) d_Bl, K, M );
hipLaunchKernelGGL(( scan_second), dim3(1), dim3(K/2), 0, 0, (unsigned int *) d_Bh, K, M );
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( scan_distr), dim3(K), dim3(M/2), 0, 0, (unsigned int *) d_Bl, M );
hipLaunchKernelGGL(( scan_distr), dim3(K), dim3(M/2), 0, 0, (unsigned int *) d_Bh, M );
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Stage 4: scatter each pair to its final slot using the scanned offsets.
hipLaunchKernelGGL(( place), dim3(2*K), dim3(M/2), 0, 0, d_aV, d_aP, d_outputVals, d_outputPos, d_Bl, d_Bh, LIMIT, numElems );
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Stage 5: single-thread verification passes over two output ranges.
hipLaunchKernelGGL(( test), dim3(1), dim3(1), 0, 0, (unsigned int *) d_outputVals, d_outputPos, 0, numElems-1200, d_x ); // K*M
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( test), dim3(1), dim3(1), 0, 0, (unsigned int *) d_outputVals, d_outputPos, numElems-1100, numElems, d_x ); // K*M
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//
}
///////////////////////////////////////////////////////////////////////////////////
| b129d07af2f1d2f2746790fcddd5976ca8eecaaa.cu | //Udacity HW 4
#include "utils.h"
#include <thrust/host_vector.h>
#include "stdio.h"
///////////////////////////////////////////////////////////////////////////////////
/*
 * GPU sort driver (Udacity HW4): sorts numElems (value, position) pairs from
 * d_inputVals/d_inputPos into d_outputVals/d_outputPos.
 * The heavy lifting is done by kernels defined elsewhere (move, check,
 * scan_collect, scan_second, scan_distr, place, test); their exact semantics
 * are not visible from this file.
 */
void your_sort(unsigned int* d_inputVals,
               unsigned int* d_inputPos,
               unsigned int* d_outputVals,
               unsigned int* d_outputPos,
               const size_t numElems)
{
    // Padded working size: K segments of M elements each.
    int SIZE = 128 * 2048;
    int DELTA = SIZE - numElems;
    int M = 2048;
    int K = SIZE/M;
    // Presumably a pivot used by check/place to split values into low/high
    // buckets — TODO confirm against those kernels' sources.
    int LIMIT = 1044000000;
    // Device scratch: permuted positions/values plus low/high bucket flag arrays.
    unsigned int * d_aP;
    unsigned int * d_aV;
    int * d_Bl;
    int * d_Bh;
    float *d_x;
    printf("* SIZE = %d, DELTA = %d \n", SIZE, DELTA);
    checkCudaErrors( cudaMalloc((void**) &d_aP, SIZE*sizeof(int)) );
    checkCudaErrors( cudaMalloc((void**) &d_aV, SIZE*sizeof(int)) );
    checkCudaErrors( cudaMalloc((void**) &d_Bl, SIZE*sizeof(int)) );
    checkCudaErrors( cudaMalloc((void**) &d_Bh, SIZE*sizeof(int)) );
    checkCudaErrors( cudaMalloc(&d_x, 8*sizeof(float)) );
    // Stage 1: move the input pairs into the padded working arrays.
    move<<<1, M/2>>>( d_inputVals, d_inputPos, d_aV, d_aP, 2*K, numElems);
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
    // Stage 2: flag each element as below/above LIMIT into d_Bl/d_Bh.
    check<<<K, M/2>>>( d_aV, d_Bl, d_Bh, LIMIT, numElems );
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
    // Stage 3: three-phase scan of both flag arrays (per-block collect, scan
    // of block totals, distribution back to blocks).
    scan_collect<<<K, M/2>>>( (unsigned int *) d_Bl, M );
    scan_collect<<<K, M/2>>>( (unsigned int *) d_Bh, M );
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
    scan_second<<<1, K/2>>>( (unsigned int *) d_Bl, K, M );
    scan_second<<<1, K/2>>>( (unsigned int *) d_Bh, K, M );
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
    scan_distr<<<K, M/2>>>( (unsigned int *) d_Bl, M );
    scan_distr<<<K, M/2>>>( (unsigned int *) d_Bh, M );
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
    // Stage 4: scatter each pair to its final slot using the scanned offsets.
    place<<<2*K, M/2>>>( d_aV, d_aP, d_outputVals, d_outputPos, d_Bl, d_Bh, LIMIT, numElems );
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
    // Stage 5: single-thread verification passes over two output ranges.
    test<<<1, 1>>>( (unsigned int *) d_outputVals, d_outputPos, 0, numElems-1200, d_x ); // K*M
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
    test<<<1, 1>>>( (unsigned int *) d_outputVals, d_outputPos, numElems-1100, numElems, d_x ); // K*M
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
    // BUG FIX: release the scratch buffers — the original leaked all five.
    // (Also removed the unused local `float m_x[8]`.)
    checkCudaErrors( cudaFree(d_aP) );
    checkCudaErrors( cudaFree(d_aV) );
    checkCudaErrors( cudaFree(d_Bl) );
    checkCudaErrors( cudaFree(d_Bh) );
    checkCudaErrors( cudaFree(d_x) );
}
///////////////////////////////////////////////////////////////////////////////////
|
590ebff882a35103870dc5a47210bdd0449da8e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// fermi
/*
* Copyright 2018 Vrije Universiteit Amsterdam, The Netherlands
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* typedef struct __attribute__ ((packed)) { */
/* float real; */
/* float imag; */
/* } Complex; */
extern "C" {
__global__ void findPeakKernel(const int nrBlocks, const int n, float* peak, float* peaks, int* indicesPeak, const float* input);
}
/*
 * Per-block peak search: each block scans a strided slice of the real parts of
 * a complex-interleaved input array (input[2*i] = real part of element i),
 * reduces to the maximum |real| value in shared memory, and writes that
 * maximum and its element index to peaks[blockIdx.x] / indicesPeak[blockIdx.x].
 * Block 0 additionally copies the real part of the last element into *peak.
 * NOTE(review): assumes the block supplies at least 256 threads (otherwise
 * part of the shared scratch is read uninitialized, as in the original) —
 * confirm against the host launch configuration.
 */
__global__ void findPeakKernel(const int nrBlocks, const int n, float* peak, float* peaks, int* indicesPeak, const float* input) {
const int bi = blockIdx.x;
const int wti = threadIdx.y;
const int tti = threadIdx.x;
const int nrThreads = 256;
const int stepSize = nrBlocks * nrThreads;
const int nrThreadsNrThreads = min(32, nrThreads);
__shared__ float reduceMem[256];
__shared__ int indexMem[256];
const int ti = wti * (1 * nrThreadsNrThreads) + tti;
// Phase 1: per-thread strided scan for the local maximum.
if (ti < nrThreads) {
float maxVal = -1.0f;
int maxIdx = -1;
for (int i = bi * nrThreads + ti; i < n; i += stepSize) {
const float val = fabsf(input[i * 2 + 0]); // real part only
if (val > maxVal) {
maxVal = val;
maxIdx = i;
}
}
reduceMem[ti] = maxVal;
indexMem[ti] = maxIdx;
}
// BUG FIX: barriers hoisted out of the `ti < nrThreads` branch so that every
// thread in the block reaches them — __syncthreads() inside divergent control
// flow is undefined behavior when some threads skip it.
__syncthreads();
// Phase 2: tree reduction in shared memory.
for (int s = nrThreads / 2; s > 0; s >>= 1) {
if (ti < s) {
if (reduceMem[ti + s] > reduceMem[ti]) {
reduceMem[ti] = reduceMem[ti + s];
indexMem[ti] = indexMem[ti + s];
}
}
__syncthreads();
}
// Phase 3: thread 0 publishes the block result.
if (ti == 0) {
peaks[bi] = reduceMem[0];
indicesPeak[bi] = indexMem[0];
if (bi == 0) {
*peak = input[(n - 1) * 2 + 0];
}
}
}
| 590ebff882a35103870dc5a47210bdd0449da8e3.cu | // fermi
/*
* Copyright 2018 Vrije Universiteit Amsterdam, The Netherlands
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* typedef struct __attribute__ ((packed)) { */
/* float real; */
/* float imag; */
/* } Complex; */
extern "C" {
__global__ void findPeakKernel(const int nrBlocks, const int n, float* peak, float* peaks, int* indicesPeak, const float* input);
}
/*
 * Per-block peak search: each block scans a strided slice of the real parts of
 * a complex-interleaved input array (input[2*i] = real part of element i),
 * reduces to the maximum |real| value in shared memory, and writes that
 * maximum and its element index to peaks[blockIdx.x] / indicesPeak[blockIdx.x].
 * Block 0 additionally copies the real part of the last element into *peak.
 * NOTE(review): assumes the block supplies at least 256 threads (otherwise
 * part of the shared scratch is read uninitialized, as in the original) —
 * confirm against the host launch configuration.
 */
__global__ void findPeakKernel(const int nrBlocks, const int n, float* peak, float* peaks, int* indicesPeak, const float* input) {
const int bi = blockIdx.x;
const int wti = threadIdx.y;
const int tti = threadIdx.x;
const int nrThreads = 256;
const int stepSize = nrBlocks * nrThreads;
const int nrThreadsNrThreads = min(32, nrThreads);
__shared__ float reduceMem[256];
__shared__ int indexMem[256];
const int ti = wti * (1 * nrThreadsNrThreads) + tti;
// Phase 1: per-thread strided scan for the local maximum.
if (ti < nrThreads) {
float maxVal = -1.0f;
int maxIdx = -1;
for (int i = bi * nrThreads + ti; i < n; i += stepSize) {
const float val = fabsf(input[i * 2 + 0]); // real part only
if (val > maxVal) {
maxVal = val;
maxIdx = i;
}
}
reduceMem[ti] = maxVal;
indexMem[ti] = maxIdx;
}
// BUG FIX: barriers hoisted out of the `ti < nrThreads` branch so that every
// thread in the block reaches them — __syncthreads() inside divergent control
// flow is undefined behavior when some threads skip it.
__syncthreads();
// Phase 2: tree reduction in shared memory.
for (int s = nrThreads / 2; s > 0; s >>= 1) {
if (ti < s) {
if (reduceMem[ti + s] > reduceMem[ti]) {
reduceMem[ti] = reduceMem[ti + s];
indexMem[ti] = indexMem[ti + s];
}
}
__syncthreads();
}
// Phase 3: thread 0 publishes the block result.
if (ti == 0) {
peaks[bi] = reduceMem[0];
indicesPeak[bi] = indexMem[0];
if (bi == 0) {
*peak = input[(n - 1) * 2 + 0];
}
}
}
|
0c56fe0c23cf8b28c1626470250c80fc16114230.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
 * In-place element-wise AND of two consecutive rows of x: row (idx-2) becomes
 * row (idx-2) & row (idx-1), where each row holds N bool elements.
 * Launched 1-D; the grid-stride loop covers all N columns.
 * NOTE(review): loop index i is int while N is size_t — for N > INT_MAX the
 * index overflows; confirm intended ranges.
 */
__global__ void And( bool * x, size_t idx, size_t N)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
{
// row (idx-2) &= row (idx-1), one column per iteration
x[(idx-2)*N + i] = x[(idx-2)*N + i] & x[(idx-1)*N + i];
}
return;
} | 0c56fe0c23cf8b28c1626470250c80fc16114230.cu | #include "includes.h"
/*
 * In-place element-wise AND of two consecutive rows of x: row (idx-2) becomes
 * row (idx-2) & row (idx-1), where each row holds N bool elements.
 * Launched 1-D; a grid-stride loop covers all N columns.
 */
__global__ void And( bool * x, size_t idx, size_t N)
{
    bool *dst = x + (idx - 2) * N;        // row accumulated into
    const bool *src = x + (idx - 1) * N;  // row being ANDed in
    const int stride = blockDim.x * gridDim.x;
    for (int col = blockIdx.x * blockDim.x + threadIdx.x; col < N; col += stride)
    {
        dst[col] = dst[col] & src[col];
    }
}
6be7307cc691f6acc586ed61f0dc069bfbdda7cf.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
//Number of elements of the inpu layers, that correspond to the number of pixels of a picture
#define PIXELS 3073
//Number of elements of the first hidden layer
#define HIDDEN_LAYER_1 2000
//Number of elements of the second hidden layer
#define HIDDEN_LAYER_2 450
//Number of elements of the output layer
#define OUTPUT_LAYER 10
//Learning rate of the algorithm
#define LEARNING_RATE 0.01
//Numbers of elements to use for training
#define ELEMENTS 1000
/*
* Function that given a vector and its size, print it
* In:
* f: vector of doubles to be printed
* N: size of the vector
*/
/*
 * Print every element of a vector of doubles, one "%f" per line.
 * In:
 *   f: vector of doubles to be printed
 *   N: number of elements in f
 */
void print_vector(double *f, int N){
    int idx = 0;
    while (idx < N) {
        printf("%f\n", f[idx]);
        ++idx;
    }
}
/*
* Function that given the value of the previous layer of a neural network, and its transition matrix
* to the new layer, calculates the net value of the layer
* In:
* input: vector that represents the previous layer of the layer to calculate
* matrix: transition matrix with the weigths of the neural network
* result: vector to store the results. It represents the layer to be calculated
* input_size: size of the previous layer
* hidden_size: size of the calculated layer
*
*/
/*
 * Forward-propagate one layer: result[j] = sum_i input[i] * matrix[i][j],
 * where matrix is row-major with hidden_size columns (weights from each input
 * neuron to each destination neuron).
 * In:
 *   input: activations of the previous layer (length input_size)
 *   matrix: input_size x hidden_size weight matrix
 *   result: output buffer for the destination layer (length hidden_size)
 *   input_size: size of the previous layer
 *   hidden_size: size of the destination layer
 */
void get_layer(double *input, double *matrix, double *result, int input_size, int hidden_size){
    for (int neuron = 0; neuron < hidden_size; ++neuron){
        double acc = 0.0;
        // Weighted sum over every source neuron.
        for (int k = 0; k < input_size; ++k)
            acc += input[k] * matrix[k * hidden_size + neuron];
        result[neuron] = acc;
    }
}
/*
* Function that apply the sigmoid function to every element of a vector
* In:
* double: vector to apply the signmoid function to every element
* N: size of the vector
*/
/*
 * Apply the logistic sigmoid f(x) = 1 / (1 + e^(-x)) to every element of f,
 * in place.
 * In:
 *   f: vector to transform
 *   N: number of elements in f
 */
void sigmoid(double *f, int N){
    for (int k = 0; k < N; ++k) {
        double z = f[k];
        f[k] = 1.0 / (1.0 + exp(-z));
    }
}
/*
* Function that normalize the input, so all the values are equally important. Normalize is the process
* to transform every element of a vector to its correspondent value beetwen 0 and 1
* In:
* c: vector with the numbers between 0 and 255, each one which corresponds to a pixel of the input image
* f: vector so save the normalized vector
* N: size of the vectors
*/
/*
 * Normalize raw pixel bytes into [0, 1]: f[i] = (c[i] - Min) / (Max - Min)
 * with Min = 1 and Max = 255 (so a byte of 1 maps to 0.0 and 255 to 1.0).
 * In:
 *   c: raw pixel values
 *   f: output buffer for the normalized values
 *   N: number of elements
 */
void process_input(unsigned char *c, double *f, int N){
    for (int k = 0; k < N; ++k)
        f[k] = (c[k] - 1) / 254.0;
}
/*
* Function that returns the index corresponding to the maximum element of an array
* In:
* f: vector of values
* N: size of vector
* Out:
* int corresponding to the index of the maximum value
*/
/*
 * Return the index of the maximum element of f. Ties keep the earliest index
 * (strict > comparison).
 * In:
 *   f: vector of values (must have at least one element)
 *   N: number of elements
 * Out:
 *   index of the largest value
 */
int max_index(double *f, int N){
    int best = 0;
    for (int k = 1; k < N; ++k)
        if (f[k] > f[best])
            best = k;
    return best;
}
/*
* Function that calculate the error of the neural network
* In:
* f: output vector of the neural network
* output: expected value
* N: size of the vector
* Out:
* double corresponding to the calculated error of the NN
*/
/*
 * Total network error against a one-hot target:
 *   error = 0.5 * SUM_i (d_i - f_i)^2, where d_i = 1 for i == output, else 0.
 * In:
 *   f: network output vector
 *   output: index of the expected class
 *   N: number of output neurons
 * Out:
 *   the half sum-of-squares error
 * BUG FIX: the original malloc'd a one-hot scratch array and never freed it,
 * leaking N doubles per call; the target value is now computed inline.
 */
double error(double *f, int output, int N) {
    double total = 0.0;
    for (int i = 0; i < N; ++i) {
        double target = (i == output) ? 1.0 : 0.0;
        double diff = target - f[i];
        total += diff * diff;
    }
    return 0.5 * total;
}
/*
* Function that calculate the error of the output layers
* In:
* f: value of the output neurons
* output: expected value
* N: size of the vector
* error_array: vector with the calculated error of every neuron
*
*/
/*
 * Output-layer deltas for backpropagation with a one-hot target:
 *   error_array[i] = (d_i - f_i) * f_i * (1 - f_i), d_i = 1 iff i == output.
 * In:
 *   f: activations of the output layer
 *   output: index of the expected class
 *   N: number of output neurons
 *   error_array: output buffer for the deltas (length N)
 * BUG FIX: the original malloc'd a one-hot scratch array and never freed it,
 * leaking N doubles per call; the target value is now computed inline.
 */
void error_output(double *f, int output, int N, double *error_array){
    for (int i = 0; i < N; ++i){
        double target = (i == output) ? 1.0 : 0.0;
        error_array[i] = (target - f[i]) * f[i] * (1.0 - f[i]);
    }
}
/* Function that calculates the error of the hidden layers
* In:
* f: hidden layer calculated values
* error_array: error vector, to save the error of every neuron in the hidden layer
* next_layer_error: error vector of the next layer, neccessary to calculate the error of a hidden layer
* layer_size: size of the hidden layer
* next_size: size of the next layer
* transition_matrix: transition matrix to propagate values from hidden to next layer
*/
/*
 * Hidden-layer deltas for backpropagation:
 *   error_array[i] = f[i] * (1 - f[i]) * SUM_j W[i][j] * next_layer_error[j],
 * where W (transition_matrix) is row-major with next_size columns.
 * In:
 *   f: activations of this hidden layer (length layer_size)
 *   error_array: output buffer for this layer's deltas (length layer_size)
 *   next_layer_error: deltas of the following layer (length next_size)
 *   layer_size: neurons in this layer
 *   next_size: neurons in the following layer
 *   transition_matrix: layer_size x next_size weights to the following layer
 */
void error_hidden_layer(double *f, double* error_array, double *next_layer_error, int layer_size, int next_size, double *transition_matrix){
    for (int i = 0; i < layer_size; ++i){
        double acc = 0.0;
        // Back-propagated contribution from every neuron of the next layer.
        for (int j = 0; j < next_size; ++j)
            acc += transition_matrix[i * next_size + j] * next_layer_error[j];
        error_array[i] = f[i] * (1.0 - f[i]) * acc;
    }
}
/*
* Function that calculates the variation of weigths of a neural network
* In:
* error_vector: error vector of the layer
* gradient: variance of the weights for every element
* layer: value of the elements of the layer
* N: rows of the transition matrix
* M: columns ot the transition matrix
*
*/
/*
 * Weight-update matrix for one transition:
 *   gradient[i][j] = LEARNING_RATE * error_vector[j] * layer[i].
 * In:
 *   error_vector: deltas of the destination layer (length M)
 *   gradient: N x M output matrix of weight updates (row-major)
 *   layer: activations of the source layer (length N)
 *   N, M: dimensions of the transition matrix
 */
void calculate_error(double *error_vector, double* gradient, double *layer, int N, int M){
    for (int i = 0; i < N; ++i) {
        const double activation = layer[i];
        double *row = gradient + i * M;
        for (int j = 0; j < M; ++j)
            row[j] = LEARNING_RATE * error_vector[j] * activation;
    }
}
/*
* Function that sums two matrix and store it directly in the first matrix
* In:
* transition: first matrix
* gradient: second matrix
* N: rows of the matrix
* M: columns of the matrix
*/
/*
 * Element-wise in-place sum of two N x M matrices:
 * transition[i][j] += gradient[i][j].
 * In:
 *   transition: matrix updated in place
 *   gradient: matrix of increments
 *   N, M: matrix dimensions
 */
void sum_matrix(double *transition, double *gradient, int N, int M){
    const int total = N * M;
    // Both matrices are dense and row-major, so a flat pass is equivalent.
    for (int k = 0; k < total; ++k)
        transition[k] += gradient[k];
}
/*
* Function that reads a file, stores every vector of it, and then apply backpropagation
* In:
* file: name of the file with the data
* buffer: vector where every pixel will be stored
* transition_matrix_1: transition vector from input layer to hidden layer 1
* transition_matrix_2: transition vector from hidden layer 1 to hidden layer 2
* transition_matrix_3: transition vector from hidden layer 2 to output layer
* elements: number of elements to use for training
*/
/*
 * Read up to `elements` training records from a binary file (each record is
 * PIXELS bytes: 1 label byte followed by the image pixels) and run one
 * forward + backpropagation pass per record, updating the three transition
 * matrices in place. Prints the average per-example time.
 * In:
 *   file: path of the binary training file
 *   buffer: caller-provided scratch of PIXELS bytes for one raw record
 *   transition_matrix_1..3: weight matrices, updated in place
 *   elements: maximum number of records to train on
 */
void read_file(char *file, unsigned char* buffer, double *transition_matrix_1, double *transition_matrix_2, double *transition_matrix_3, int elements){
    FILE *f = fopen(file, "rb");
    // BUG FIX: the original never checked fopen() and crashed in fread on a
    // missing file.
    if (f == NULL) {
        printf("Error: no se pudo abrir el archivo %s\n", file);
        return;
    }
    unsigned char expected_output;   // label byte of the current record
    int i = 0;                       // records processed so far
    float total_time = 0.0;          // accumulated per-record time (ms)
    while (1 == fread(buffer, PIXELS, 1, f) && i < elements) {
        float tiempo1;
        hipEvent_t inicio1, fin1;
        hipEventCreate(&inicio1);
        hipEventCreate(&fin1);
        hipEventRecord(inicio1, 0);
        // First byte is the label; reuse its slot as the bias input (always 1).
        expected_output = buffer[0];
        buffer[0] = 1;
        // Layer activations (+1 slot on hidden layers for the bias neuron).
        double *hidden_layer_1 = (double*)malloc((HIDDEN_LAYER_1+1)*sizeof(double));
        double *hidden_layer_2 = (double*)malloc((HIDDEN_LAYER_2+1)*sizeof(double));
        double *output_layer = (double*)malloc(OUTPUT_LAYER*sizeof(double));
        double *input = (double*)malloc(PIXELS*sizeof(double));
        process_input(buffer, input, PIXELS);
        // Forward pass: input -> hidden 1 -> hidden 2 -> output.
        get_layer(input, transition_matrix_1, hidden_layer_1, PIXELS, HIDDEN_LAYER_1);
        // NOTE(review): sigmoid also touches the not-yet-set bias slot; it is
        // overwritten immediately after, so kept as-is.
        sigmoid(hidden_layer_1, HIDDEN_LAYER_1+1);
        hidden_layer_1[HIDDEN_LAYER_1] = 1;
        get_layer(hidden_layer_1, transition_matrix_2, hidden_layer_2, HIDDEN_LAYER_1+1, HIDDEN_LAYER_2);
        sigmoid(hidden_layer_2, HIDDEN_LAYER_2+1);
        hidden_layer_2[HIDDEN_LAYER_2] = 1;
        get_layer(hidden_layer_2, transition_matrix_3, output_layer, HIDDEN_LAYER_2+1, OUTPUT_LAYER);
        sigmoid(output_layer, OUTPUT_LAYER);
        // Backward pass: output deltas, hidden deltas, weight gradients.
        double *errors_array = (double*)malloc(OUTPUT_LAYER*sizeof(double));
        error_output(output_layer, expected_output, OUTPUT_LAYER, errors_array);
        double *transition_matrix_3_gradient = (double*)malloc((HIDDEN_LAYER_2+1)*OUTPUT_LAYER*sizeof(double));
        calculate_error(errors_array, transition_matrix_3_gradient, hidden_layer_2, HIDDEN_LAYER_2+1, OUTPUT_LAYER);
        double *hidden_layer_array_2 = (double*)malloc((HIDDEN_LAYER_2+1)*sizeof(double));
        // BUG FIX: layer_size/next_size were swapped in the original call,
        // which read past the end of errors_array (size OUTPUT_LAYER) and
        // produced wrong deltas. This layer has HIDDEN_LAYER_2+1 neurons and
        // feeds OUTPUT_LAYER outputs through transition_matrix_3.
        error_hidden_layer(hidden_layer_2, hidden_layer_array_2, errors_array, HIDDEN_LAYER_2+1, OUTPUT_LAYER, transition_matrix_3);
        double *transition_matrix_2_gradient = (double*)malloc((HIDDEN_LAYER_1+1)*HIDDEN_LAYER_2*sizeof(double));
        calculate_error(hidden_layer_array_2, transition_matrix_2_gradient, hidden_layer_1, HIDDEN_LAYER_1+1, HIDDEN_LAYER_2);
        double *hidden_layer_array_1 = (double*)malloc((HIDDEN_LAYER_1+1)*sizeof(double));
        // BUG FIX: same size swap as above, for the first hidden layer.
        error_hidden_layer(hidden_layer_1, hidden_layer_array_1, hidden_layer_array_2, HIDDEN_LAYER_1+1, HIDDEN_LAYER_2, transition_matrix_2);
        double *transition_matrix_1_gradient = (double*)malloc(PIXELS*HIDDEN_LAYER_1*sizeof(double));
        calculate_error(hidden_layer_array_1, transition_matrix_1_gradient, input, PIXELS, HIDDEN_LAYER_1);
        // Apply the weight updates only after every gradient is computed.
        sum_matrix(transition_matrix_1, transition_matrix_1_gradient, PIXELS, HIDDEN_LAYER_1);
        sum_matrix(transition_matrix_2, transition_matrix_2_gradient, HIDDEN_LAYER_1+1, HIDDEN_LAYER_2);
        sum_matrix(transition_matrix_3, transition_matrix_3_gradient, HIDDEN_LAYER_2+1, OUTPUT_LAYER);
        // Next record.
        ++i;
        // Free everything allocated for this record.
        free(hidden_layer_1);
        free(hidden_layer_2);
        free(output_layer);
        free(input);
        free(errors_array);
        // BUG FIX: the two delta vectors and three gradient matrices were
        // leaked on every iteration in the original.
        free(hidden_layer_array_1);
        free(hidden_layer_array_2);
        free(transition_matrix_1_gradient);
        free(transition_matrix_2_gradient);
        free(transition_matrix_3_gradient);
        hipEventRecord(fin1, 0);
        hipEventSynchronize(fin1);
        hipEventElapsedTime(&tiempo1, inicio1, fin1);
        // BUG FIX: destroy the per-iteration timing events (they were leaked).
        hipEventDestroy(inicio1);
        hipEventDestroy(fin1);
        total_time += tiempo1;
    }
    fclose(f);  // BUG FIX: the file handle was never closed.
    // BUG FIX: guard against division by zero when elements == 0.
    if (elements > 0)
        total_time /= elements;
    printf("Tiempo promedio por clasificacion: %f\n", total_time);
}
/*
* Function that randomly initialize all values off the transiction matrix
* In:
* matrix: transition matrix of the neural network
* N: rows of the matrix
* M: columns of the matrix
*/
/*
 * Fill an N x M weight matrix with pseudo-random values in (-1, 1).
 * For each entry, one rand() call picks the sign and a second one picks the
 * magnitude in [0, 1) with 1e-6 resolution (call order matters for
 * reproducibility under a fixed srand seed).
 * In:
 *   matrix: N x M row-major matrix to initialize
 *   N, M: matrix dimensions
 */
void init_layer(double *matrix, int N, int M){
    for (int i = 0; i < N; ++i){
        for (int j = 0; j < M; ++j){
            int negative = rand() % 2;                          // sign draw first
            double magnitude = (rand() % 1000000) / 1000000.0;  // then magnitude
            matrix[i * M + j] = negative ? -magnitude : magnitude;
        }
    }
}
/*
* Function that prints the value of the transition matrix
* In:
* matrix: transition matrix
* N: rows of the matrix
* M: columns of the matrix
*/
/*
 * Print an N x M row-major matrix, space-separated "%f" values, one row per line.
 * In:
 *   matrix: matrix to print
 *   N, M: matrix dimensions
 */
void print_layer(double *matrix, int N, int M){
    for (int row = 0; row < N; ++row){
        for (int col = 0; col < M; ++col)
            printf("%f ", matrix[row * M + col]);
        printf("\n");
    }
}
/*
 * Entry point: trains the 3-layer network on "data_batch_1.bin".
 * Usage: <program> <N>, where N is the number of training examples.
 */
int main(int argc, char *argv[]){
//Seed the RNG used by init_layer
srand(time(NULL));
//Validate the command-line arguments
if ( argc != 2 ) {
/* We print argv[0] assuming it is the program name */
printf( "Error se debe ejecutar: %s <N>\n", argv[0] );
exit(0);
}
//Parse the number of training elements
int elements = atoi(argv[1]);
printf("Se va a entrenar con %d elementos\n",elements);
//Allocate the three weight (transition) matrices; hidden layers carry a +1 bias slot
double *transition_matrix_1 = (double*)malloc(PIXELS*HIDDEN_LAYER_1*sizeof(double));
double *transition_matrix_2 = (double*)malloc((HIDDEN_LAYER_1+1)*HIDDEN_LAYER_2*sizeof(double));
double *transition_matrix_3 = (double*)malloc((HIDDEN_LAYER_2+1)*OUTPUT_LAYER*sizeof(double));
//Randomly initialize every weight in (-1, 1)
init_layer(transition_matrix_1,PIXELS,HIDDEN_LAYER_1);
init_layer(transition_matrix_2,HIDDEN_LAYER_1+1,HIDDEN_LAYER_2);
init_layer(transition_matrix_3,HIDDEN_LAYER_2+1,OUTPUT_LAYER);
//Timing state; buffer holds one raw record (label byte + pixels)
float tiempo1;
hipEvent_t inicio1, fin1;
unsigned char *buffer = (unsigned char*)malloc(PIXELS*sizeof(unsigned char));
//Create and start the timing events
hipEventCreate(&inicio1); // initialize the events
hipEventCreate(&fin1);
hipEventRecord( inicio1, 0 );
//Run the training loop over the data file
read_file("data_batch_1.bin",buffer,transition_matrix_1,transition_matrix_2,transition_matrix_3,elements);
//Stop the timer
hipEventRecord( fin1, 0); // record the end time
hipEventSynchronize( fin1 ); // wait for the event to complete
hipEventElapsedTime( &tiempo1, inicio1, fin1 );
//Report total runtime
// NOTE(review): buffer and the three matrices are never freed and the events
// never destroyed — harmless at process exit, but worth cleaning up.
printf("Tiempo total del programa: %f ms\n", tiempo1);
}
| 6be7307cc691f6acc586ed61f0dc069bfbdda7cf.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
//Number of elements of the inpu layers, that correspond to the number of pixels of a picture
#define PIXELS 3073
//Number of elements of the first hidden layer
#define HIDDEN_LAYER_1 2000
//Number of elements of the second hidden layer
#define HIDDEN_LAYER_2 450
//Number of elements of the output layer
#define OUTPUT_LAYER 10
//Learning rate of the algorithm
#define LEARNING_RATE 0.01
//Numbers of elements to use for training
#define ELEMENTS 1000
/*
* Function that given a vector and its size, print it
* In:
* f: vector of doubles to be printed
* N: size of the vector
*/
/*
 * Print every element of a vector of doubles, one "%f" per line.
 * In:
 *   f: vector of doubles to be printed
 *   N: number of elements in f
 */
void print_vector(double *f, int N){
    int idx = 0;
    while (idx < N) {
        printf("%f\n", f[idx]);
        ++idx;
    }
}
/*
* Function that given the value of the previous layer of a neural network, and its transition matrix
* to the new layer, calculates the net value of the layer
* In:
* input: vector that represents the previous layer of the layer to calculate
* matrix: transition matrix with the weigths of the neural network
* result: vector to store the results. It represents the layer to be calculated
* input_size: size of the previous layer
* hidden_size: size of the calculated layer
*
*/
/*
 * Forward-propagate one layer: result[j] = sum_i input[i] * matrix[i][j],
 * where matrix is row-major with hidden_size columns (weights from each input
 * neuron to each destination neuron).
 * In:
 *   input: activations of the previous layer (length input_size)
 *   matrix: input_size x hidden_size weight matrix
 *   result: output buffer for the destination layer (length hidden_size)
 *   input_size: size of the previous layer
 *   hidden_size: size of the destination layer
 */
void get_layer(double *input, double *matrix, double *result, int input_size, int hidden_size){
    for (int neuron = 0; neuron < hidden_size; ++neuron){
        double acc = 0.0;
        // Weighted sum over every source neuron.
        for (int k = 0; k < input_size; ++k)
            acc += input[k] * matrix[k * hidden_size + neuron];
        result[neuron] = acc;
    }
}
/*
* Function that apply the sigmoid function to every element of a vector
* In:
* double: vector to apply the signmoid function to every element
* N: size of the vector
*/
/*
 * Apply the logistic sigmoid f(x) = 1 / (1 + e^(-x)) to every element of f,
 * in place.
 * In:
 *   f: vector to transform
 *   N: number of elements in f
 */
void sigmoid(double *f, int N){
    for (int k = 0; k < N; ++k) {
        double z = f[k];
        f[k] = 1.0 / (1.0 + exp(-z));
    }
}
/*
* Function that normalize the input, so all the values are equally important. Normalize is the process
* to transform every element of a vector to its correspondent value beetwen 0 and 1
* In:
* c: vector with the numbers between 0 and 255, each one which corresponds to a pixel of the input image
* f: vector so save the normalized vector
* N: size of the vectors
*/
/*
 * Normalize raw pixel bytes into [0, 1]: f[i] = (c[i] - Min) / (Max - Min)
 * with Min = 1 and Max = 255 (so a byte of 1 maps to 0.0 and 255 to 1.0).
 * In:
 *   c: raw pixel values
 *   f: output buffer for the normalized values
 *   N: number of elements
 */
void process_input(unsigned char *c, double *f, int N){
    for (int k = 0; k < N; ++k)
        f[k] = (c[k] - 1) / 254.0;
}
/*
* Function that returns the index corresponding to the maximum element of an array
* In:
* f: vector of values
* N: size of vector
* Out:
* int corresponding to the index of the maximum value
*/
/*
 * Return the index of the maximum element of f. Ties keep the earliest index
 * (strict > comparison).
 * In:
 *   f: vector of values (must have at least one element)
 *   N: number of elements
 * Out:
 *   index of the largest value
 */
int max_index(double *f, int N){
    int best = 0;
    for (int k = 1; k < N; ++k)
        if (f[k] > f[best])
            best = k;
    return best;
}
/*
* Function that calculate the error of the neural network
* In:
* f: output vector of the neural network
* output: expected value
* N: size of the vector
* Out:
* double corresponding to the calculated error of the NN
*/
/*
 * Total network error against a one-hot target:
 *   error = 0.5 * SUM_i (d_i - f_i)^2, where d_i = 1 for i == output, else 0.
 * In:
 *   f: network output vector
 *   output: index of the expected class
 *   N: number of output neurons
 * Out:
 *   the half sum-of-squares error
 * BUG FIX: the original malloc'd a one-hot scratch array and never freed it,
 * leaking N doubles per call; the target value is now computed inline.
 */
double error(double *f, int output, int N) {
    double total = 0.0;
    for (int i = 0; i < N; ++i) {
        double target = (i == output) ? 1.0 : 0.0;
        double diff = target - f[i];
        total += diff * diff;
    }
    return 0.5 * total;
}
/*
* Function that calculate the error of the output layers
* In:
* f: value of the output neurons
* output: expected value
* N: size of the vector
* error_array: vector with the calculated error of every neuron
*
*/
/*
 * Output-layer deltas for backpropagation with a one-hot target:
 *   error_array[i] = (d_i - f_i) * f_i * (1 - f_i), d_i = 1 iff i == output.
 * In:
 *   f: activations of the output layer
 *   output: index of the expected class
 *   N: number of output neurons
 *   error_array: output buffer for the deltas (length N)
 * BUG FIX: the original malloc'd a one-hot scratch array and never freed it,
 * leaking N doubles per call; the target value is now computed inline.
 */
void error_output(double *f, int output, int N, double *error_array){
    for (int i = 0; i < N; ++i){
        double target = (i == output) ? 1.0 : 0.0;
        error_array[i] = (target - f[i]) * f[i] * (1.0 - f[i]);
    }
}
/* Function that calculates the error of the hidden layers
* In:
* f: hidden layer calculated values
* error_array: error vector, to save the error of every neuron in the hidden layer
* next_layer_error: error vector of the next layer, neccessary to calculate the error of a hidden layer
* layer_size: size of the hidden layer
* next_size: size of the next layer
* transition_matrix: transition matrix to propagate values from hidden to next layer
*/
/*
 * Hidden-layer deltas for backpropagation:
 *   error_array[i] = f[i] * (1 - f[i]) * SUM_j W[i][j] * next_layer_error[j],
 * where W (transition_matrix) is row-major with next_size columns.
 * In:
 *   f: activations of this hidden layer (length layer_size)
 *   error_array: output buffer for this layer's deltas (length layer_size)
 *   next_layer_error: deltas of the following layer (length next_size)
 *   layer_size: neurons in this layer
 *   next_size: neurons in the following layer
 *   transition_matrix: layer_size x next_size weights to the following layer
 */
void error_hidden_layer(double *f, double* error_array, double *next_layer_error, int layer_size, int next_size, double *transition_matrix){
    for (int i = 0; i < layer_size; ++i){
        double acc = 0.0;
        // Back-propagated contribution from every neuron of the next layer.
        for (int j = 0; j < next_size; ++j)
            acc += transition_matrix[i * next_size + j] * next_layer_error[j];
        error_array[i] = f[i] * (1.0 - f[i]) * acc;
    }
}
/*
* Function that calculates the variation of weigths of a neural network
* In:
* error_vector: error vector of the layer
* gradient: variance of the weights for every element
* layer: value of the elements of the layer
* N: rows of the transition matrix
* M: columns ot the transition matrix
*
*/
/*
 * Weight-update matrix for one transition:
 *   gradient[i][j] = LEARNING_RATE * error_vector[j] * layer[i].
 * In:
 *   error_vector: deltas of the destination layer (length M)
 *   gradient: N x M output matrix of weight updates (row-major)
 *   layer: activations of the source layer (length N)
 *   N, M: dimensions of the transition matrix
 */
void calculate_error(double *error_vector, double* gradient, double *layer, int N, int M){
    for (int i = 0; i < N; ++i) {
        const double activation = layer[i];
        double *row = gradient + i * M;
        for (int j = 0; j < M; ++j)
            row[j] = LEARNING_RATE * error_vector[j] * activation;
    }
}
/*
* Function that sums two matrix and store it directly in the first matrix
* In:
* transition: first matrix
* gradient: second matrix
* N: rows of the matrix
* M: columns of the matrix
*/
/*
 * Element-wise in-place sum of two N x M matrices:
 * transition[i][j] += gradient[i][j].
 * In:
 *   transition: matrix updated in place
 *   gradient: matrix of increments
 *   N, M: matrix dimensions
 */
void sum_matrix(double *transition, double *gradient, int N, int M){
    const int total = N * M;
    // Both matrices are dense and row-major, so a flat pass is equivalent.
    for (int k = 0; k < total; ++k)
        transition[k] += gradient[k];
}
/*
* Function that reads a file, stores every vector of it, and then apply backpropagation
* In:
* file: name of the file with the data
* buffer: vector where every pixel will be stored
* transition_matrix_1: transition vector from input layer to hidden layer 1
* transition_matrix_2: transition vector from hidden layer 1 to hidden layer 2
* transition_matrix_3: transition vector from hidden layer 2 to output layer
* elements: number of elements to use for training
*/
/*
 * Reads labelled image records from a binary file and runs one
 * forward/backward pass (online backpropagation) per record, updating the
 * three transition matrices in place. Also reports the average wall-clock
 * time per classification, measured with CUDA events.
 * In:
 *   file: name of the file with the data
 *   buffer: scratch vector of PIXELS bytes where every record is read
 *   transition_matrix_1: weights from input layer to hidden layer 1
 *   transition_matrix_2: weights from hidden layer 1 to hidden layer 2
 *   transition_matrix_3: weights from hidden layer 2 to output layer
 *   elements: maximum number of records to use for training
 */
void read_file(char *file, unsigned char* buffer,double *transition_matrix_1,double *transition_matrix_2, double *transition_matrix_3, int elements){
    // Each record is exactly PIXELS bytes: the first byte is the label,
    // the rest the image, so one fread of PIXELS bytes fetches one record.
    FILE *f = fopen(file, "rb");
    if (f == NULL) {
        // Fail loudly instead of dereferencing a NULL FILE* below.
        printf("No se pudo abrir el archivo %s\n", file);
        return;
    }
    unsigned char expected_output;
    int i = 0;
    // Accumulates per-record times so we can report the average at the end.
    float total_time = 0.0;
    while(1 == fread(buffer,PIXELS,1,f) && i < elements){
        // Time this single classification with CUDA events.
        float tiempo1;
        cudaEvent_t inicio1, fin1;
        cudaEventCreate(&inicio1);
        cudaEventCreate(&fin1);
        cudaEventRecord( inicio1, 0 );
        // The first byte of the record is the expected class...
        expected_output = buffer[0];
        // ...and once saved, its slot is reused as the bias input.
        buffer[0] = 1;
        // Activations of every layer (the +1 slots hold the bias units).
        double *hidden_layer_1 = (double*)malloc((HIDDEN_LAYER_1+1)*sizeof(double));
        double *hidden_layer_2 = (double*)malloc((HIDDEN_LAYER_2+1)*sizeof(double));
        double *output_layer = (double*)malloc(OUTPUT_LAYER*sizeof(double));
        // Normalize the raw bytes into doubles.
        double *input = (double*)malloc(PIXELS*sizeof(double));
        process_input(buffer,input,PIXELS);
        // Forward pass: input -> hidden 1 -> hidden 2 -> output,
        // applying the sigmoid and re-setting the bias after each step.
        get_layer(input,transition_matrix_1,hidden_layer_1,PIXELS,HIDDEN_LAYER_1);
        sigmoid(hidden_layer_1,HIDDEN_LAYER_1+1);
        hidden_layer_1[HIDDEN_LAYER_1] = 1;
        get_layer(hidden_layer_1,transition_matrix_2,hidden_layer_2,HIDDEN_LAYER_1+1,HIDDEN_LAYER_2);
        sigmoid(hidden_layer_2,HIDDEN_LAYER_2+1);
        hidden_layer_2[HIDDEN_LAYER_2] = 1;
        get_layer(hidden_layer_2,transition_matrix_3,output_layer,HIDDEN_LAYER_2+1,OUTPUT_LAYER);
        sigmoid(output_layer,OUTPUT_LAYER);
        // Backward pass: output error first, then the gradient of each
        // transition matrix, propagating the error layer by layer.
        double *errors_array = (double*)malloc(OUTPUT_LAYER* sizeof(double));
        error_output(output_layer,expected_output,OUTPUT_LAYER,errors_array);
        double *transition_matrix_3_gradient = (double*)malloc((HIDDEN_LAYER_2+1)*OUTPUT_LAYER*sizeof(double));
        calculate_error(errors_array,transition_matrix_3_gradient,hidden_layer_2,HIDDEN_LAYER_2+1,OUTPUT_LAYER);
        double *hidden_layer_array_2 = (double*)malloc((HIDDEN_LAYER_2+1)* sizeof(double));
        error_hidden_layer(hidden_layer_2,hidden_layer_array_2,errors_array,OUTPUT_LAYER,HIDDEN_LAYER_2+1,transition_matrix_3);
        double *transition_matrix_2_gradient = (double*)malloc((HIDDEN_LAYER_1+1)*HIDDEN_LAYER_2*sizeof(double));
        calculate_error(hidden_layer_array_2,transition_matrix_2_gradient,hidden_layer_1,HIDDEN_LAYER_1+1,HIDDEN_LAYER_2);
        double *hidden_layer_array_1 = (double*)malloc((HIDDEN_LAYER_1+1)* sizeof(double));
        error_hidden_layer(hidden_layer_1,hidden_layer_array_1,hidden_layer_array_2,HIDDEN_LAYER_2,HIDDEN_LAYER_1+1,transition_matrix_2);
        double *transition_matrix_1_gradient = (double*)malloc(PIXELS*HIDDEN_LAYER_1*sizeof(double));
        calculate_error(hidden_layer_array_1,transition_matrix_1_gradient,input,PIXELS,HIDDEN_LAYER_1);
        // Apply all gradients only once every one has been computed, so the
        // backward pass sees the pre-update weights throughout.
        sum_matrix(transition_matrix_1,transition_matrix_1_gradient,PIXELS,HIDDEN_LAYER_1);
        sum_matrix(transition_matrix_2,transition_matrix_2_gradient,HIDDEN_LAYER_1+1,HIDDEN_LAYER_2);
        sum_matrix(transition_matrix_3,transition_matrix_3_gradient,HIDDEN_LAYER_2+1,OUTPUT_LAYER);
        // Next record.
        ++i;
        // Release every per-iteration buffer (the previous version leaked
        // the gradient matrices and the per-layer error arrays).
        free(hidden_layer_1);
        free(hidden_layer_2);
        free(output_layer);
        free(input);
        free(errors_array);
        free(hidden_layer_array_1);
        free(hidden_layer_array_2);
        free(transition_matrix_1_gradient);
        free(transition_matrix_2_gradient);
        free(transition_matrix_3_gradient);
        // Record the finish moment and accumulate the elapsed time.
        cudaEventRecord( fin1, 0);
        cudaEventSynchronize( fin1 );
        cudaEventElapsedTime( &tiempo1, inicio1, fin1 );
        cudaEventDestroy( inicio1 );
        cudaEventDestroy( fin1 );
        total_time += tiempo1;
    }
    fclose(f);
    // Average over the records actually processed: the file may hold fewer
    // than `elements` records, and dividing by zero must be avoided.
    if (i > 0)
        total_time /= i;
    printf ("Tiempo promedio por clasificacion: %f\n", total_time);
}
/*
* Function that randomly initialize all values off the transiction matrix
* In:
* matrix: transition matrix of the neural network
* N: rows of the matrix
* M: columns of the matrix
*/
/*
 * Fills a transition matrix with uniform random weights in (-1, 1), with
 * six decimal digits of resolution. For each cell, a first rand() draw
 * decides the sign and a second one the magnitude.
 * In:
 *   matrix: transition matrix of the neural network (row-major N x M)
 *   N: rows of the matrix
 *   M: columns of the matrix
 */
void init_layer(double *matrix, int N, int M){
    for (int i = 0; i < N; ++i){
        for (int j = 0; j < M; ++j){
            // 0 -> keep positive, 1 -> negate.
            int negative = rand() % 2;
            double magnitude = (rand() % 1000000) / 1000000.0;
            matrix[i*M + j] = negative ? -magnitude : magnitude;
        }
    }
}
/*
* Function that prints the value of the transition matrix
* In:
* matrix: transition matrix
* N: rows of the matrix
* M: columns of the matrix
*/
/*
 * Prints a transition matrix to stdout: values space-separated, one matrix
 * row per output line.
 * In:
 *   matrix: transition matrix (row-major N x M)
 *   N: rows of the matrix
 *   M: columns of the matrix
 */
void print_layer(double *matrix, int N, int M){
    // Walk the row-major storage with a single moving pointer.
    double *p = matrix;
    for (int row = 0; row < N; ++row){
        for (int col = 0; col < M; ++col)
            printf("%f ", *p++);
        printf("\n");
    }
}
/*
 * Entry point: parses the number of training samples from argv, builds and
 * randomly initializes the three transition matrices, runs the training via
 * read_file(), and reports the total wall-clock time.
 */
int main(int argc, char *argv[]){
    // Seed the RNG used by init_layer.
    srand(time(NULL));
    // Exactly one argument is expected: the number of elements to train with.
    if ( argc != 2 ) {
        /* We print argv[0] assuming it is the program name */
        printf( "Error se debe ejecutar: %s <N>\n", argv[0] );
        exit(0);
    }
    int elements = atoi(argv[1]);
    printf("Se va a entrenar con %d elementos\n",elements);
    // Allocate the three transition matrices (the +1 rows hold bias weights).
    double *transition_matrix_1 = (double*)malloc(PIXELS*HIDDEN_LAYER_1*sizeof(double));
    double *transition_matrix_2 = (double*)malloc((HIDDEN_LAYER_1+1)*HIDDEN_LAYER_2*sizeof(double));
    double *transition_matrix_3 = (double*)malloc((HIDDEN_LAYER_2+1)*OUTPUT_LAYER*sizeof(double));
    // Randomly initialize all weights in (-1, 1).
    init_layer(transition_matrix_1,PIXELS,HIDDEN_LAYER_1);
    init_layer(transition_matrix_2,HIDDEN_LAYER_1+1,HIDDEN_LAYER_2);
    init_layer(transition_matrix_3,HIDDEN_LAYER_2+1,OUTPUT_LAYER);
    // Time the whole training run with CUDA events.
    float tiempo1;
    cudaEvent_t inicio1, fin1;
    unsigned char *buffer = (unsigned char*)malloc(PIXELS*sizeof(unsigned char));
    cudaEventCreate(&inicio1);
    cudaEventCreate(&fin1);
    cudaEventRecord( inicio1, 0 );
    // Run the training over the data set file.
    read_file("data_batch_1.bin",buffer,transition_matrix_1,transition_matrix_2,transition_matrix_3,elements);
    cudaEventRecord( fin1, 0);
    cudaEventSynchronize( fin1 );
    cudaEventElapsedTime( &tiempo1, inicio1, fin1 );
    printf("Tiempo total del programa: %f ms\n", tiempo1);
    // Release every resource before exiting (the previous version leaked
    // the buffer, the three matrices and both events).
    cudaEventDestroy( inicio1 );
    cudaEventDestroy( fin1 );
    free(buffer);
    free(transition_matrix_1);
    free(transition_matrix_2);
    free(transition_matrix_3);
    return 0;
}
|
2d045f1697af8509025d84f0407eb24506b478ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgeadd_batched.cu normal z -> s, Tue Sep 2 12:38:15 2014
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/* =====================================================================
Batches slacpy of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
__global__ void
sgeadd_batched_kernel(
    int m, int n,
    float alpha,
    const float * const *dAarray, int ldda,
    float **dBarray, int lddb )
{
    // grid y selects the matrix of the batch; each thread owns one row i
    // and sweeps across all n columns of that matrix.
    const float *dA = dAarray[ blockIdx.y ];
    float       *dB = dBarray[ blockIdx.y ];

    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if ( i >= m )
        return;

    // dB(i,j) = alpha * dA(i,j) + dB(i,j) for every column j.
    for( int j = 0; j < n; ++j ) {
        dB[i + j*lddb] = alpha*dA[i + j*ldda] + dB[i + j*lddb];
    }
}
/* ===================================================================== */
/**
Purpose
-------
ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
alpha REAL
The scalar alpha.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a REAL array, dimension (LDDA,N)
The m by n matrices dAarray[i].
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[in,out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a REAL array, dimension (LDDB,N)
The m by n matrices dBarray[i].
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_sgeadd_batched_q(
    magma_int_t m, magma_int_t n,
    float alpha,
    const float * const *dAarray, magma_int_t ldda,
    float **dBarray, magma_int_t lddb,
    magma_int_t batchCount,
    magma_queue_t queue )
{
    // Validate arguments; the negative value encodes the position of the
    // offending parameter, as reported by magma_xerbla below.
    magma_int_t info = 0;
    if ( m < 0 )
        info = -1;
    else if ( n < 0 )
        info = -2;
    else if ( ldda < max(1,m))
        info = -5;
    else if ( lddb < max(1,m))
        info = -7;
    else if ( batchCount < 0 )
        info = -8;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // Quick return: nothing to add.
    if ( m == 0 || n == 0 || batchCount == 0 )
        return;
    // Grid layout: x covers the rows of one matrix in NB-sized chunks,
    // y indexes the matrices of the batch (one block row per matrix).
    dim3 threads( NB );
    dim3 grid( (m + NB - 1)/NB, batchCount );
    // Launch on the caller-supplied queue (HIP stream).
    hipLaunchKernelGGL(( sgeadd_batched_kernel), dim3(grid), dim3(threads), 0, queue ,
        m, n, alpha, dAarray, ldda, dBarray, lddb );
}
/**
@see magmablas_sgeadd_batched_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_sgeadd_batched(
    magma_int_t m, magma_int_t n,
    float alpha,
    const float * const *dAarray, magma_int_t ldda,
    float **dBarray, magma_int_t lddb,
    magma_int_t batchCount )
{
    // Convenience wrapper: same operation on the global MAGMA stream.
    magmablas_sgeadd_batched_q(
        m, n, alpha, dAarray, ldda, dBarray, lddb, batchCount, magma_stream );
}
| 2d045f1697af8509025d84f0407eb24506b478ca.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgeadd_batched.cu normal z -> s, Tue Sep 2 12:38:15 2014
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/* =====================================================================
Batches slacpy of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
__global__ void
sgeadd_batched_kernel(
    int m, int n,
    float alpha,
    const float * const *dAarray, int ldda,
    float **dBarray, int lddb )
{
    // grid y selects the matrix of the batch; each thread owns one row i
    // and sweeps across all n columns of that matrix.
    const float *dA = dAarray[ blockIdx.y ];
    float       *dB = dBarray[ blockIdx.y ];

    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if ( i >= m )
        return;

    // dB(i,j) = alpha * dA(i,j) + dB(i,j) for every column j.
    for( int j = 0; j < n; ++j ) {
        dB[i + j*lddb] = alpha*dA[i + j*ldda] + dB[i + j*lddb];
    }
}
/* ===================================================================== */
/**
Purpose
-------
ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
alpha REAL
The scalar alpha.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a REAL array, dimension (LDDA,N)
The m by n matrices dAarray[i].
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[in,out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a REAL array, dimension (LDDB,N)
The m by n matrices dBarray[i].
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_sgeadd_batched_q(
    magma_int_t m, magma_int_t n,
    float alpha,
    const float * const *dAarray, magma_int_t ldda,
    float **dBarray, magma_int_t lddb,
    magma_int_t batchCount,
    magma_queue_t queue )
{
    // Validate arguments; the negative value encodes the position of the
    // offending parameter, as reported by magma_xerbla below.
    magma_int_t info = 0;
    if ( m < 0 )
        info = -1;
    else if ( n < 0 )
        info = -2;
    else if ( ldda < max(1,m))
        info = -5;
    else if ( lddb < max(1,m))
        info = -7;
    else if ( batchCount < 0 )
        info = -8;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // Quick return: nothing to add.
    if ( m == 0 || n == 0 || batchCount == 0 )
        return;
    // Grid layout: x covers the rows of one matrix in NB-sized chunks,
    // y indexes the matrices of the batch (one block row per matrix).
    dim3 threads( NB );
    dim3 grid( (m + NB - 1)/NB, batchCount );
    // Launch on the caller-supplied queue (CUDA stream).
    sgeadd_batched_kernel<<< grid, threads, 0, queue >>>(
        m, n, alpha, dAarray, ldda, dBarray, lddb );
}
/**
@see magmablas_sgeadd_batched_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_sgeadd_batched(
    magma_int_t m, magma_int_t n,
    float alpha,
    const float * const *dAarray, magma_int_t ldda,
    float **dBarray, magma_int_t lddb,
    magma_int_t batchCount )
{
    // Convenience wrapper: same operation on the global MAGMA stream.
    magmablas_sgeadd_batched_q(
        m, n, alpha, dAarray, ldda, dBarray, lddb, batchCount, magma_stream );
}
f29190ae9633ef2a662627d93736635de117207d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#include "kernels.hip"
// Sequential box-filter resize: each (bw x bh) window of the source region
// collapses to a single destination pixel holding the window's grayscale
// mean (as computed by getWindowMeanGS).
void resize_seq(uc *src, int srcx, int srcy, int srcw, int srch, int srcTotalWidth, //x,y,width,height
                uc *dst, int dstx, int dsty, int dstw, int dsth, int dstTotalWidth) //x,y,width,height
{
    // Scale factors: one destination pixel covers a (bw x bh) source window.
    float bw = float(srcw) / dstw;
    float bh = float(srch) / dsth;
    // Window extent is loop-invariant, so compute it once.
    int winW = floor(bw);
    int winH = floor(bh);
    for(int dy = dsty; dy < dsty + dsth; ++dy)
    {
        uc *dstRow = dst + dy * dstTotalWidth;
        for(int dx = dstx; dx < dstx + dstw; ++dx)
        {
            // Mean of the source window that maps onto (dx, dy).
            dstRow[dx] = getWindowMeanGS(src,
                                         srcx + ceil(dx*bw), srcy + ceil(dy*bh), //x, y
                                         winW, winH,                             //width, height
                                         srcTotalWidth                           //totalWidth
                                         );
        }
    }
}
__global__ void detectFaces(uc *img, int winWidth, int winHeight, uc *resultMatrix);
// Aborts with a diagnostic if any previous HIP call left an error pending.
void CheckCudaError(int line) {
    hipError_t error = hipGetLastError();
    if (!error) return;
    printf("(ERROR) - %s in %s at line %d\n", hipGetErrorString(error), __FILE__, line);
    fflush(stdout);
    exit(EXIT_FAILURE);
}
#define CE() { CheckCudaError(__LINE__); }
// Entry point: loads an image, converts it to grayscale, downsamples it to
// IMG_WIDTH x IMG_HEIGHT, then runs the detectFaces kernel across multiple
// devices for every (width, height) window size and reports matches.
int main(int argc, char** argv)
{
    cout << "Usage: " << argv[0] << " <image file name>" << endl;
    for (int i = 0; i < argc; ++i) { cout << argv[i] << endl; }
    // Read input image
    FaceDetection fc(argv[1]);
    printf("image File: %s, size(%d px, %d px)\n",
           fc.image->filename, fc.image->width(), fc.image->height());
    // Convert input image to grayscale (one byte per pixel, row-major).
    int numBytesImageOriginal = fc.image->width() * fc.image->height() * sizeof(uc);
    uc *h_imageGSOriginal = (uc*) malloc(numBytesImageOriginal);
    printf("Adapting input. Creating grayscale image....\n");
    for(int y = 0; y < fc.image->height(); ++y) {
        for(int x = 0; x < fc.image->width(); ++x) {
            h_imageGSOriginal[y * fc.image->width() + x] = fc.image->getGrayScale(Pixel(x,y));
        }
    }
    // Resize the input image down to the fixed working resolution.
    printf("Resizing original image....\n");
    int numBytesImage = IMG_WIDTH * IMG_HEIGHT * sizeof(uc);
    uc *h_imageGS = (uc*) malloc(numBytesImage);
    resize_seq
            (h_imageGSOriginal,
             0, 0, fc.image->width(), fc.image->height(), fc.image->width(),
             h_imageGS,
             0, 0, IMG_WIDTH, IMG_HEIGHT, IMG_WIDTH);
    int deviceCount;
    hipGetDeviceCount(&deviceCount);
    if (deviceCount < NUM_DEVICES) { printf("Not enough GPUs\n"); exit(-1); }
    dim3 dimGrid(NUM_BLOCKS, NUM_BLOCKS, 1);
    dim3 dimBlock(NUM_THREADS, 1, 1);
    // Candidate detection-window sizes in original-image pixels; each
    // (width, height) pair is tried as a separate kernel launch.
    int winWidths[] = {35, 40, 45, 50, 55, 60, 65, 70, 75, 85, 95, 105, 115, 125, 140, 150, 160, 170, 180, 190};
    int winHeights[] = {35, 40, 45, 50, 55, 60, 65, 70, 75, 85, 95, 105, 115, 125, 140, 150, 160, 170, 180, 190};
    const int numWindowsWidth = (sizeof(winWidths) / sizeof(int));
    const int numWindowsHeight = (sizeof(winHeights) / sizeof(int));
    const int numWindows = numWindowsWidth * numWindowsHeight;
    // NOTE(review): assumes numWindows is divisible by NUM_DEVICES;
    // otherwise the trailing windows are silently skipped — confirm.
    const int windowsPerDevice = numWindows / NUM_DEVICES;
    // Factors converting between original-image and resized coordinates.
    const float widthRatio = float(fc.image->width())/IMG_WIDTH;
    const float heightRatio = float(fc.image->height())/IMG_HEIGHT;
    printf("Num windows: %i\n", numWindows);
    printf("Windows per device: %i\n", windowsPerDevice);
    // One result matrix (NUM_BLOCKS x NUM_BLOCKS flags) per window size.
    printf("Getting memory in the host to allocate resultMatrix...\n");
    int numBytesResultMatrix = NUM_BLOCKS * NUM_BLOCKS * sizeof(uc);
    uc *h_resultMatrix[numWindows];
    for(int i = 0; i < numWindows; ++i)
        h_resultMatrix[i]= (uc*) malloc(numBytesResultMatrix);
    // Per-device buffers for the image and the result flags.
    uc *d_imageGS[NUM_DEVICES], *d_resultMatrix[NUM_DEVICES];
    for(int i = 0; i < NUM_DEVICES; ++i)
    {
        printf("Getting memory in device %d...\n", i);
        hipSetDevice(i);
#ifndef USE_PINNED
        hipMalloc((uc**)&d_imageGS[i], numBytesImage); CE();
        hipMalloc((uc**)&d_resultMatrix[i], numBytesResultMatrix); CE();
#else
        // NOTE(review): hipHostMalloc returns pinned *host* memory, yet the
        // pointers are later used as copy destinations/kernel arguments like
        // device memory — confirm zero-copy mapping is intended here.
        hipHostMalloc((uc**)&d_imageGS[i], numBytesImage); CE();
        hipHostMalloc((uc**)&d_resultMatrix[i], numBytesResultMatrix); CE();
#endif
    }
    // Copy data from host to device, execute kernel, copy data from device to host
    hipSetDevice(0);
    hipEvent_t E0, E1;
    hipEventCreate(&E0);
    hipEventCreate(&E1);
    hipEventRecord(E0, 0);
    // Round-robin the window sizes over the devices, windowsPerDevice rounds.
    for(int j = 0; j < windowsPerDevice; ++j)
    {
        for(int i = 0; i < NUM_DEVICES; ++i)
        {
            hipSetDevice(i);
            printf("Copying image from host to device %d...\n", i);
            fflush(stdout);
            hipMemcpyAsync(d_imageGS[i], h_imageGS, numBytesImage, hipMemcpyHostToDevice); CE();
            // Map the flat window index back to (width, height) indices.
            int index = (i*windowsPerDevice + j);
            int wi = index / numWindowsWidth;
            int hi = index % numWindowsHeight;
            printf("Executing kernel detectFaces on device %d...\n", i);
            fflush(stdout);
            // Window size is scaled into resized-image coordinates.
            hipLaunchKernelGGL(( detectFaces), dim3(dimGrid), dim3(dimBlock), 0, 0, d_imageGS[i], winWidths[wi] / widthRatio, winHeights[hi] / heightRatio, d_resultMatrix[i]); CE();
        }
        for(int i = 0; i < NUM_DEVICES; ++i)
        {
            hipSetDevice(i);
            int index = (i*windowsPerDevice + j);
            printf("Retrieving resultMatrix from device %d to host...\n", i);
            fflush(stdout);
            hipMemcpyAsync(h_resultMatrix[index], d_resultMatrix[i], numBytesResultMatrix, hipMemcpyDeviceToHost); CE();
        }
    }
    // Wait for all async work on every device before reading results.
    for(int i = 0; i < NUM_DEVICES; ++i) { hipSetDevice(i); hipDeviceSynchronize(); }
    hipSetDevice(0);
    hipEventRecord(E1, 0);
    float elapsedTime;
    hipEventElapsedTime(&elapsedTime, E0, E1);
    printf("Kernel elapsed time: %4.6f\n", elapsedTime);
    // Scan every result matrix: a 1 flag at (i, j) marks a detection for
    // window size k at block position (j, i); map it back to original-image
    // coordinates and collect it as a Box.
    for(int k = 0; k < numWindows; ++k)
    {
        for(int i = 0; i < NUM_BLOCKS; ++i)
        {
            for(int j = 0; j < NUM_BLOCKS; ++j)
            {
                if (h_resultMatrix[k][i * NUM_BLOCKS + j] == 1)
                {
                    int wi = k / numWindowsWidth;
                    int hi = k % numWindowsHeight;
                    int kernelStepWidth = (IMG_WIDTH - winWidths[wi]/widthRatio) / NUM_BLOCKS + 1;
                    int kernelStepHeight = (IMG_HEIGHT - winHeights[hi]/heightRatio) / NUM_BLOCKS + 1;
                    printf("Result found for size(%d,%d) in x,y: (%d,%d)\n", winWidths[wi], winHeights[hi], j, i);
                    fc.resultWindows.push_back(Box(int(j * kernelStepWidth * widthRatio),
                                                   int(i * kernelStepHeight * heightRatio),
                                                   int(winWidths[wi]),
                                                   int(winHeights[hi])));
                }
            }
        }
    }
    fc.saveResult();
    // Free memory in every device.
    // NOTE(review): the host buffers (h_imageGSOriginal, h_imageGS,
    // h_resultMatrix[]) and events E0/E1 are never released — harmless at
    // process exit, but worth fixing.
    printf("Freeing device memory...\n");
    for(int i = 0; i < NUM_DEVICES; ++i)
    {
        hipSetDevice(i);
        hipFree(d_imageGS[i]);
        hipFree(d_resultMatrix[i]);
    }
    printf("Done.\n");
}
| f29190ae9633ef2a662627d93736635de117207d.cu | #include "common.h"
#include "kernels.cu"
// Sequential box-filter resize: each (bw x bh) window of the source region
// collapses to a single destination pixel holding the window's grayscale
// mean (as computed by getWindowMeanGS).
void resize_seq(uc *src, int srcx, int srcy, int srcw, int srch, int srcTotalWidth, //x,y,width,height
                uc *dst, int dstx, int dsty, int dstw, int dsth, int dstTotalWidth) //x,y,width,height
{
    // Scale factors: one destination pixel covers a (bw x bh) source window.
    float bw = float(srcw) / dstw;
    float bh = float(srch) / dsth;
    // Window extent is loop-invariant, so compute it once.
    int winW = floor(bw);
    int winH = floor(bh);
    for(int dy = dsty; dy < dsty + dsth; ++dy)
    {
        uc *dstRow = dst + dy * dstTotalWidth;
        for(int dx = dstx; dx < dstx + dstw; ++dx)
        {
            // Mean of the source window that maps onto (dx, dy).
            dstRow[dx] = getWindowMeanGS(src,
                                         srcx + ceil(dx*bw), srcy + ceil(dy*bh), //x, y
                                         winW, winH,                             //width, height
                                         srcTotalWidth                           //totalWidth
                                         );
        }
    }
}
__global__ void detectFaces(uc *img, int winWidth, int winHeight, uc *resultMatrix);
// Aborts with a diagnostic if any previous CUDA call left an error pending.
void CheckCudaError(int line) {
    cudaError_t error = cudaGetLastError();
    if (!error) return;
    printf("(ERROR) - %s in %s at line %d\n", cudaGetErrorString(error), __FILE__, line);
    fflush(stdout);
    exit(EXIT_FAILURE);
}
#define CE() { CheckCudaError(__LINE__); }
// Entry point: loads an image, converts it to grayscale, downsamples it to
// IMG_WIDTH x IMG_HEIGHT, then runs the detectFaces kernel across multiple
// devices for every (width, height) window size and reports matches.
int main(int argc, char** argv)
{
    cout << "Usage: " << argv[0] << " <image file name>" << endl;
    for (int i = 0; i < argc; ++i) { cout << argv[i] << endl; }
    // Read input image
    FaceDetection fc(argv[1]);
    printf("image File: %s, size(%d px, %d px)\n",
           fc.image->filename, fc.image->width(), fc.image->height());
    // Convert input image to grayscale (one byte per pixel, row-major).
    int numBytesImageOriginal = fc.image->width() * fc.image->height() * sizeof(uc);
    uc *h_imageGSOriginal = (uc*) malloc(numBytesImageOriginal);
    printf("Adapting input. Creating grayscale image....\n");
    for(int y = 0; y < fc.image->height(); ++y) {
        for(int x = 0; x < fc.image->width(); ++x) {
            h_imageGSOriginal[y * fc.image->width() + x] = fc.image->getGrayScale(Pixel(x,y));
        }
    }
    // Resize the input image down to the fixed working resolution.
    printf("Resizing original image....\n");
    int numBytesImage = IMG_WIDTH * IMG_HEIGHT * sizeof(uc);
    uc *h_imageGS = (uc*) malloc(numBytesImage);
    resize_seq
            (h_imageGSOriginal,
             0, 0, fc.image->width(), fc.image->height(), fc.image->width(),
             h_imageGS,
             0, 0, IMG_WIDTH, IMG_HEIGHT, IMG_WIDTH);
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount < NUM_DEVICES) { printf("Not enough GPUs\n"); exit(-1); }
    dim3 dimGrid(NUM_BLOCKS, NUM_BLOCKS, 1);
    dim3 dimBlock(NUM_THREADS, 1, 1);
    // Candidate detection-window sizes in original-image pixels; each
    // (width, height) pair is tried as a separate kernel launch.
    int winWidths[] = {35, 40, 45, 50, 55, 60, 65, 70, 75, 85, 95, 105, 115, 125, 140, 150, 160, 170, 180, 190};
    int winHeights[] = {35, 40, 45, 50, 55, 60, 65, 70, 75, 85, 95, 105, 115, 125, 140, 150, 160, 170, 180, 190};
    const int numWindowsWidth = (sizeof(winWidths) / sizeof(int));
    const int numWindowsHeight = (sizeof(winHeights) / sizeof(int));
    const int numWindows = numWindowsWidth * numWindowsHeight;
    // NOTE(review): assumes numWindows is divisible by NUM_DEVICES;
    // otherwise the trailing windows are silently skipped — confirm.
    const int windowsPerDevice = numWindows / NUM_DEVICES;
    // Factors converting between original-image and resized coordinates.
    const float widthRatio = float(fc.image->width())/IMG_WIDTH;
    const float heightRatio = float(fc.image->height())/IMG_HEIGHT;
    printf("Num windows: %i\n", numWindows);
    printf("Windows per device: %i\n", windowsPerDevice);
    // One result matrix (NUM_BLOCKS x NUM_BLOCKS flags) per window size.
    printf("Getting memory in the host to allocate resultMatrix...\n");
    int numBytesResultMatrix = NUM_BLOCKS * NUM_BLOCKS * sizeof(uc);
    uc *h_resultMatrix[numWindows];
    for(int i = 0; i < numWindows; ++i)
        h_resultMatrix[i]= (uc*) malloc(numBytesResultMatrix);
    // Per-device buffers for the image and the result flags.
    uc *d_imageGS[NUM_DEVICES], *d_resultMatrix[NUM_DEVICES];
    for(int i = 0; i < NUM_DEVICES; ++i)
    {
        printf("Getting memory in device %d...\n", i);
        cudaSetDevice(i);
#ifndef USE_PINNED
        cudaMalloc((uc**)&d_imageGS[i], numBytesImage); CE();
        cudaMalloc((uc**)&d_resultMatrix[i], numBytesResultMatrix); CE();
#else
        // NOTE(review): cudaMallocHost returns pinned *host* memory, yet the
        // pointers are later used as copy destinations/kernel arguments like
        // device memory — confirm zero-copy mapping is intended here.
        cudaMallocHost((uc**)&d_imageGS[i], numBytesImage); CE();
        cudaMallocHost((uc**)&d_resultMatrix[i], numBytesResultMatrix); CE();
#endif
    }
    // Copy data from host to device, execute kernel, copy data from device to host
    cudaSetDevice(0);
    cudaEvent_t E0, E1;
    cudaEventCreate(&E0);
    cudaEventCreate(&E1);
    cudaEventRecord(E0, 0);
    // Round-robin the window sizes over the devices, windowsPerDevice rounds.
    for(int j = 0; j < windowsPerDevice; ++j)
    {
        for(int i = 0; i < NUM_DEVICES; ++i)
        {
            cudaSetDevice(i);
            printf("Copying image from host to device %d...\n", i);
            fflush(stdout);
            cudaMemcpyAsync(d_imageGS[i], h_imageGS, numBytesImage, cudaMemcpyHostToDevice); CE();
            // Map the flat window index back to (width, height) indices.
            int index = (i*windowsPerDevice + j);
            int wi = index / numWindowsWidth;
            int hi = index % numWindowsHeight;
            printf("Executing kernel detectFaces on device %d...\n", i);
            fflush(stdout);
            // Window size is scaled into resized-image coordinates.
            detectFaces<<<dimGrid, dimBlock>>>(d_imageGS[i], winWidths[wi] / widthRatio, winHeights[hi] / heightRatio, d_resultMatrix[i]); CE();
        }
        for(int i = 0; i < NUM_DEVICES; ++i)
        {
            cudaSetDevice(i);
            int index = (i*windowsPerDevice + j);
            printf("Retrieving resultMatrix from device %d to host...\n", i);
            fflush(stdout);
            cudaMemcpyAsync(h_resultMatrix[index], d_resultMatrix[i], numBytesResultMatrix, cudaMemcpyDeviceToHost); CE();
        }
    }
    // Wait for all async work on every device before reading results.
    for(int i = 0; i < NUM_DEVICES; ++i) { cudaSetDevice(i); cudaDeviceSynchronize(); }
    cudaSetDevice(0);
    cudaEventRecord(E1, 0);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, E0, E1);
    printf("Kernel elapsed time: %4.6f\n", elapsedTime);
    // Scan every result matrix: a 1 flag at (i, j) marks a detection for
    // window size k at block position (j, i); map it back to original-image
    // coordinates and collect it as a Box.
    for(int k = 0; k < numWindows; ++k)
    {
        for(int i = 0; i < NUM_BLOCKS; ++i)
        {
            for(int j = 0; j < NUM_BLOCKS; ++j)
            {
                if (h_resultMatrix[k][i * NUM_BLOCKS + j] == 1)
                {
                    int wi = k / numWindowsWidth;
                    int hi = k % numWindowsHeight;
                    int kernelStepWidth = (IMG_WIDTH - winWidths[wi]/widthRatio) / NUM_BLOCKS + 1;
                    int kernelStepHeight = (IMG_HEIGHT - winHeights[hi]/heightRatio) / NUM_BLOCKS + 1;
                    printf("Result found for size(%d,%d) in x,y: (%d,%d)\n", winWidths[wi], winHeights[hi], j, i);
                    fc.resultWindows.push_back(Box(int(j * kernelStepWidth * widthRatio),
                                                   int(i * kernelStepHeight * heightRatio),
                                                   int(winWidths[wi]),
                                                   int(winHeights[hi])));
                }
            }
        }
    }
    fc.saveResult();
    // Free memory in every device.
    // NOTE(review): the host buffers (h_imageGSOriginal, h_imageGS,
    // h_resultMatrix[]) and events E0/E1 are never released — harmless at
    // process exit, but worth fixing.
    printf("Freeing device memory...\n");
    for(int i = 0; i < NUM_DEVICES; ++i)
    {
        cudaSetDevice(i);
        cudaFree(d_imageGS[i]);
        cudaFree(d_resultMatrix[i]);
    }
    printf("Done.\n");
}
|
8e30c2e3ad0280ca0a6584316f7280b70989f609.hip | // !!! This is a file automatically generated by hipify!!!
#include <time.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#define STOP 0
#define START 1
#define BLOCK_X 16
#define BLOCK_Y 16
extern "C" void chrono (int kind, float *time);
// Shared-memory 5-point convolution over an n x n grid (pitched storage).
// Each block loads a (BLOCK_X+2) x (BLOCK_Y+2) tile (interior + one-cell
// halo) into shared memory, then averages north/west/center/east/south.
// Border cells of the global grid are copied through unchanged.
// The original file left exercise placeholders ("il = ?...", missing halo
// loads and stencil indices) that did not compile; they are filled in here
// following the in-code hints.
__global__ void kconvol (float *gpu_a, float *gpu_b, int pitch, int n) {
  int ig, jg, lg, il, jl, ll;
  __shared__ float la[(BLOCK_X+2)*(BLOCK_Y+2)];
  __shared__ float lb[(BLOCK_X+2)*(BLOCK_Y+2)];
  // A thread has two sets of coordinates:
  //   (ig, jg) in the global array,
  //   (il, jl) in the local shared tile of size (BLOCK_X+2)*(BLOCK_Y+2).
  ig = blockDim.x*blockIdx.x+threadIdx.x;
  jg = blockDim.y*blockIdx.y+threadIdx.y;
  lg = ig+jg*pitch;
  // Local coordinates are offset by 1 to leave room for the halo ring.
  il = threadIdx.x+1;
  jl = threadIdx.y+1;
  ll = il+jl*(BLOCK_X+2);
  // Load the tile interior.
  // NOTE(review): threads past the right/bottom edge of the grid still read
  // gpu_a[lg] here before the (ig >= n || jg >= n) guard — safe only if the
  // pitched allocation covers the full padded block grid; confirm.
  la[ll] = gpu_a[lg];
  // Left halo column.
  if ((il == 1) && (ig > 0))
    la[ll-1] = gpu_a[lg-1];
  // Top halo row.
  if ((jl == 1) && (jg > 0))
    la[ll-BLOCK_X-2] = gpu_a[lg-pitch];
  // Right halo column.
  if ((il == BLOCK_X) && (ig < n-1))
    la[ll+1] = gpu_a[lg+1];
  // Bottom halo row.
  if ((jl == BLOCK_Y) && (jg < n-1))
    la[ll+BLOCK_X+2] = gpu_a[lg+pitch];
  __syncthreads ();
  if ((ig >= n) || (jg >= n)) return;
  if ((ig == 0) || (jg == 0) || (ig == n-1) || (jg == n-1)) {
    // Global-boundary cells are passed through unchanged.
    lb[ll] = la[ll];
  }
  else
    // 5-point average: north, west, center, east, south.
    lb[ll]=(1.f/5.f)*( +la[ll-BLOCK_X-2]+ \
                    la[ll-1] +la[ll] +la[ll+1]+ \
                       +la[ll+BLOCK_X+2]);
  gpu_b[lg] = lb[ll];
}
// Host wrapper: allocates pitched device buffers, uploads `a` (n x n),
// runs the kconvol kernel, times it with the external chrono() helper,
// and downloads the result into `b`.
extern "C" void gpu_convol (float *a, float *b, int n) {
  float *gpu_a;
  float *gpu_b;
  hipError_t err;
  size_t pitch;
  float time;
  // Pitched allocations keep every row properly aligned.
  err = hipMallocPitch (&gpu_a, &pitch, n*sizeof(float), n);
  if (err != 0) {
    printf ("Error allocating gpu_a: %s\n", hipGetErrorString (err));
    exit (1);
  }
  err = hipMallocPitch (&gpu_b, &pitch, n*sizeof(float), n);
  if (err != 0) {
    printf ("Error allocating gpu_b: %s\n", hipGetErrorString (err));
    exit (1);
  }
  // One thread per grid cell; ceiling division covers the ragged edge.
  dim3 block (BLOCK_X, BLOCK_Y);
  dim3 grid;
  grid.x = (n-1)/BLOCK_X+1;
  grid.y = (n-1)/BLOCK_Y+1;
  hipMemcpy2D (gpu_a, pitch, a, n*sizeof(float), n*sizeof(float), n, hipMemcpyHostToDevice);
  chrono (START, &time);
  // The kernel indexes in elements, so the byte pitch is converted here.
  hipLaunchKernelGGL(( kconvol) , dim3(grid), dim3(block), 0, 0, gpu_a, gpu_b, pitch/sizeof(float), n);
  err=hipDeviceSynchronize ();
  chrono (STOP, &time);
  printf ("Convolution took %f sec. on GPU\n", time);
  hipMemcpy2D (b, n*sizeof(float), gpu_b, pitch, n*sizeof(float), n, hipMemcpyDeviceToHost);
  // NOTE(review): this checks the error returned by the synchronize above,
  // but only after the D2H copy has already run — consider checking earlier.
  if (err != 0) {
    printf ("%s\n", hipGetErrorString (err));
    exit (1);
  }
  hipFree (gpu_a);
  hipFree (gpu_b);
}
| 8e30c2e3ad0280ca0a6584316f7280b70989f609.cu | #include <time.h>
#include <cuda.h>
#include <stdio.h>
#define STOP 0
#define START 1
#define BLOCK_X 16
#define BLOCK_Y 16
extern "C" void chrono (int kind, float *time);
// Shared-memory 5-point convolution over an n x n grid (pitched storage).
// Each block loads a (BLOCK_X+2) x (BLOCK_Y+2) tile (interior + one-cell
// halo) into shared memory, then averages north/west/center/east/south.
// Border cells of the global grid are copied through unchanged.
// The original file left exercise placeholders ("il = ?...", missing halo
// loads and stencil indices) that did not compile; they are filled in here
// following the in-code hints.
__global__ void kconvol (float *gpu_a, float *gpu_b, int pitch, int n) {
  int ig, jg, lg, il, jl, ll;
  __shared__ float la[(BLOCK_X+2)*(BLOCK_Y+2)];
  __shared__ float lb[(BLOCK_X+2)*(BLOCK_Y+2)];
  // A thread has two sets of coordinates:
  //   (ig, jg) in the global array,
  //   (il, jl) in the local shared tile of size (BLOCK_X+2)*(BLOCK_Y+2).
  ig = blockDim.x*blockIdx.x+threadIdx.x;
  jg = blockDim.y*blockIdx.y+threadIdx.y;
  lg = ig+jg*pitch;
  // Local coordinates are offset by 1 to leave room for the halo ring.
  il = threadIdx.x+1;
  jl = threadIdx.y+1;
  ll = il+jl*(BLOCK_X+2);
  // Load the tile interior.
  // NOTE(review): threads past the right/bottom edge of the grid still read
  // gpu_a[lg] here before the (ig >= n || jg >= n) guard — safe only if the
  // pitched allocation covers the full padded block grid; confirm.
  la[ll] = gpu_a[lg];
  // Left halo column.
  if ((il == 1) && (ig > 0))
    la[ll-1] = gpu_a[lg-1];
  // Top halo row.
  if ((jl == 1) && (jg > 0))
    la[ll-BLOCK_X-2] = gpu_a[lg-pitch];
  // Right halo column.
  if ((il == BLOCK_X) && (ig < n-1))
    la[ll+1] = gpu_a[lg+1];
  // Bottom halo row.
  if ((jl == BLOCK_Y) && (jg < n-1))
    la[ll+BLOCK_X+2] = gpu_a[lg+pitch];
  __syncthreads ();
  if ((ig >= n) || (jg >= n)) return;
  if ((ig == 0) || (jg == 0) || (ig == n-1) || (jg == n-1)) {
    // Global-boundary cells are passed through unchanged.
    lb[ll] = la[ll];
  }
  else
    // 5-point average: north, west, center, east, south.
    lb[ll]=(1.f/5.f)*( +la[ll-BLOCK_X-2]+ \
                    la[ll-1] +la[ll] +la[ll+1]+ \
                       +la[ll+BLOCK_X+2]);
  gpu_b[lg] = lb[ll];
}
// Host wrapper: allocates pitched device buffers, uploads `a` (n x n),
// runs the kconvol kernel, times it with the external chrono() helper,
// and downloads the result into `b`.
extern "C" void gpu_convol (float *a, float *b, int n) {
  float *gpu_a;
  float *gpu_b;
  cudaError_t err;
  size_t pitch;
  float time;
  // Pitched allocations keep every row properly aligned.
  err = cudaMallocPitch (&gpu_a, &pitch, n*sizeof(float), n);
  if (err != 0) {
    printf ("Error allocating gpu_a: %s\n", cudaGetErrorString (err));
    exit (1);
  }
  err = cudaMallocPitch (&gpu_b, &pitch, n*sizeof(float), n);
  if (err != 0) {
    printf ("Error allocating gpu_b: %s\n", cudaGetErrorString (err));
    exit (1);
  }
  // One thread per grid cell; ceiling division covers the ragged edge.
  dim3 block (BLOCK_X, BLOCK_Y);
  dim3 grid;
  grid.x = (n-1)/BLOCK_X+1;
  grid.y = (n-1)/BLOCK_Y+1;
  cudaMemcpy2D (gpu_a, pitch, a, n*sizeof(float), n*sizeof(float), n, cudaMemcpyHostToDevice);
  chrono (START, &time);
  // The kernel indexes in elements, so the byte pitch is converted here.
  kconvol <<<grid, block>>> (gpu_a, gpu_b, pitch/sizeof(float), n);
  // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
  // supported replacement with identical semantics here.
  err = cudaDeviceSynchronize ();
  chrono (STOP, &time);
  printf ("Convolution took %f sec. on GPU\n", time);
  // Report a kernel failure before copying back potentially invalid data.
  if (err != 0) {
    printf ("%s\n", cudaGetErrorString (err));
    exit (1);
  }
  cudaMemcpy2D (b, n*sizeof(float), gpu_b, pitch, n*sizeof(float), n, cudaMemcpyDeviceToHost);
  cudaFree (gpu_a);
  cudaFree (gpu_b);
}
|
8354152fe4023c116328af3540f9d9236bc78533.hip | // !!! This is a file automatically generated by hipify!!!
extern "C" {
#include <hip/hip_runtime.h>
#include "conf.h"
#include "stencil.h"
#include <stdio.h>
}
#define ROTATE_DOWN(val,MAX) ((val-1==-1)?MAX-1:val-1)
#define ROTATE_UP(val,MAX) ((val+1)%MAX)
/**
* GPU Device kernel for the for 2D stencil
* First attempt during hackaton
* M = Rows, N = Cols INCLUDING HALOS
* In this version now we replace the size of the shared memory to be just 3 rows (actually 1+HALO*2) rows
*/
// 2D stencil over an M x N array (halos included) using a rotating shared
// buffer of only 1 + 2*HALO rows: the block pipelines down its tile,
// computing one output row while prefetching the next input row.
// NOTE(review): the update sums only the four neighbours (no centre term)
// and divides by 5.5 — looks like a damped/weighted scheme; confirm the
// intended stencil coefficients.
__global__ void gpu_stencil2D_4pt_hack2(double * dst, double * src, int M, int N)
{
    // Rotating shared window: 1 output row plus HALO rows above and below,
    // each row GRID_TILE_X + 2*HALO wide.
    __shared__ double shared_mem[ 1 + HALO*2 ] [ GRID_TILE_X + HALO*2]; //1 is the row I am modifying
    //double * shSrc = shared_mem;
    //indexes
    int i, j, curRow;
    // Linear offset of this block's tile: Cols * rowsPerTile * tileRow + colOffset.
    int base_global_idx = ( N ) * ( GRID_TILE_Y * blockIdx.y ) + GRID_TILE_X*blockIdx.x;
    // Logical roles of the three shared rows; rotated each pipeline step.
    int center = 1,north = 0,south = 2; //indexes for the current location in the shared memory
    // Prime the pipeline: load the first 1 + 2*HALO rows of the tile.
    for (i = 0 ; i < 1+HALO*2 ; i ++ )
        for (j = threadIdx.x ; j < GRID_TILE_X+2*HALO ; j+=blockDim.x)
        {
            shared_mem [i][j] = src[base_global_idx + i*N + j];
        }
    __syncthreads();
    // Pipelined loop: compute row curRow, then prefetch row curRow+2 into
    // the slot that just became free, then rotate the row roles.
    for ( curRow = HALO; curRow < GRID_TILE_Y; curRow+=1 )
    {
        // Stencil computation: top + bottom + left + right, scaled by 1/5.5.
        for (j = threadIdx.x + HALO ; j < GRID_TILE_X+HALO ; j+=blockDim.x)
        {
            dst[base_global_idx + curRow*N + j] = (shared_mem[north][j] + shared_mem[south][j] + shared_mem[center][j-1] + shared_mem[center][j+1] )/5.5;
        }
        __syncthreads();
        // Prefetch the next source row into the retired (north) slot.
        for (j = threadIdx.x ; j < GRID_TILE_X+2*HALO ; j+=blockDim.x)
        {
            shared_mem [north][j] = src[base_global_idx + (curRow+2)*N + j];
        }
        // Rotate roles: old north becomes the new south, etc.
        center = ROTATE_UP(center,3);
        south = ROTATE_UP(south,3);
        north = ROTATE_UP(north,3);
        __syncthreads();
    }
    // Drain the pipeline: compute the final row without further prefetching.
    for (j = threadIdx.x + HALO ; j < GRID_TILE_X+HALO ; j+=blockDim.x)
    {
        dst[base_global_idx + curRow*N + j] = (shared_mem[north][j] + shared_mem[south][j] + shared_mem[center][j-1] + shared_mem[center][j+1] )/5.5;
    }
    __syncthreads();
}
///**
// * GPU Device kernel for the for 2D stencil
// * First attempt during hackaton
// * M = Rows, N = Cols INCLUDING HALOS
// */
//__global__ void gpu_stencil2D_4pt_hack1(double * dst, double * src, int M, int N)
//{
//
// //Declaring the shared memory array for source
// __shared__ double shared_mem[GRID_TILE_Y + HALO*2 ] [ GRID_TILE_X + HALO*2];
// //double * shSrc = shared_mem;
//
// //indexes
// int i, j;
//
// //Cols * numRows/Tile * tileIndex
// int base_global_idx = ( N ) * ( GRID_TILE_Y * blockIdx.y ) + GRID_TILE_X*blockIdx.x;
//
// //We are copying from dst to shared memory.
// for (i = 0 ; i < GRID_TILE_Y+2*HALO ; i ++ )
// for (j = threadIdx.x ; j < GRID_TILE_X+2*HALO ; j+=blockDim.x)
// {
// shared_mem [i][j] = src[base_global_idx + i*N + j];
// }
//
// __syncthreads();
//
// //Stencil computation
// for (i = HALO ; i < GRID_TILE_Y+HALO ; i ++ )
// for (j = threadIdx.x + HALO ; j < GRID_TILE_X+HALO ; j+=blockDim.x)
// {
// //top + bottom + left + right
// dst[base_global_idx + i*N + j] = (shared_mem[i-1][j] + shared_mem[i+1][j] + shared_mem[i][j-1] + shared_mem[i][j+1] )/5.5;
// }
//
// __syncthreads();
//}
/**
* GPU Device kernel for the for 2D stencil
* M = Rows, N = Cols
*/
/**
 * 2D 4-point stencil kernel: stages a full (blockDim.x*TILE_SIZE) x
 * (blockDim.y*TILE_SIZE) tile plus its halo in dynamically allocated shared
 * memory, then applies the stencil. M = rows, N = cols.
 *
 * Launch: 2D thread blocks with dynamic shared memory sized for the tile
 * plus halo (see shMemSize in the host wrapper below).
 *
 * NOTE(review): all four halo strips are loaded serially by thread (0,0)
 * alone, leaving the rest of the block idle during that phase.
 */
__global__ void gpu_stencil2D_4pt(double * dst, double * src, int M, int N)
{
    // Dynamically sized shared staging buffer for the source tile + halo.
    extern __shared__ double shared_mem[];
    double * shSrc = shared_mem;
    // indexes
    int i, j;
    // neighbor's values
    double north, south, east, west;
    // Shared-memory row stride (columns) and row count, halo included.
    int smColDim = HALO*2+blockDim.y*TILE_SIZE;
    int smRowDim = HALO*2+blockDim.x*TILE_SIZE;
    // Copy the interior of the tile: each thread stages its own
    // TILE_SIZE x TILE_SIZE sub-tile.
    for ( i = 0 ; i < TILE_SIZE ; i++ )
    {
        for ( j = 0 ; j < TILE_SIZE ; j++ )
        {
            int globalIndex=HALO*N+blockIdx.x*blockDim.x*TILE_SIZE*N+threadIdx.x*TILE_SIZE*N+i*N+blockIdx.y*blockDim.y*TILE_SIZE+threadIdx.y*TILE_SIZE+j+HALO;
            int shMemIndex=HALO*smColDim+threadIdx.x*smColDim*TILE_SIZE+i*smColDim+HALO+threadIdx.y*TILE_SIZE+j;
            shSrc[shMemIndex]=src[globalIndex];
        }
    }
    // Halos: staged by a single thread per block.
    if (threadIdx.x == 0 && threadIdx.y == 0 )
    {
        int indexTopHalo, indexBottomHalo, indexLeftHalo, indexRightHalo;
        // Top and bottom halo rows (full width, so corners are included).
        for ( i = 0 ; i < HALO ; i++ )
        {
            for ( j = 0 ; j < smColDim ; j++ )
            {
                indexTopHalo = (blockIdx.x*blockDim.x*TILE_SIZE+i)*N + (blockIdx.y*blockDim.y*TILE_SIZE) + j;
                indexBottomHalo = (HALO + (blockIdx.x+1)*blockDim.x*TILE_SIZE)*N + (blockIdx.y*blockDim.y*TILE_SIZE)+j;
                shSrc[i*smColDim+j] = src[indexTopHalo];
                shSrc[(HALO+blockDim.x*TILE_SIZE+i)*smColDim + j] = src[indexBottomHalo];
            }
        }
        // Left and right halo columns (interior rows only; corners were
        // covered by the row loop above).
        for ( i = 0 ; i < HALO ; i++ )
        {
            for ( j = 0 ; j < smRowDim-HALO*2; j ++ )
            {
                indexLeftHalo = (HALO+blockIdx.x*blockDim.x*TILE_SIZE+j)*N + (blockIdx.y*blockDim.y*TILE_SIZE)+i;
                indexRightHalo = (HALO+blockIdx.x*blockDim.x*TILE_SIZE+j)*N + ((blockIdx.y+1)*blockDim.y*TILE_SIZE)+HALO+i;
                shSrc[(HALO+j)*smColDim+i] = src[indexLeftHalo];
                shSrc[(HALO+j+1)*smColDim-HALO+i] = src[indexRightHalo];
            }
        }
    }
    __syncthreads();
    // Apply the stencil to the staged tile.
    for ( i = 0 ; i < TILE_SIZE ; i++ )
    {
        for ( j = 0 ; j < TILE_SIZE ; j++ )
        {
            int globalIndex=HALO*N+blockIdx.x*blockDim.x*TILE_SIZE*N+threadIdx.x*TILE_SIZE*N+i*N+blockIdx.y*blockDim.y*TILE_SIZE+threadIdx.y*TILE_SIZE+j+HALO;
            int shMemIndex=HALO*smColDim+threadIdx.x*smColDim*TILE_SIZE+i*smColDim+HALO+threadIdx.y*TILE_SIZE+j;
            // Gather the four neighbours.
            north = shSrc[shMemIndex-smColDim];
            south = shSrc[shMemIndex+smColDim];
            east = shSrc[shMemIndex+1];
            west = shSrc[shMemIndex-1];
            // 4-point stencil (divided by 5.5, matching the CPU reference).
            dst[globalIndex] = ( north + south + east + west )/5.5;
        }
    }
    __syncthreads();
}
/**
* Nave 4pt stencil code for 2D arrays.
*/
/**
 * CPU reference implementation of the 4-point 2D stencil.
 * Runs n_tsteps Jacobi-style sweeps over the interior of the
 * n_rows x n_cols grids, swapping the roles of DST and SRC after
 * every sweep via the project's SWAP_PTR macro.
 */
void
stencil2D4pt ( double* __restrict__ dst, double* __restrict__ src,
               const size_t n_rows, const size_t n_cols,
               const size_t n_tsteps )
{
    // View the flat buffers as 2D arrays of n_cols-wide rows.
    typedef double (*Array2D)[n_cols];
    volatile Array2D DST = (Array2D) dst;
    volatile Array2D SRC = (Array2D) src;
    for (size_t step = 0; step < n_tsteps; ++step) {
        // Update interior points only; the one-cell border acts as the halo.
        for (size_t row = 1; row < n_rows - 1; ++row) {
            for (size_t col = 1; col < n_cols - 1; ++col) {
                DST[row][col] = (SRC[row - 1][col] + SRC[row + 1][col]
                               + SRC[row][col - 1] + SRC[row][col + 1]) / 5.5;
            }
        }
        SWAP_PTR(&DST, &SRC);
    }
}
extern "C"
void
stencil2D4pt_gpu( double * __restrict__ dst, double* __restrict__ src,
                  const size_t M, const size_t N,
                  const size_t NUM_ITERATIONS)//M Rows by N Columns
{
    /*
     * Host driver for the GPU stencil: copies src/dst to the device, runs
     * NUM_ITERATIONS sweeps of gpu_stencil2D_4pt_hack2 (ping-ponging the
     * two device buffers between sweeps), then copies both buffers back.
     *
     * Fix: the byte count was previously held in a double. hipMalloc and
     * hipMemcpy take size_t; a floating-point size is the wrong type and
     * silently loses precision for very large buffers.
     */
    const size_t size = sizeof(double) * M * N;
    // device memory allocation
    double * d_dst, * d_src;
    hipMalloc( (void **) &d_dst, size);
    hipMalloc( (void **) &d_src, size);
    // dimensions for the (currently unused) tiled kernel
    // TODO the -2 is to remove the borders
    dim3 dimBlock(MAX_BLOCK_DIM,MAX_BLOCK_DIM);
    int gridx = (N-2)/(MAX_BLOCK_DIM*TILE_SIZE) + (((N-2)%(MAX_BLOCK_DIM*TILE_SIZE) == 0)? 0:1 ) ;
    int gridy = (M-2)/(MAX_BLOCK_DIM*TILE_SIZE) + (((M-2)%(MAX_BLOCK_DIM*TILE_SIZE) == 0)? 0:1 ) ;
    dim3 dimGrid(gridx,gridy);
    // Shared memory size = inside + halo (only used by gpu_stencil2D_4pt).
    int shMemSize=MAX_BLOCK_DIM*TILE_SIZE*MAX_BLOCK_DIM*TILE_SIZE*sizeof(double)+(HALO*MAX_BLOCK_DIM*TILE_SIZE+HALO*HALO)*4*sizeof(double);
    // Hackathon dimensions: one block per GRID_TILE_X x GRID_TILE_Y tile.
    dim3 dimGrid_hack1((N-HALO*2)/GRID_TILE_X,(M-HALO*2)/GRID_TILE_Y);
    // Copy both host buffers to the device.
    hipMemcpy(d_src, src, size, hipMemcpyHostToDevice);
    hipMemcpy(d_dst, dst, size, hipMemcpyHostToDevice);
    int time_step = NUM_ITERATIONS;
    while (time_step-- > 0)
    {
        //gpu_stencil2D_4pt<<<dimGrid,dimBlock,shMemSize>>>(d_dst,d_src,M,N);
        hipLaunchKernelGGL(( gpu_stencil2D_4pt_hack2), dim3(dimGrid_hack1),dim3(NUM_THREADS), 0, 0, d_dst,d_src,M,N);
        // Inline ping-pong of the two device buffers.
        // NOTE(review): the swap is skipped on the last sweep only when
        // NUM_ITERATIONS is odd -- verify this leaves the final result in
        // d_dst for both parities.
        double * temp;
        if ( NUM_ITERATIONS%2 ==0 || time_step !=0)
        {
            temp=d_src;
            d_src=d_dst;
            d_dst=temp;
        }
    }
    // Copy results back from the device (hipMemcpy synchronizes with the
    // preceding kernel launches on the default stream).
    hipMemcpy(dst, d_dst, size, hipMemcpyDeviceToHost);
    hipMemcpy(src, d_src, size, hipMemcpyDeviceToHost);
    // Free device memory
    hipFree(d_src); hipFree(d_dst);
}
//void*
//stencil_run(void* arg)
//{
// stencil_t* stencil = (stencil_t*)arg;
// STENCIL_COMPUTE(stencil->stencil,stencil->arg);
// return NULL;
//}
| 8354152fe4023c116328af3540f9d9236bc78533.cu | extern "C" {
#include <cuda.h>
#include "conf.h"
#include "stencil.h"
#include <stdio.h>
}
#define ROTATE_DOWN(val,MAX) ((val-1==-1)?MAX-1:val-1)
#define ROTATE_UP(val,MAX) ((val+1)%MAX)
/**
* GPU Device kernel for the for 2D stencil
* First attempt during hackaton
* M = Rows, N = Cols INCLUDING HALOS
* In this version now we replace the size of the shared memory to be just 3 rows (actually 1+HALO*2) rows
*/
/**
 * 2D 4-point stencil kernel ("hack2"): instead of staging the whole tile,
 * keeps a rolling shared-memory window of 1 + 2*HALO rows (3 rows when
 * HALO == 1) and pipelines row loads with row computation.
 *
 * Launch: 2D grid, one block per GRID_TILE_X x GRID_TILE_Y tile of the
 * M x N domain (M = rows, N = cols, both INCLUDING halos); 1D thread block
 * that strides across the tile's columns.
 *
 * NOTE(review): the four-neighbour sum is divided by 5.5, not 4; this
 * matches the CPU reference stencil2D4pt in this file, so presumably
 * intentional -- confirm against the intended stencil definition.
 */
__global__ void gpu_stencil2D_4pt_hack2(double * dst, double * src, int M, int N)
{
    // Rolling shared-memory window: the row being produced plus HALO rows
    // above and below it.
    __shared__ double shared_mem[ 1 + HALO*2 ] [ GRID_TILE_X + HALO*2]; //1 is the row I am modifying
    //double * shSrc = shared_mem;
    // indexes
    int i, j, curRow;
    // Linear offset of this block's tile: Cols * numRows/Tile * tileIndex.
    int base_global_idx = ( N ) * ( GRID_TILE_Y * blockIdx.y ) + GRID_TILE_X*blockIdx.x;
    // Logical slots inside the rolling window; rotated each iteration.
    int center = 1,north = 0,south = 2; //indexes for the current location in the shared memory
    // Prime the pipeline: load the first 1 + 2*HALO rows of the tile.
    for (i = 0 ; i < 1+HALO*2 ; i ++ )
        for (j = threadIdx.x ; j < GRID_TILE_X+2*HALO ; j+=blockDim.x)
        {
            shared_mem [i][j] = src[base_global_idx + i*N + j];
        }
    __syncthreads();
    // Steady state: compute one output row, then stream in the row needed
    // two iterations ahead and rotate the window slots.
    for ( curRow = HALO; curRow < GRID_TILE_Y; curRow+=1 )
    {
        // Stencil computation: top + bottom + left + right.
        for (j = threadIdx.x + HALO ; j < GRID_TILE_X+HALO ; j+=blockDim.x)
        {
            dst[base_global_idx + curRow*N + j] = (shared_mem[north][j] + shared_mem[south][j] + shared_mem[center][j-1] + shared_mem[center][j+1] )/5.5;
        }
        __syncthreads();
        // Overwrite the slot that just served as "north" with the next row
        // of src (tile row curRow+2).
        for (j = threadIdx.x ; j < GRID_TILE_X+2*HALO ; j+=blockDim.x)
        {
            shared_mem [north][j] = src[base_global_idx + (curRow+2)*N + j];
        }
        center = ROTATE_UP(center,3);
        south = ROTATE_UP(south,3);
        north = ROTATE_UP(north,3);
        __syncthreads();
    }
    // Drain the pipeline: produce the final row (curRow == GRID_TILE_Y here).
    for (j = threadIdx.x + HALO ; j < GRID_TILE_X+HALO ; j+=blockDim.x)
    {
        // top + bottom + left + right
        dst[base_global_idx + curRow*N + j] = (shared_mem[north][j] + shared_mem[south][j] + shared_mem[center][j-1] + shared_mem[center][j+1] )/5.5;
    }
    __syncthreads();
}
///**
// * GPU Device kernel for the for 2D stencil
// * First attempt during hackaton
// * M = Rows, N = Cols INCLUDING HALOS
// */
//__global__ void gpu_stencil2D_4pt_hack1(double * dst, double * src, int M, int N)
//{
//
// //Declaring the shared memory array for source
// __shared__ double shared_mem[GRID_TILE_Y + HALO*2 ] [ GRID_TILE_X + HALO*2];
// //double * shSrc = shared_mem;
//
// //indexes
// int i, j;
//
// //Cols * numRows/Tile * tileIndex
// int base_global_idx = ( N ) * ( GRID_TILE_Y * blockIdx.y ) + GRID_TILE_X*blockIdx.x;
//
// //We are copying from dst to shared memory.
// for (i = 0 ; i < GRID_TILE_Y+2*HALO ; i ++ )
// for (j = threadIdx.x ; j < GRID_TILE_X+2*HALO ; j+=blockDim.x)
// {
// shared_mem [i][j] = src[base_global_idx + i*N + j];
// }
//
// __syncthreads();
//
// //Stencil computation
// for (i = HALO ; i < GRID_TILE_Y+HALO ; i ++ )
// for (j = threadIdx.x + HALO ; j < GRID_TILE_X+HALO ; j+=blockDim.x)
// {
// //top + bottom + left + right
// dst[base_global_idx + i*N + j] = (shared_mem[i-1][j] + shared_mem[i+1][j] + shared_mem[i][j-1] + shared_mem[i][j+1] )/5.5;
// }
//
// __syncthreads();
//}
/**
* GPU Device kernel for the for 2D stencil
* M = Rows, N = Cols
*/
/**
 * 2D 4-point stencil kernel: stages a full (blockDim.x*TILE_SIZE) x
 * (blockDim.y*TILE_SIZE) tile plus its halo in dynamically allocated shared
 * memory, then applies the stencil. M = rows, N = cols.
 *
 * Launch: 2D thread blocks with dynamic shared memory sized for the tile
 * plus halo (see shMemSize in the host wrapper below).
 *
 * NOTE(review): all four halo strips are loaded serially by thread (0,0)
 * alone, leaving the rest of the block idle during that phase.
 */
__global__ void gpu_stencil2D_4pt(double * dst, double * src, int M, int N)
{
    // Dynamically sized shared staging buffer for the source tile + halo.
    extern __shared__ double shared_mem[];
    double * shSrc = shared_mem;
    // indexes
    int i, j;
    // neighbor's values
    double north, south, east, west;
    // Shared-memory row stride (columns) and row count, halo included.
    int smColDim = HALO*2+blockDim.y*TILE_SIZE;
    int smRowDim = HALO*2+blockDim.x*TILE_SIZE;
    // Copy the interior of the tile: each thread stages its own
    // TILE_SIZE x TILE_SIZE sub-tile.
    for ( i = 0 ; i < TILE_SIZE ; i++ )
    {
        for ( j = 0 ; j < TILE_SIZE ; j++ )
        {
            int globalIndex=HALO*N+blockIdx.x*blockDim.x*TILE_SIZE*N+threadIdx.x*TILE_SIZE*N+i*N+blockIdx.y*blockDim.y*TILE_SIZE+threadIdx.y*TILE_SIZE+j+HALO;
            int shMemIndex=HALO*smColDim+threadIdx.x*smColDim*TILE_SIZE+i*smColDim+HALO+threadIdx.y*TILE_SIZE+j;
            shSrc[shMemIndex]=src[globalIndex];
        }
    }
    // Halos: staged by a single thread per block.
    if (threadIdx.x == 0 && threadIdx.y == 0 )
    {
        int indexTopHalo, indexBottomHalo, indexLeftHalo, indexRightHalo;
        // Top and bottom halo rows (full width, so corners are included).
        for ( i = 0 ; i < HALO ; i++ )
        {
            for ( j = 0 ; j < smColDim ; j++ )
            {
                indexTopHalo = (blockIdx.x*blockDim.x*TILE_SIZE+i)*N + (blockIdx.y*blockDim.y*TILE_SIZE) + j;
                indexBottomHalo = (HALO + (blockIdx.x+1)*blockDim.x*TILE_SIZE)*N + (blockIdx.y*blockDim.y*TILE_SIZE)+j;
                shSrc[i*smColDim+j] = src[indexTopHalo];
                shSrc[(HALO+blockDim.x*TILE_SIZE+i)*smColDim + j] = src[indexBottomHalo];
            }
        }
        // Left and right halo columns (interior rows only; corners were
        // covered by the row loop above).
        for ( i = 0 ; i < HALO ; i++ )
        {
            for ( j = 0 ; j < smRowDim-HALO*2; j ++ )
            {
                indexLeftHalo = (HALO+blockIdx.x*blockDim.x*TILE_SIZE+j)*N + (blockIdx.y*blockDim.y*TILE_SIZE)+i;
                indexRightHalo = (HALO+blockIdx.x*blockDim.x*TILE_SIZE+j)*N + ((blockIdx.y+1)*blockDim.y*TILE_SIZE)+HALO+i;
                shSrc[(HALO+j)*smColDim+i] = src[indexLeftHalo];
                shSrc[(HALO+j+1)*smColDim-HALO+i] = src[indexRightHalo];
            }
        }
    }
    __syncthreads();
    // Apply the stencil to the staged tile.
    for ( i = 0 ; i < TILE_SIZE ; i++ )
    {
        for ( j = 0 ; j < TILE_SIZE ; j++ )
        {
            int globalIndex=HALO*N+blockIdx.x*blockDim.x*TILE_SIZE*N+threadIdx.x*TILE_SIZE*N+i*N+blockIdx.y*blockDim.y*TILE_SIZE+threadIdx.y*TILE_SIZE+j+HALO;
            int shMemIndex=HALO*smColDim+threadIdx.x*smColDim*TILE_SIZE+i*smColDim+HALO+threadIdx.y*TILE_SIZE+j;
            // Gather the four neighbours.
            north = shSrc[shMemIndex-smColDim];
            south = shSrc[shMemIndex+smColDim];
            east = shSrc[shMemIndex+1];
            west = shSrc[shMemIndex-1];
            // 4-point stencil (divided by 5.5, matching the CPU reference).
            dst[globalIndex] = ( north + south + east + west )/5.5;
        }
    }
    __syncthreads();
}
/**
* Naïve 4pt stencil code for 2D arrays.
*/
/**
 * CPU reference implementation of the 4-point 2D stencil.
 * Runs n_tsteps Jacobi-style sweeps over the interior of the
 * n_rows x n_cols grids, swapping the roles of DST and SRC after
 * every sweep via the project's SWAP_PTR macro.
 */
void
stencil2D4pt ( double* __restrict__ dst, double* __restrict__ src,
               const size_t n_rows, const size_t n_cols,
               const size_t n_tsteps )
{
    // View the flat buffers as 2D arrays of n_cols-wide rows.
    typedef double (*Array2D)[n_cols];
    volatile Array2D DST = (Array2D) dst;
    volatile Array2D SRC = (Array2D) src;
    for (size_t step = 0; step < n_tsteps; ++step) {
        // Update interior points only; the one-cell border acts as the halo.
        for (size_t row = 1; row < n_rows - 1; ++row) {
            for (size_t col = 1; col < n_cols - 1; ++col) {
                DST[row][col] = (SRC[row - 1][col] + SRC[row + 1][col]
                               + SRC[row][col - 1] + SRC[row][col + 1]) / 5.5;
            }
        }
        SWAP_PTR(&DST, &SRC);
    }
}
extern "C"
void
stencil2D4pt_gpu( double * __restrict__ dst, double* __restrict__ src,
                  const size_t M, const size_t N,
                  const size_t NUM_ITERATIONS)//M Rows by N Columns
{
    /*
     * Host driver for the GPU stencil: copies src/dst to the device, runs
     * NUM_ITERATIONS sweeps of gpu_stencil2D_4pt_hack2 (ping-ponging the
     * two device buffers between sweeps), then copies both buffers back.
     *
     * Fix: the byte count was previously held in a double. cudaMalloc and
     * cudaMemcpy take size_t; a floating-point size is the wrong type and
     * silently loses precision for very large buffers.
     */
    const size_t size = sizeof(double) * M * N;
    // device memory allocation
    double * d_dst, * d_src;
    cudaMalloc( (void **) &d_dst, size);
    cudaMalloc( (void **) &d_src, size);
    // dimensions for the (currently unused) tiled kernel
    // TODO the -2 is to remove the borders
    dim3 dimBlock(MAX_BLOCK_DIM,MAX_BLOCK_DIM);
    int gridx = (N-2)/(MAX_BLOCK_DIM*TILE_SIZE) + (((N-2)%(MAX_BLOCK_DIM*TILE_SIZE) == 0)? 0:1 ) ;
    int gridy = (M-2)/(MAX_BLOCK_DIM*TILE_SIZE) + (((M-2)%(MAX_BLOCK_DIM*TILE_SIZE) == 0)? 0:1 ) ;
    dim3 dimGrid(gridx,gridy);
    // Shared memory size = inside + halo (only used by gpu_stencil2D_4pt).
    int shMemSize=MAX_BLOCK_DIM*TILE_SIZE*MAX_BLOCK_DIM*TILE_SIZE*sizeof(double)+(HALO*MAX_BLOCK_DIM*TILE_SIZE+HALO*HALO)*4*sizeof(double);
    // Hackathon dimensions: one block per GRID_TILE_X x GRID_TILE_Y tile.
    dim3 dimGrid_hack1((N-HALO*2)/GRID_TILE_X,(M-HALO*2)/GRID_TILE_Y);
    // Copy both host buffers to the device.
    cudaMemcpy(d_src, src, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_dst, dst, size, cudaMemcpyHostToDevice);
    int time_step = NUM_ITERATIONS;
    while (time_step-- > 0)
    {
        //gpu_stencil2D_4pt<<<dimGrid,dimBlock,shMemSize>>>(d_dst,d_src,M,N);
        gpu_stencil2D_4pt_hack2<<<dimGrid_hack1,NUM_THREADS>>>(d_dst,d_src,M,N);
        // Inline ping-pong of the two device buffers.
        // NOTE(review): the swap is skipped on the last sweep only when
        // NUM_ITERATIONS is odd -- verify this leaves the final result in
        // d_dst for both parities.
        double * temp;
        if ( NUM_ITERATIONS%2 ==0 || time_step !=0)
        {
            temp=d_src;
            d_src=d_dst;
            d_dst=temp;
        }
    }
    // Copy results back from the device (cudaMemcpy synchronizes with the
    // preceding kernel launches on the default stream).
    cudaMemcpy(dst, d_dst, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(src, d_src, size, cudaMemcpyDeviceToHost);
    // Free device memory
    cudaFree(d_src); cudaFree(d_dst);
}
//void*
//stencil_run(void* arg)
//{
// stencil_t* stencil = (stencil_t*)arg;
// STENCIL_COMPUTE(stencil->stencil,stencil->arg);
// return NULL;
//}
|
5bbc61e4374d9cd1ecc420ed9af9a722ce168222.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDAlib.cuh"
#include "WAVFilter.h"
#include <chrono>
#define BLOCK_SIZE 64
/**
 * Echo filter kernel: d_out[x] = d_in[x] + d_in[x - offset] * gain for
 * x > offset, otherwise d_out[x] = d_in[x]. One byte per thread.
 *
 * NOTE(review): there is no upper bounds check on dst_x, and the host side
 * rounds the grid up (numBlocks = memSize/BLOCK_SIZE + 1), so the trailing
 * threads read and write past the end of the buffers. A length parameter
 * plus an `if (dst_x < n)` guard is needed.
 * NOTE(review): s_data is written and read only by the same thread (index
 * threadIdx.x, no __syncthreads), so shared memory buys nothing here.
 * NOTE(review): d_data is a char, so the gain-scaled sum is truncated to
 * 8 bits -- presumably intended for 8-bit samples; confirm.
 */
__global__ void FilterEchoBlock(char* d_in, char* d_out, long offset, double gain)
{
    __shared__ int s_data[BLOCK_SIZE];
    char d_data;
    // Global byte index of the sample this thread produces.
    int dst_x = blockIdx.x * blockDim.x + threadIdx.x;
    // Index of the sample `offset` bytes earlier (the echo source).
    int src_x = dst_x - offset;
    // Load one element per thread from device memory and store it
    d_data = d_in[dst_x];
    if (dst_x > offset)
        s_data[threadIdx.x] = *(d_in+src_x);
    else
        s_data[threadIdx.x] = 0;
    d_data += s_data[threadIdx.x] * gain;
    d_out[dst_x] = d_data;
}
/**
 * Applies a simple echo to the raw sample bytes of `origin`:
 *   out[i] = in[i] + in[i - offset] * gain
 * where offset is `delay` seconds worth of bytes. Runs on the GPU when
 * useCUDA is set, otherwise on the CPU.
 */
void FilterEcho(Audio_WAV& origin, bool useCUDA, double delay, double gain)
{
    WAV_HEADER origin_header = origin.get_header();
    size_t memSize = origin_header.Subchunk2Size;
    char* origin_bytes = origin.get_audio();
    unsigned long byteperSecond = origin_header.sampleRate * origin_header.blockAlign;
    long offset = byteperSecond * delay;
    if (useCUDA)
    {
        // Device buffers for input and output sample bytes.
        char *d_in, *d_out;
        int numBlocks = (memSize / BLOCK_SIZE) + 1; // ceiling
        // Dynamic smem request; note the kernel actually declares a static
        // __shared__ array, so this parameter is unused by it.
        int sharedMemSize = BLOCK_SIZE;
        hipMalloc(&d_in, memSize);
        hipMalloc(&d_out, memSize);
        hipMemcpy(d_in, origin_bytes, memSize, hipMemcpyHostToDevice);
        // launch kernel
        dim3 dimGrid(numBlocks);
        dim3 dimBlock(BLOCK_SIZE);
        FilterEchoBlock << < dimGrid, dimBlock, sharedMemSize >> > (d_in, d_out, offset, gain);
        hipDeviceSynchronize();
        memset(origin_bytes, 0, memSize);
        char* origin_archive = new char[memSize];
        hipMemcpy(origin_archive, d_out, memSize, hipMemcpyDeviceToHost);
        // NOTE(review): set_audio presumably takes ownership of this
        // buffer -- confirm, otherwise it leaks.
        origin.set_audio(origin_archive);
        hipFree(d_in);
        hipFree(d_out);
    }
    else
    {
        // CPU path: work from an unmodified snapshot so the in-place update
        // does not feed already-echoed samples back into itself.
        char* origin_archive = new char[memSize];
        std::memcpy((char*)origin_archive, (char*)origin_bytes, memSize);
        // Fix: use a size_t index -- the previous int index overflows for
        // buffers larger than 2 GiB.
        for (size_t i = (size_t)(offset + 1); i < memSize; i++)
        {
            origin_bytes[i] = origin_archive[i] + origin_archive[i - offset] * gain;
        }
        // Fix: release the snapshot (it was previously leaked).
        delete[] origin_archive;
    }
}
| 5bbc61e4374d9cd1ecc420ed9af9a722ce168222.cu | #include "CUDAlib.cuh"
#include "WAVFilter.h"
#include <chrono>
#define BLOCK_SIZE 64
/**
 * Echo filter kernel: d_out[x] = d_in[x] + d_in[x - offset] * gain for
 * x > offset, otherwise d_out[x] = d_in[x]. One byte per thread.
 *
 * NOTE(review): there is no upper bounds check on dst_x, and the host side
 * rounds the grid up (numBlocks = memSize/BLOCK_SIZE + 1), so the trailing
 * threads read and write past the end of the buffers. A length parameter
 * plus an `if (dst_x < n)` guard is needed.
 * NOTE(review): s_data is written and read only by the same thread (index
 * threadIdx.x, no __syncthreads), so shared memory buys nothing here.
 * NOTE(review): d_data is a char, so the gain-scaled sum is truncated to
 * 8 bits -- presumably intended for 8-bit samples; confirm.
 */
__global__ void FilterEchoBlock(char* d_in, char* d_out, long offset, double gain)
{
    __shared__ int s_data[BLOCK_SIZE];
    char d_data;
    // Global byte index of the sample this thread produces.
    int dst_x = blockIdx.x * blockDim.x + threadIdx.x;
    // Index of the sample `offset` bytes earlier (the echo source).
    int src_x = dst_x - offset;
    // Load one element per thread from device memory and store it
    d_data = d_in[dst_x];
    if (dst_x > offset)
        s_data[threadIdx.x] = *(d_in+src_x);
    else
        s_data[threadIdx.x] = 0;
    d_data += s_data[threadIdx.x] * gain;
    d_out[dst_x] = d_data;
}
/**
 * Applies a simple echo to the raw sample bytes of `origin`:
 *   out[i] = in[i] + in[i - offset] * gain
 * where offset is `delay` seconds worth of bytes. Runs on the GPU when
 * useCUDA is set, otherwise on the CPU.
 */
void FilterEcho(Audio_WAV& origin, bool useCUDA, double delay, double gain)
{
    WAV_HEADER origin_header = origin.get_header();
    size_t memSize = origin_header.Subchunk2Size;
    char* origin_bytes = origin.get_audio();
    unsigned long byteperSecond = origin_header.sampleRate * origin_header.blockAlign;
    long offset = byteperSecond * delay;
    if (useCUDA)
    {
        // Device buffers for input and output sample bytes.
        char *d_in, *d_out;
        int numBlocks = (memSize / BLOCK_SIZE) + 1; // ceiling
        // Dynamic smem request; note the kernel actually declares a static
        // __shared__ array, so this parameter is unused by it.
        int sharedMemSize = BLOCK_SIZE;
        cudaMalloc(&d_in, memSize);
        cudaMalloc(&d_out, memSize);
        cudaMemcpy(d_in, origin_bytes, memSize, cudaMemcpyHostToDevice);
        // launch kernel
        dim3 dimGrid(numBlocks);
        dim3 dimBlock(BLOCK_SIZE);
        FilterEchoBlock << < dimGrid, dimBlock, sharedMemSize >> > (d_in, d_out, offset, gain);
        // Fix: cudaThreadSynchronize() is deprecated; use the
        // equivalent cudaDeviceSynchronize().
        cudaDeviceSynchronize();
        memset(origin_bytes, 0, memSize);
        char* origin_archive = new char[memSize];
        cudaMemcpy(origin_archive, d_out, memSize, cudaMemcpyDeviceToHost);
        // NOTE(review): set_audio presumably takes ownership of this
        // buffer -- confirm, otherwise it leaks.
        origin.set_audio(origin_archive);
        cudaFree(d_in);
        cudaFree(d_out);
    }
    else
    {
        // CPU path: work from an unmodified snapshot so the in-place update
        // does not feed already-echoed samples back into itself.
        char* origin_archive = new char[memSize];
        std::memcpy((char*)origin_archive, (char*)origin_bytes, memSize);
        // Fix: use a size_t index -- the previous int index overflows for
        // buffers larger than 2 GiB.
        for (size_t i = (size_t)(offset + 1); i < memSize; i++)
        {
            origin_bytes[i] = origin_archive[i] + origin_archive[i - offset] * gain;
        }
        // Fix: release the snapshot (it was previously leaked).
        delete[] origin_archive;
    }
}
0776a5ee9ee1579c693bb29d8cf633a63829a50f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it acheives this with just one thread, which
is obviously not good given the scale of parallelism available to CUDA
programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o cuda_crack cuda_crack.cu
./cuda_crack
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise,it returns 0.
*****************************************************************************/
/**
 * Compares `attempt` against the hard-coded plain-text password.
 * Returns 1 on an exact match (terminator included), 0 otherwise.
 */
__device__ int is_a_match(char *attempt) {
    const char secret[] = "KB5234";
    for (int i = 0; ; i++) {
        if (attempt[i] != secret[i]) {
            return 0;               // first mismatch ends the comparison
        }
        if (attempt[i] == '\0') {
            return 1;               // matched all the way to the terminator
        }
    }
}
/****************************************************************************
The kernel function assume that there will be only one thread and uses
nested loops to generate all possible passwords and test whether they match
the hidden password.
*****************************************************************************/
/**
 * Brute-force search kernel. Each (blockIdx.x, threadIdx.x) pair fixes the
 * two leading letters of the candidate ('A' + index), and the nested loops
 * enumerate the four trailing digits; matches are reported via printf.
 * Expected launch configuration: <<<26, 26>>>.
 */
__global__ void kernel() {
    int w, a, s, d, g;
    // Fix: the candidate is 2 letters + 4 digits + NUL = 7 bytes. The
    // previous char[6] buffer had every slot overwritten by a character
    // and was never NUL-terminated, so is_a_match compared past the end
    // of the array (undefined behaviour).
    char password[7];
    password[6] = '\0';
    password[0] = 'A' + threadIdx.x;
    password[1] = 'A' + blockIdx.x;
    for (w = 0; w < 10; w++) {
        g = w + '0';
        password[2] = g;
        for (a = 0; a < 10; a++) {
            g = a + '0';
            password[3] = g;
            for (s = 0; s < 10; s++) {
                g = s + '0';
                password[4] = g;
                for (d = 0; d < 10; d++) {
                    g = d + '0';
                    password[5] = g;
                    if (is_a_match(password)) {
                        printf("password found: %s\n", password);
                    }
                }
            }
        }
    }
}
/**
 * Stores finish - start, in nanoseconds, into *difference.
 * Returns 0 when the difference is strictly positive, 1 otherwise.
 */
int time_difference(struct timespec *start, struct timespec *finish,
                    long long int *difference) {
    long long int secs  = finish->tv_sec - start->tv_sec;
    long long int nanos = finish->tv_nsec - start->tv_nsec;
    // Borrow one second when the nanosecond field wraps below zero.
    if (nanos < 0) {
        secs -= 1;
        nanos += 1000000000;
    }
    *difference = secs * 1000000000 + nanos;
    return (*difference > 0) ? 0 : 1;
}
/**
 * Launches the brute-force kernel over a 26x26 grid (one block/thread per
 * leading-letter pair) and reports how long the search took.
 */
int main() {
    struct timespec start, finish;
    long long int time_elapsed;
    // Fix: time the actual cracking work. Previously both timestamps were
    // taken before the kernel launch, so the printed duration measured
    // nothing. Kernel launches are asynchronous, so synchronize before
    // taking the stop timestamp.
    clock_gettime(CLOCK_MONOTONIC, &start);
    hipLaunchKernelGGL(( kernel) , dim3(26), dim3(26), 0, 0, );
    hipDeviceSynchronize();
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
           (time_elapsed / 1.0e9));
    return 0;
}
| 0776a5ee9ee1579c693bb29d8cf633a63829a50f.cu | #include <stdio.h>
#include <cuda_runtime_api.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it acheives this with just one thread, which
is obviously not good given the scale of parallelism available to CUDA
programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o cuda_crack cuda_crack.cu
./cuda_crack
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise,it returns 0.
*****************************************************************************/
/**
 * Compares `attempt` against the hard-coded plain-text password.
 * Returns 1 on an exact match (terminator included), 0 otherwise.
 */
__device__ int is_a_match(char *attempt) {
    const char secret[] = "KB5234";
    for (int i = 0; ; i++) {
        if (attempt[i] != secret[i]) {
            return 0;               // first mismatch ends the comparison
        }
        if (attempt[i] == '\0') {
            return 1;               // matched all the way to the terminator
        }
    }
}
/****************************************************************************
The kernel function assume that there will be only one thread and uses
nested loops to generate all possible passwords and test whether they match
the hidden password.
*****************************************************************************/
/**
 * Brute-force search kernel. Each (blockIdx.x, threadIdx.x) pair fixes the
 * two leading letters of the candidate ('A' + index), and the nested loops
 * enumerate the four trailing digits; matches are reported via printf.
 * Expected launch configuration: <<<26, 26>>>.
 */
__global__ void kernel() {
    int w, a, s, d, g;
    // Fix: the candidate is 2 letters + 4 digits + NUL = 7 bytes. The
    // previous char[6] buffer had every slot overwritten by a character
    // and was never NUL-terminated, so is_a_match compared past the end
    // of the array (undefined behaviour).
    char password[7];
    password[6] = '\0';
    password[0] = 'A' + threadIdx.x;
    password[1] = 'A' + blockIdx.x;
    for (w = 0; w < 10; w++) {
        g = w + '0';
        password[2] = g;
        for (a = 0; a < 10; a++) {
            g = a + '0';
            password[3] = g;
            for (s = 0; s < 10; s++) {
                g = s + '0';
                password[4] = g;
                for (d = 0; d < 10; d++) {
                    g = d + '0';
                    password[5] = g;
                    if (is_a_match(password)) {
                        printf("password found: %s\n", password);
                    }
                }
            }
        }
    }
}
/**
 * Stores finish - start, in nanoseconds, into *difference.
 * Returns 0 when the difference is strictly positive, 1 otherwise.
 */
int time_difference(struct timespec *start, struct timespec *finish,
                    long long int *difference) {
    long long int secs  = finish->tv_sec - start->tv_sec;
    long long int nanos = finish->tv_nsec - start->tv_nsec;
    // Borrow one second when the nanosecond field wraps below zero.
    if (nanos < 0) {
        secs -= 1;
        nanos += 1000000000;
    }
    *difference = secs * 1000000000 + nanos;
    return (*difference > 0) ? 0 : 1;
}
/**
 * Launches the brute-force kernel over a 26x26 grid (one block/thread per
 * leading-letter pair) and reports how long the search took.
 */
int main() {
    struct timespec start, finish;
    long long int time_elapsed;
    // Fix: time the actual cracking work. Previously both timestamps were
    // taken before the kernel launch, so the printed duration measured
    // nothing. Kernel launches are asynchronous, so synchronize before
    // taking the stop timestamp. Also, cudaThreadSynchronize() is
    // deprecated; use cudaDeviceSynchronize().
    clock_gettime(CLOCK_MONOTONIC, &start);
    kernel <<<26, 26>>>();
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
           (time_elapsed / 1.0e9));
    return 0;
}
|
445096ecfd3df77832baf500a931bb237c7791cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/MultiMarginCriterion.cu"
#else
// TODO: improve error messages
/**
 * Forward pass of the multi-class margin criterion on the GPU.
 *
 * input:     1-D (single sample) or 2-D (nframe x nclasses) scores.
 * target:    class index per sample.
 * output:    resized to [1] (reduced) or [nframe] (Reduction::None).
 * reduction: Reduction::None / Sum / ElementwiseMean.
 * p:         margin power; only 1 and 2 launch a kernel (other values
 *            silently leave output unwritten -- preserved behaviour).
 * weights:   optional per-class rescaling weights.
 */
void THNN_(MultiMarginCriterion_updateOutput)(
          THCState *state,
          THCTensor *input,
          THCIndexTensor *target,
          THCTensor *output,
          int64_t reduction,
          int p,
          THCTensor *weights,
          accreal margin_)
{
  scalar_t margin = ScalarConvert<accreal, scalar_t>::to(margin_);
  THCUNN_assertSameGPU(state, 2, input, target);
  // The kernels require contiguous inputs.
  input = THCTensor_(newContiguous)(state, input);
  if(weights)
    weights = THCTensor_(newContiguous)(state, weights);
  if (THTensor_nDimensionLegacyNoScalars(input) == 1)
  {
    // Single-sample case: one block reduces over the class dimension.
    dim3 blocks(1);
    dim3 threads(MULTIMARGIN_THREADS);
    THCTensor_(resize1d)(state, output, 1);
    if (p == 1)
    {
      hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
        THCTensor_(data)(state, output),
        THCTensor_(data)(state, input),
        THCIndexTensor_(data)(state, target),
        weights ? THCTensor_(data)(state, weights) : NULL,
        1, THTensor_sizeLegacyNoScalars(input, 0),
        reduction == Reduction::ElementwiseMean,
        margin
      );
    }
    else if (p == 2)
    {
      hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
        THCTensor_(data)(state, output),
        THCTensor_(data)(state, input),
        THCIndexTensor_(data)(state, target),
        weights ? THCTensor_(data)(state, weights) : NULL,
        1, THTensor_sizeLegacyNoScalars(input, 0),
        reduction == Reduction::ElementwiseMean,
        margin
      );
    }
    THCudaCheck(hipGetLastError());
  }
  else if (input->dim() == 2)
  {
    int nframe = input->size(0);
    THArgCheck(!target->is_empty() && (THTensor_nDimensionLegacyNoScalars(target) == 1) && (THTensor_sizeLegacyNoScalars(target, 0) == nframe), 3,
               "inconsistent target size");
    // One block per sample.
    dim3 blocks(input->size(0));
    dim3 threads(MULTIMARGIN_THREADS);
    if (reduction == Reduction::None)
    {
      // Unreduced: one loss per sample, written directly to output.
      THCTensor_(resize1d)(state, output, input->size(0));
      if (p == 1)
      {
        hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
          THCTensor_(data)(state, output),
          THCTensor_(data)(state, input),
          THCIndexTensor_(data)(state, target),
          weights ? THCTensor_(data)(state, weights) : NULL,
          nframe, input->size(1),
          false,
          margin
        );
      }
      else if (p == 2)
      {
        hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
          THCTensor_(data)(state, output),
          THCTensor_(data)(state, input),
          THCIndexTensor_(data)(state, target),
          weights ? THCTensor_(data)(state, weights) : NULL,
          nframe, input->size(1),
          false,
          margin
        );
      }
      THCudaCheck(hipGetLastError());
    }
    else
    {
      // Reduced: per-sample losses go to a temp buffer, then are summed.
      THCTensor_(resize1d)(state, output, 1);
      THCTensor *output_ = THCTensor_(newWithSize1d)(state, input->size(0)); // tmp output buffer
      if (p == 1)
      {
        hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
          THCTensor_(data)(state, output_),
          THCTensor_(data)(state, input),
          THCIndexTensor_(data)(state, target),
          weights ? THCTensor_(data)(state, weights) : NULL,
          nframe, input->size(1),
          reduction == Reduction::ElementwiseMean,
          margin
        );
      }
      else if (p == 2)
      {
        hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
          THCTensor_(data)(state, output_),
          THCTensor_(data)(state, input),
          THCIndexTensor_(data)(state, target),
          weights ? THCTensor_(data)(state, weights) : NULL,
          nframe, input->size(1),
          reduction == Reduction::ElementwiseMean,
          margin
        );
      }
      THCudaCheck(hipGetLastError());
      // Fix: accumulate in accreal (not float) so double tensors do not
      // lose precision in the final reduction; sumall returns accreal.
      accreal sum = THCTensor_(sumall)(state, output_);
      THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(sum));
      THCTensor_(free)(state, output_);
    }
  }
  else
  {
    AT_ERROR("non-empty vector or matrix expected, got sizes: ", input->sizes());
  }
  THCTensor_(free)(state, input);
  if(weights)
    THCTensor_(free)(state, weights);
}
/**
 * Backward pass of the multi-class margin criterion on the GPU.
 *
 * Mirrors updateOutput: dispatches on input dimensionality (1-D single
 * sample vs 2-D batch) and on the margin power p (only 1 and 2 launch a
 * kernel). gradInput is resized to match input. The last kernel argument
 * (reduction != Reduction::None) tells the kernel whether gradOutput is a
 * single reduced scalar or holds one value per sample.
 */
void THNN_(MultiMarginCriterion_updateGradInput)(
          THCState *state,
          THCTensor *input,
          THCIndexTensor *target,
          THCTensor *gradOutput,
          THCTensor *gradInput,
          int64_t reduction,
          int p,
          THCTensor *weights,
          accreal margin_)
{
  scalar_t margin = ScalarConvert<accreal, scalar_t>::to(margin_);
  THCUNN_assertSameGPU(state, 3, input, gradInput, target);
  // The kernels require contiguous inputs.
  input = THCTensor_(newContiguous)(state, input);
  gradOutput = THCTensor_(newContiguous)(state, gradOutput);
  THCTensor_(resizeAs)(state, gradInput, input);
  if(weights)
    weights = THCTensor_(newContiguous)(state, weights);
  if (THTensor_nDimensionLegacyNoScalars(input) == 1)
  {
    // Single-sample case: one block over the class dimension.
    dim3 blocks(1);
    dim3 threads(MULTIMARGIN_THREADS);
    if (p == 1)
    {
      hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<1, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
        THCTensor_(data)(state, gradInput),
        THCTensor_(data)(state, gradOutput),
        THCTensor_(data)(state, input),
        THCIndexTensor_(data)(state, target),
        weights ? THCTensor_(data)(state, weights) : NULL,
        1, THTensor_sizeLegacyNoScalars(gradInput, 0),
        reduction == Reduction::ElementwiseMean,
        margin,
        reduction != Reduction::None
      );
    }
    else if (p == 2)
    {
      hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<2, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
        THCTensor_(data)(state, gradInput),
        THCTensor_(data)(state, gradOutput),
        THCTensor_(data)(state, input),
        THCIndexTensor_(data)(state, target),
        weights ? THCTensor_(data)(state, weights) : NULL,
        1, THTensor_sizeLegacyNoScalars(gradInput, 0),
        reduction == Reduction::ElementwiseMean,
        margin,
        reduction != Reduction::None
      );
    }
    THCudaCheck(hipGetLastError());
  }
  else if (input->dim() == 2)
  {
    int nframe = gradInput->size(0);
    THArgCheck(!target->is_empty() && (THTensor_nDimensionLegacyNoScalars(target) == 1) && (THTensor_sizeLegacyNoScalars(target, 0) == nframe), 3,
               "inconsistent target size");
    // One block per sample.
    dim3 blocks(gradInput->size(0));
    dim3 threads(MULTIMARGIN_THREADS);
    if (p == 1)
    {
      hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<1, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
        THCTensor_(data)(state, gradInput),
        THCTensor_(data)(state, gradOutput),
        THCTensor_(data)(state, input),
        THCIndexTensor_(data)(state, target),
        weights ? THCTensor_(data)(state, weights) : NULL,
        nframe, gradInput->size(1),
        reduction == Reduction::ElementwiseMean,
        margin,
        reduction != Reduction::None
      );
    }
    else if (p == 2)
    {
      hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<2, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
        THCTensor_(data)(state, gradInput),
        THCTensor_(data)(state, gradOutput),
        THCTensor_(data)(state, input),
        THCIndexTensor_(data)(state, target),
        weights ? THCTensor_(data)(state, weights) : NULL,
        nframe, gradInput->size(1),
        reduction == Reduction::ElementwiseMean,
        margin,
        reduction != Reduction::None
      );
    }
    THCudaCheck(hipGetLastError());
  }
  else
  {
    AT_ERROR("non-empty vector or matrix expected, got ", input->sizes());
  }
  THCTensor_(free)(state, input);
  THCTensor_(free)(state, gradOutput);
  if(weights)
    THCTensor_(free)(state, weights);
}
#endif
| 445096ecfd3df77832baf500a931bb237c7791cc.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/MultiMarginCriterion.cu"
#else
// TODO: improve error messages
void THNN_(MultiMarginCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
int64_t reduction,
int p,
THCTensor *weights,
accreal margin_)
{
scalar_t margin = ScalarConvert<accreal, scalar_t>::to(margin_);
THCUNN_assertSameGPU(state, 2, input, target);
input = THCTensor_(newContiguous)(state, input);
if(weights)
weights = THCTensor_(newContiguous)(state, weights);
if (THTensor_nDimensionLegacyNoScalars(input) == 1)
{
dim3 blocks(1);
dim3 threads(MULTIMARGIN_THREADS);
THCTensor_(resize1d)(state, output, 1);
if (p == 1)
{
cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, THTensor_sizeLegacyNoScalars(input, 0),
reduction == Reduction::ElementwiseMean,
margin
);
}
else if (p == 2)
{
cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, THTensor_sizeLegacyNoScalars(input, 0),
reduction == Reduction::ElementwiseMean,
margin
);
}
THCudaCheck(cudaGetLastError());
}
else if (input->dim() == 2)
{
int nframe = input->size(0);
THArgCheck(!target->is_empty() && (THTensor_nDimensionLegacyNoScalars(target) == 1) && (THTensor_sizeLegacyNoScalars(target, 0) == nframe), 3,
"inconsistent target size");
dim3 blocks(input->size(0));
dim3 threads(MULTIMARGIN_THREADS);
if (reduction == Reduction::None)
{
THCTensor_(resize1d)(state, output, input->size(0));
if (p == 1)
{
cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, input->size(1),
false,
margin
);
}
else if (p == 2)
{
cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, input->size(1),
false,
margin
);
}
THCudaCheck(cudaGetLastError());
}
else
{
THCTensor_(resize1d)(state, output, 1);
THCTensor *output_ = THCTensor_(newWithSize1d)(state, input->size(0)); // tmp output buffer
if (p == 1)
{
cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output_),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, input->size(1),
reduction == Reduction::ElementwiseMean,
margin
);
}
else if (p == 2)
{
cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output_),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
input->size(0), input->size(1),
reduction == Reduction::ElementwiseMean,
margin
);
}
THCudaCheck(cudaGetLastError());
float sum = THCTensor_(sumall)(state, output_);
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(sum));
THCTensor_(free)(state, output_);
}
}
else
{
AT_ERROR("non-empty vector or matrix expected, got sizes: ", input->sizes());
}
THCTensor_(free)(state, input);
if(weights)
THCTensor_(free)(state, weights);
}
void THNN_(MultiMarginCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
int64_t reduction,
int p,
THCTensor *weights,
accreal margin_)
{
scalar_t margin = ScalarConvert<accreal, scalar_t>::to(margin_);
THCUNN_assertSameGPU(state, 3, input, gradInput, target);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
if(weights)
weights = THCTensor_(newContiguous)(state, weights);
if (THTensor_nDimensionLegacyNoScalars(input) == 1)
{
dim3 blocks(1);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1)
{
cunn_MultiMarginCriterion_updateGradInput_kernel<1, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, THTensor_sizeLegacyNoScalars(gradInput, 0),
reduction == Reduction::ElementwiseMean,
margin,
reduction != Reduction::None
);
}
else if (p == 2)
{
cunn_MultiMarginCriterion_updateGradInput_kernel<2, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, THTensor_sizeLegacyNoScalars(gradInput, 0),
reduction == Reduction::ElementwiseMean,
margin,
reduction != Reduction::None
);
}
THCudaCheck(cudaGetLastError());
}
else if (input->dim() == 2)
{
int nframe = gradInput->size(0);
THArgCheck(!target->is_empty() && (THTensor_nDimensionLegacyNoScalars(target) == 1) && (THTensor_sizeLegacyNoScalars(target, 0) == nframe), 3,
"inconsistent target size");
dim3 blocks(gradInput->size(0));
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1)
{
cunn_MultiMarginCriterion_updateGradInput_kernel<1, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, gradInput->size(1),
reduction == Reduction::ElementwiseMean,
margin,
reduction != Reduction::None
);
}
else if (p == 2)
{
cunn_MultiMarginCriterion_updateGradInput_kernel<2, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, gradInput->size(1),
reduction == Reduction::ElementwiseMean,
margin,
reduction != Reduction::None
);
}
THCudaCheck(cudaGetLastError());
}
else
{
AT_ERROR("non-empty vector or matrix expected, got ", input->sizes());
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
if(weights)
THCTensor_(free)(state, weights);
}
#endif
|
289ab75699353a565e1f4ecc7eaee6d47d6a65e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/// @file
////////////////////////////////////////////////////////////////////////////////////////////////////
///
/// Copyright (C) 2016/17 Christian Lessig, Otto-von-Guericke Universitaet Magdeburg
///
////////////////////////////////////////////////////////////////////////////////////////////////////
///
/// module : tutorial 5
///
/// author : lessig@isg.cs.ovgu.de
///
/// project : GPU Programming
///
/// description: CUDA convolution
///
////////////////////////////////////////////////////////////////////////////////////////////////////
// includes, system
#include <iostream>
#include <algorithm>
#include <chrono>
typedef std::chrono::time_point<std::chrono::high_resolution_clock> tpoint;
// includes, project
#include "cuda_util.h"
#include "kernel_separable.h"
#include "image.h"
// host implementation
extern void
convSeparableHost( float* kdata, const int& kernel_supp, const Image& image, Image& image_conv);
////////////////////////////////////////////////////////////////////////////////////////////////////
// convolution
////////////////////////////////////////////////////////////////////////////////////////////////////
template< int KernelSuppHalf >
__global__
void
convSeparable3( float* gkernel, float* image, float* image_conv, const unsigned int image_size) {
__shared__ float kernel[32];
if( (threadIdx.x < 2*KernelSuppHalf+1) && (0 == threadIdx.y) ) {
kernel[threadIdx.x] = gkernel[threadIdx.x];
}
__syncthreads();
int pixel_x = blockIdx.x * blockDim.x + threadIdx.x;
int pixel_y = blockIdx.y * blockDim.y + threadIdx.y;
KernelSeperable<KernelSuppHalf>* skernel = (KernelSeperable<KernelSuppHalf>*) kernel;
skernel->apply( pixel_x, pixel_y, image, image_conv, image_size);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// convolution
////////////////////////////////////////////////////////////////////////////////////////////////////
template< int KernelSuppHalf >
__global__
void
convSeparable2( float* gkernel, float* image, float* image_conv, const unsigned int image_size) {
__shared__ float kernel[32];
if( (threadIdx.x < 2*KernelSuppHalf+1) && (0 == threadIdx.y) ) {
kernel[threadIdx.x] = gkernel[threadIdx.x];
}
__syncthreads();
int pixel_x = blockIdx.x * blockDim.x + threadIdx.x;
int pixel_y = blockIdx.y * blockDim.y + threadIdx.y;
image_conv[pixel_x * image_size + pixel_y] = 0.0;
float weight_row = 0.0;
float weight = 0.0;
int ik = 0;
int jk = 0;
for( int i = pixel_x - KernelSuppHalf; i <= pixel_x + KernelSuppHalf; ++i, ++ik) {
weight_row = kernel[ik];
jk = 0;
for( int j = pixel_y - KernelSuppHalf; j <= pixel_y + KernelSuppHalf; ++j, ++jk) {
if( ( i < 0 || j < 0) || (i >= image_size) || (j >= image_size)) {
continue;
}
weight = weight_row * kernel[jk];
image_conv[pixel_x * image_size + pixel_y] += weight * image[i * image_size + j];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// convolution
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void
convSeparable1( float* kernel, const int kernel_supp_half,
float* image, float* image_conv, const unsigned int image_size) {
int pixel_x = blockIdx.x * blockDim.x + threadIdx.x;
int pixel_y = blockIdx.y * blockDim.y + threadIdx.y;
image_conv[pixel_x * image_size + pixel_y] = 0.0;
float weight_row = 0.0;
float weight = 0.0;
int ik = 0;
int jk = 0;
for( int i = pixel_x - kernel_supp_half; i <= pixel_x + kernel_supp_half; ++i, ++ik) {
weight_row = kernel[ik];
jk = 0;
for( int j = pixel_y - kernel_supp_half; j <= pixel_y + kernel_supp_half; ++j, ++jk) {
if( ( i < 0 || j < 0) || (i >= image_size) || (j >= image_size)) {
continue;
}
weight = weight_row * kernel[jk];
image_conv[pixel_x * image_size + pixel_y] += weight * image[i * image_size + j];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// initialize Cuda device
////////////////////////////////////////////////////////////////////////////////////////////////////
bool
initDevice( int& device_handle, int& max_threads_per_block) {
int deviceCount = 0;
checkErrorsCuda( hipGetDeviceCount(&deviceCount));
if( 0 == deviceCount) {
std::cerr << "initDevice() : No CUDA device found." << std::endl;
return false;
}
// one could implement more complex logic here to find the fastest device
if( deviceCount > 1) {
std::cerr << "initDevice() : Multiple CUDA devices found. Using first one." << std::endl;
}
// set the device
checkErrorsCuda( hipSetDevice( device_handle));
hipDeviceProp_t device_props;
checkErrorsCuda( hipGetDeviceProperties(&device_props, device_handle));
max_threads_per_block = device_props.maxThreadsPerBlock;
return true;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// program entry point
////////////////////////////////////////////////////////////////////////////////////////////////////
int
main( int /*argc*/, char** /*argv*/ ) {
const int kernel_supp = 5;
const int kernel_supp_half = kernel_supp / 2;
float kdata[] = {0.0103339f, 0.207561f, 0.56421f, 0.207561f, 0.0103339f};
Image image( "../images/im.pgm");
Image image_conv( image.n_rows, image.n_cols);
convSeparableHost( kdata, kernel_supp_half, image, image_conv);
image_conv.write( "../images/im_conv_host.pgm");
// check execution environment
int device_handle = 0;
int max_threads_per_block = 0;
if( ! initDevice( device_handle, max_threads_per_block)) {
return EXIT_FAILURE;
}
// initialize memory
float* kernel_device = nullptr;
float* image_device = nullptr;
float* image_conv_device = nullptr;
// allocate device memory
checkErrorsCuda( hipMalloc((void **) &kernel_device, sizeof(float) * kernel_supp));
checkErrorsCuda( hipMalloc((void **) &image_device, sizeof(float) * image.n_cols * image.n_rows));
checkErrorsCuda( hipMalloc((void **) &image_conv_device, sizeof(float) * image.n_cols * image.n_rows));
// copy device memory
checkErrorsCuda( hipMemcpy( (void*) kernel_device, kdata,
sizeof(float) * kernel_supp,
hipMemcpyHostToDevice ));
checkErrorsCuda( hipMemcpy( (void*) image_device, &(image.data[0]),
sizeof(float) * image.n_cols * image.n_rows,
hipMemcpyHostToDevice ));
// determine thread layout
int max_threads_per_block_sqrt = std::sqrt( max_threads_per_block);
assert( max_threads_per_block_sqrt * max_threads_per_block_sqrt == max_threads_per_block);
dim3 num_threads_per_block( ::min( image.n_rows, max_threads_per_block_sqrt),
::min( image.n_cols, max_threads_per_block_sqrt) );
dim3 num_blocks( image.n_rows / num_threads_per_block.x, image.n_cols / num_threads_per_block.y);
if( 0 == num_blocks.x) {
num_blocks.x++;
}
if( 0 == num_blocks.y) {
num_blocks.y++;
}
std::cout << "num_blocks = " << num_blocks.x << " / " << num_blocks.y << std::endl;
std::cout << "num_threads_per_block = " << num_threads_per_block.x << " / "
<< num_threads_per_block.y << std::endl;
// run kernel
assert( image.n_rows == image.n_cols);
tpoint t_start = std::chrono::high_resolution_clock::now();
#if 0
hipLaunchKernelGGL(( convSeparable1), dim3(num_blocks) , dim3(num_threads_per_block) , 0, 0, kernel_device, kernel_supp_half, image_device,
image_conv_device, image.n_rows);
#endif
#if 0
hipLaunchKernelGGL(( convSeparable2<kernel_supp_half>), dim3(num_blocks) , dim3(num_threads_per_block) , 0, 0, kernel_device, image_device,
image_conv_device, image.n_rows);
#endif
hipLaunchKernelGGL(( convSeparable3<kernel_supp_half>), dim3(num_blocks) , dim3(num_threads_per_block) , 0, 0, kernel_device, image_device,
image_conv_device, image.n_rows);
tpoint t_end = std::chrono::high_resolution_clock::now();
double wall_clock = std::chrono::duration<double, std::milli>(t_end-t_start).count();
std::cerr << "Execution time: " << wall_clock << " ms."<< std::endl;
checkLastCudaError("Kernel execution failed");
hipDeviceSynchronize();
// copy result back to host
checkErrorsCuda( hipMemcpy( &image_conv.data[0], image_conv_device,
sizeof(float) * image.n_cols * image.n_rows,
hipMemcpyDeviceToHost ));
// write result
image_conv.write( "../images/im_conv_device.pgm");
// clean up device memory
checkErrorsCuda( hipFree( kernel_device));
checkErrorsCuda( hipFree( image_device));
checkErrorsCuda( hipFree( image_conv_device));
return EXIT_SUCCESS;
}
| 289ab75699353a565e1f4ecc7eaee6d47d6a65e6.cu | /// @file
////////////////////////////////////////////////////////////////////////////////////////////////////
///
/// Copyright (C) 2016/17 Christian Lessig, Otto-von-Guericke Universitaet Magdeburg
///
////////////////////////////////////////////////////////////////////////////////////////////////////
///
/// module : tutorial 5
///
/// author : lessig@isg.cs.ovgu.de
///
/// project : GPU Programming
///
/// description: CUDA convolution
///
////////////////////////////////////////////////////////////////////////////////////////////////////
// includes, system
#include <iostream>
#include <algorithm>
#include <chrono>
typedef std::chrono::time_point<std::chrono::high_resolution_clock> tpoint;
// includes, project
#include "cuda_util.h"
#include "kernel_separable.h"
#include "image.h"
// host implementation
extern void
convSeparableHost( float* kdata, const int& kernel_supp, const Image& image, Image& image_conv);
////////////////////////////////////////////////////////////////////////////////////////////////////
// convolution
////////////////////////////////////////////////////////////////////////////////////////////////////
template< int KernelSuppHalf >
__global__
void
convSeparable3( float* gkernel, float* image, float* image_conv, const unsigned int image_size) {
__shared__ float kernel[32];
if( (threadIdx.x < 2*KernelSuppHalf+1) && (0 == threadIdx.y) ) {
kernel[threadIdx.x] = gkernel[threadIdx.x];
}
__syncthreads();
int pixel_x = blockIdx.x * blockDim.x + threadIdx.x;
int pixel_y = blockIdx.y * blockDim.y + threadIdx.y;
KernelSeperable<KernelSuppHalf>* skernel = (KernelSeperable<KernelSuppHalf>*) kernel;
skernel->apply( pixel_x, pixel_y, image, image_conv, image_size);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// convolution
////////////////////////////////////////////////////////////////////////////////////////////////////
template< int KernelSuppHalf >
__global__
void
convSeparable2( float* gkernel, float* image, float* image_conv, const unsigned int image_size) {
__shared__ float kernel[32];
if( (threadIdx.x < 2*KernelSuppHalf+1) && (0 == threadIdx.y) ) {
kernel[threadIdx.x] = gkernel[threadIdx.x];
}
__syncthreads();
int pixel_x = blockIdx.x * blockDim.x + threadIdx.x;
int pixel_y = blockIdx.y * blockDim.y + threadIdx.y;
image_conv[pixel_x * image_size + pixel_y] = 0.0;
float weight_row = 0.0;
float weight = 0.0;
int ik = 0;
int jk = 0;
for( int i = pixel_x - KernelSuppHalf; i <= pixel_x + KernelSuppHalf; ++i, ++ik) {
weight_row = kernel[ik];
jk = 0;
for( int j = pixel_y - KernelSuppHalf; j <= pixel_y + KernelSuppHalf; ++j, ++jk) {
if( ( i < 0 || j < 0) || (i >= image_size) || (j >= image_size)) {
continue;
}
weight = weight_row * kernel[jk];
image_conv[pixel_x * image_size + pixel_y] += weight * image[i * image_size + j];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// convolution
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void
convSeparable1( float* kernel, const int kernel_supp_half,
float* image, float* image_conv, const unsigned int image_size) {
int pixel_x = blockIdx.x * blockDim.x + threadIdx.x;
int pixel_y = blockIdx.y * blockDim.y + threadIdx.y;
image_conv[pixel_x * image_size + pixel_y] = 0.0;
float weight_row = 0.0;
float weight = 0.0;
int ik = 0;
int jk = 0;
for( int i = pixel_x - kernel_supp_half; i <= pixel_x + kernel_supp_half; ++i, ++ik) {
weight_row = kernel[ik];
jk = 0;
for( int j = pixel_y - kernel_supp_half; j <= pixel_y + kernel_supp_half; ++j, ++jk) {
if( ( i < 0 || j < 0) || (i >= image_size) || (j >= image_size)) {
continue;
}
weight = weight_row * kernel[jk];
image_conv[pixel_x * image_size + pixel_y] += weight * image[i * image_size + j];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// initialize Cuda device
////////////////////////////////////////////////////////////////////////////////////////////////////
bool
initDevice( int& device_handle, int& max_threads_per_block) {
int deviceCount = 0;
checkErrorsCuda( cudaGetDeviceCount(&deviceCount));
if( 0 == deviceCount) {
std::cerr << "initDevice() : No CUDA device found." << std::endl;
return false;
}
// one could implement more complex logic here to find the fastest device
if( deviceCount > 1) {
std::cerr << "initDevice() : Multiple CUDA devices found. Using first one." << std::endl;
}
// set the device
checkErrorsCuda( cudaSetDevice( device_handle));
cudaDeviceProp device_props;
checkErrorsCuda( cudaGetDeviceProperties(&device_props, device_handle));
max_threads_per_block = device_props.maxThreadsPerBlock;
return true;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// program entry point
////////////////////////////////////////////////////////////////////////////////////////////////////
int
main( int /*argc*/, char** /*argv*/ ) {
const int kernel_supp = 5;
const int kernel_supp_half = kernel_supp / 2;
float kdata[] = {0.0103339f, 0.207561f, 0.56421f, 0.207561f, 0.0103339f};
Image image( "../images/im.pgm");
Image image_conv( image.n_rows, image.n_cols);
convSeparableHost( kdata, kernel_supp_half, image, image_conv);
image_conv.write( "../images/im_conv_host.pgm");
// check execution environment
int device_handle = 0;
int max_threads_per_block = 0;
if( ! initDevice( device_handle, max_threads_per_block)) {
return EXIT_FAILURE;
}
// initialize memory
float* kernel_device = nullptr;
float* image_device = nullptr;
float* image_conv_device = nullptr;
// allocate device memory
checkErrorsCuda( cudaMalloc((void **) &kernel_device, sizeof(float) * kernel_supp));
checkErrorsCuda( cudaMalloc((void **) &image_device, sizeof(float) * image.n_cols * image.n_rows));
checkErrorsCuda( cudaMalloc((void **) &image_conv_device, sizeof(float) * image.n_cols * image.n_rows));
// copy device memory
checkErrorsCuda( cudaMemcpy( (void*) kernel_device, kdata,
sizeof(float) * kernel_supp,
cudaMemcpyHostToDevice ));
checkErrorsCuda( cudaMemcpy( (void*) image_device, &(image.data[0]),
sizeof(float) * image.n_cols * image.n_rows,
cudaMemcpyHostToDevice ));
// determine thread layout
int max_threads_per_block_sqrt = std::sqrt( max_threads_per_block);
assert( max_threads_per_block_sqrt * max_threads_per_block_sqrt == max_threads_per_block);
dim3 num_threads_per_block( std::min( image.n_rows, max_threads_per_block_sqrt),
std::min( image.n_cols, max_threads_per_block_sqrt) );
dim3 num_blocks( image.n_rows / num_threads_per_block.x, image.n_cols / num_threads_per_block.y);
if( 0 == num_blocks.x) {
num_blocks.x++;
}
if( 0 == num_blocks.y) {
num_blocks.y++;
}
std::cout << "num_blocks = " << num_blocks.x << " / " << num_blocks.y << std::endl;
std::cout << "num_threads_per_block = " << num_threads_per_block.x << " / "
<< num_threads_per_block.y << std::endl;
// run kernel
assert( image.n_rows == image.n_cols);
tpoint t_start = std::chrono::high_resolution_clock::now();
#if 0
convSeparable1<<< num_blocks , num_threads_per_block >>>( kernel_device, kernel_supp_half, image_device,
image_conv_device, image.n_rows);
#endif
#if 0
convSeparable2<kernel_supp_half><<< num_blocks , num_threads_per_block >>>( kernel_device, image_device,
image_conv_device, image.n_rows);
#endif
convSeparable3<kernel_supp_half><<< num_blocks , num_threads_per_block >>>( kernel_device, image_device,
image_conv_device, image.n_rows);
tpoint t_end = std::chrono::high_resolution_clock::now();
double wall_clock = std::chrono::duration<double, std::milli>(t_end-t_start).count();
std::cerr << "Execution time: " << wall_clock << " ms."<< std::endl;
checkLastCudaError("Kernel execution failed");
cudaDeviceSynchronize();
// copy result back to host
checkErrorsCuda( cudaMemcpy( &image_conv.data[0], image_conv_device,
sizeof(float) * image.n_cols * image.n_rows,
cudaMemcpyDeviceToHost ));
// write result
image_conv.write( "../images/im_conv_device.pgm");
// clean up device memory
checkErrorsCuda( cudaFree( kernel_device));
checkErrorsCuda( cudaFree( image_device));
checkErrorsCuda( cudaFree( image_conv_device));
return EXIT_SUCCESS;
}
|
50437ad3ebdedb6db27747a8342a05093a5a3a85.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/hip/HIPBlas.h>
namespace at { namespace native {
Tensor baddbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm");
return legacy::cuda::_th_baddbmm(b_self, batch1, batch2, beta, alpha);
}
Tensor& baddbmm_out_cuda(Tensor &result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm_out");
return legacy::cuda::_th_baddbmm_out(result, b_self, batch1, batch2, beta, alpha);
}
Tensor& baddbmm__cuda(Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
return baddbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
Tensor& bmm_out_cuda(Tensor &result, const Tensor& batch1, const Tensor& batch2) {
result.resize_({ batch1.size(0), batch1.size(1), batch2.size(2) });
return legacy::cuda::_th_bmm_out(result, batch1, batch2);
}
Tensor bmm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({0}, self.options());
return native::bmm_out_cuda(result, self, mat2);
}
Tensor prepare_matrix_for_cublas(Tensor& tensor, bool& transpose_tensor) {
Tensor tensor_;
IntArrayRef tensor_strides = tensor.strides();
IntArrayRef tensor_sizes = tensor.sizes();
if ((tensor_strides[0] == 1) && (tensor_strides[1] >= std::max<int64_t>(1, tensor_sizes[0]))) {
tensor_ = tensor;
transpose_tensor = false;
} else if ((tensor_strides[1] == 1) && (tensor_strides[0] >= std::max<int64_t>(1, tensor_sizes[1]))) {
tensor_ = tensor;
transpose_tensor = true;
} else {
transpose_tensor = true;
tensor_ = tensor.clone(at::MemoryFormat::Contiguous);
}
return tensor_;
}
namespace {
Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
TORCH_CHECK(mat1.dim() == 2 && mat2.dim() == 2, "tensors must be 2-D");
TensorArg args[]{{result, "out", 0}, {self, "self", 1}, {mat1, "mat1", 2}, {mat2, "mat2", 3}};
checkAllSameGPU("addmm", args);
Tensor self_;
if (&result != &self) {
std::tie(self_) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
} else {
self_ = self;
}
IntArrayRef mat1_sizes = mat1.sizes();
IntArrayRef mat2_sizes = mat2.sizes();
IntArrayRef self__sizes = self_.sizes();
TORCH_CHECK(mat1_sizes[1] == mat2_sizes[0], "mat1 dim 1 must match mat2 dim 0");
TORCH_CHECK(self__sizes[0] == mat1_sizes[0], "self_ dim 0 must match mat1 dim 0");
TORCH_CHECK(self__sizes[1] == mat2_sizes[1], "self_ dim 1 must match mat2 dim 1");
if (&result != &self) {
at::native::resize_as_(result, self_);
if (beta.toComplexDouble() != 0.0) {
at::native::copy_(result, self_);
}
}
TORCH_CHECK(result.dim() == 2 && self_.dim() == 2, "tensors must be 2-D");
IntArrayRef result_sizes = result.sizes();
if ((result_sizes[0] == 0) || (result_sizes[1] == 0)) {
return result;
}
bool transpose_result;
Tensor result_ = prepare_matrix_for_cublas(result, transpose_result);
bool transpose_mat1;
bool transpose_mat2;
Tensor mat1_ = transpose_result ? mat2 : mat1;
Tensor mat2_ = transpose_result ? mat1 : mat2;
mat1_ = prepare_matrix_for_cublas(mat1_, transpose_mat1);
mat2_ = prepare_matrix_for_cublas(mat2_, transpose_mat2);
if (transpose_result) {
transpose_mat1 = !transpose_mat1;
transpose_mat2 = !transpose_mat2;
mat1_sizes = mat1_.sizes();
mat2_sizes = mat2_.sizes();
}
int64_t m = mat1_sizes[transpose_result ? 1 : 0];
int64_t k = mat1_sizes[transpose_result ? 0 : 1];
int64_t n = mat2_sizes[transpose_result ? 0 : 1];
int64_t mat1_ld = mat1_.stride((transpose_mat1 == transpose_result) ? 1 : 0);
int64_t mat2_ld = mat2_.stride((transpose_mat2 == transpose_result) ? 1 : 0);
int64_t result_ld = result_.stride(transpose_result ? 0 : 1);
at::ScalarType scalar_type = self_.scalar_type();
if (mat1.numel() == 0) {
// By definition, when beta==0, values in self should be ignored. nans and infs
// should not propagate
if (beta.toComplexDouble() == 0.) {
return result.zero_();
}
return at::native::mul_out(result, self, at::native::scalar_tensor(beta, at::device(at::kCPU).dtype(self.scalar_type())));
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "addmm_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t* mat1_ptr = mat1_.data_ptr<scalar_t>();
scalar_t* mat2_ptr = mat2_.data_ptr<scalar_t>();
scalar_t* result_ptr = result_.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
transpose_mat1 ? 't' : 'n',
transpose_mat2 ? 't' : 'n',
m, n, k,
alpha_val,
mat1_ptr, mat1_ld,
mat2_ptr, mat2_ld,
beta_val,
result_ptr, result_ld
);
});
if (result.data_ptr() != result_.data_ptr()) {
result.copy_(result_);
}
return result;
}
} // anonymous namespace
Tensor& mm_out_cuda(Tensor& result, const Tensor& self, const Tensor& mat2) {
result.resize_({ self.size(0), mat2.size(1) });
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor mm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({ self.size(0), mat2.size(1) }, self.options());
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor& addmm_out_cuda(Tensor &out, const Tensor &self,
const Tensor &mat1, const Tensor &mat2,
Scalar beta, Scalar alpha) {
{
at::NoNamesGuard guard;
Tensor& result = addmm_out_cuda_impl(out, self, mat1, mat2, beta, alpha);
}
at::namedinference::propagate_names_for_addmm(out, mat1, mat2, self);
return out;
}
Tensor addmm_cuda(const Tensor& self, const Tensor& mat1, const Tensor& mat2,
Scalar beta, Scalar alpha) {
Tensor out = at::empty({0}, self.options());
addmm_out_cuda(out, self, mat1, mat2, beta, alpha);
return out;
}
Tensor& addmm__cuda(Tensor& self, const Tensor& mat1, const Tensor& mat2,
Scalar beta, Scalar alpha) {
addmm_out_cuda(self, self, mat1, mat2, beta, alpha);
return self;
}
template<typename scalar_t>
void addr_impl_ger_cuda(Tensor &out, const Tensor &self,
const Tensor& vec1, const Tensor& vec2,
scalar_t alpha, scalar_t beta) {
static_assert(std::is_same<scalar_t, float>::value ||
std::is_same<scalar_t, double>::value,
"addr_impl_ger_cuda: only float and double are supported");
if (&out != &self) {
at::native::resize_as_(out, self);
at::native::copy_(out, self);
}
if (beta == 0.0) {
at::native::zero_(out);
}
if (beta != 1.0) {
at::native::mul_(out, beta);
}
if (out.stride(0) == 1) {
at::cuda::blas::ger<scalar_t>(
vec1.size(0), vec2.size(0), alpha,
vec1.data_ptr<scalar_t>(), vec1.stride(0),
vec2.data_ptr<scalar_t>(), vec2.stride(0),
out.data_ptr<scalar_t>(), out.stride(1)
);
} else if (out.stride(1) == 1) {
at::cuda::blas::ger<scalar_t>(
vec2.size(0), vec1.size(0), alpha,
vec2.data_ptr<scalar_t>(), vec2.stride(0),
vec1.data_ptr<scalar_t>(), vec1.stride(0),
out.data_ptr<scalar_t>(), out.stride(0)
);
} else {
Tensor cr = out.clone();
at::cuda::blas::ger<scalar_t>(
vec2.size(0), vec1.size(0), alpha,
vec2.data_ptr<scalar_t>(), vec2.stride(0),
vec1.data_ptr<scalar_t>(), vec1.stride(0),
out.data_ptr<scalar_t>(), out.stride(0)
);
out.set_(cr);
}
}
template<typename scalar_t>
void addr_impl_cuda(Tensor &out, const Tensor &self,
const Tensor& vec1, const Tensor& vec2,
scalar_t alpha, scalar_t beta) {
// currently no Hger/SgerEx in Cublas.
Tensor vec2T = vec2.reshape({1, vec2.size(0)});
Tensor vec1M = vec1.reshape({vec1.size(0), 1});
addmm_out_cuda(out, self, vec1M, vec2T, beta, alpha);
}
template<>
void addr_impl_cuda<float>(Tensor &out, const Tensor &self,
const Tensor& vec1, const Tensor& vec2,
float alpha, float beta) {
addr_impl_ger_cuda<float>(out, self, vec1, vec2, alpha, beta);
}
template<>
void addr_impl_cuda<double>(Tensor &out, const Tensor &self,
const Tensor& vec1, const Tensor& vec2,
double alpha, double beta) {
addr_impl_ger_cuda<double>(out, self, vec1, vec2, alpha, beta);
}
// out = beta * self + alpha * (vec1 ⊗ vec2).
// Validates shapes/devices, broadcasts `self` to (vec1.size(0), vec2.size(0))
// when writing to a distinct output, then dispatches on dtype.
Tensor& addr_out_cuda(Tensor &out, const Tensor& self,
                      const Tensor& vec1, const Tensor& vec2,
                      Scalar beta, Scalar alpha) {
  TORCH_CHECK(vec1.dim() == 1 && vec2.dim() == 1,
              "vec1 and vec2 should be 1-dimensional vectors. Got dimensions ",
              vec1.dim(), " and ", vec2.dim());
  // In-place (&out == &self): use the input as-is; otherwise expand self to
  // the full result shape.
  Tensor self_;
  if (&out != &self) {
    std::tie(self_) = expand_size(self, {vec1.size(0), vec2.size(0)}, "addr");
  } else {
    self_ = self;
  }
  TORCH_CHECK(out.device() == self_.device() &&
              out.device() == vec1.device() &&
              out.device() == vec2.device(),
              "Expected all tensors to be on the same device. Found: ",
              out.device(), ", ", self_.device(), ", ",
              vec1.device(), " and ", vec2.device());
  TORCH_CHECK(self_.dim() == 2,
              "2D tensor expected, got ", self_.dim(), "D tensor for input");
  TORCH_CHECK(self_.size(0) == vec1.size(0) && self_.size(1) == vec2.size(0),
              "size mismatch",
              ", input: ", self_.sizes(),
              ", v1: ", vec1.sizes(),
              ", v2: ", vec2.sizes());
  // Float/double hit the GER specializations; half/bfloat16 fall back to the
  // generic addmm-based implementation.
  AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self_.scalar_type(), "addr_out_cuda", [&] {
    addr_impl_cuda<scalar_t>(out, self_, vec1, vec2,
                             alpha.to<scalar_t>(), beta.to<scalar_t>());
  });
  return out;
}
// In-place variant: self <- beta * self + alpha * (vec1 ⊗ vec2).
Tensor& addr__cuda(Tensor& self,
                   const Tensor& vec1, const Tensor& vec2,
                   Scalar beta, Scalar alpha) {
  return addr_out_cuda(self, self, vec1, vec2, beta, alpha);
}
// Functional variant: allocates the result tensor (resized by addr_out_cuda)
// and delegates.
Tensor addr_cuda(const Tensor& self,
                 const Tensor& vec1, const Tensor& vec2,
                 Scalar beta, Scalar alpha) {
  Tensor result = at::empty({0}, self.options());
  return addr_out_cuda(result, self, vec1, vec2, beta, alpha);
}
// out = beta * self + alpha * sum_i (batch1[i] @ batch2[i]).
// Implemented as `batchnum` sequential addmm calls; after the first call
// beta is forced to 1 so `beta * self` is applied exactly once while the
// remaining products accumulate onto the running sum.
Tensor& addbmm_out_cuda(Tensor& out, const Tensor& self,
                        const Tensor& batch1, const Tensor& batch2,
                        Scalar beta, Scalar alpha) {
  TORCH_CHECK(batch1.dim() == 3 && batch2.dim() == 3,
              "Batch tensors should be 3D, got dimensions ", batch1.dim(),
              " and ", batch2.dim());
  // Broadcast self to the (m, p) result shape unless operating in place.
  Tensor self_;
  if (&out != &self) {
    std::tie(self_) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm");
  } else {
    self_ = self;
  }
  TORCH_CHECK(out.device() == self_.device() &&
              out.device() == batch1.device() &&
              out.device() == batch2.device(),
              "Expected all tensors to be on the same device. Found: ",
              out.device(), ", ", self_.device(), ", ",
              batch1.device(), " and ", batch2.device());
  TORCH_CHECK(self_.dim() == 2,
              "2D tensor expected, got ", self_.dim(), "D tensor for input");
  // batch1 is (b, m, k), batch2 is (b, k, p), input is (m, p).
  int64_t batchnum = batch1.size(0);
  int64_t m1d1 = batch1.size(1);
  int64_t innerdim = batch1.size(2);
  int64_t m2d2 = batch2.size(2);
  TORCH_CHECK(batchnum == batch2.size(0),
              "equal number of batches expected");
  TORCH_CHECK(m1d1 == self_.size(0),
              "first dimension of batch1  must match first dimension of input");
  TORCH_CHECK(m2d2 == self_.size(1),
              "second dimension of batch2 must match second dimension of input");
  TORCH_CHECK(innerdim == batch2.size(1),
              "second dimension of batch1 must match first dimension of batch2");
  if (&out != &self) {
    at::native::resize_as_(out, self_);
    // beta == 0: skip the copy so nan/inf in self cannot leak into out.
    if (beta.to<double>() != 0.0) {
      at::native::copy_(out, self_);
    }
  }
  for (int64_t i=0; i<batchnum; i++) {
    addmm_out_cuda(out, out, batch1[i], batch2[i], beta, alpha);
    beta = 1; // later iterations accumulate onto the partial result
  }
  return out;
}
// In-place variant of addbmm.
Tensor& addbmm__cuda(Tensor& self,
                     const Tensor& batch1, const Tensor& batch2,
                     Scalar beta, Scalar alpha) {
  return addbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
// Functional variant of addbmm: allocates the result tensor (resized by
// addbmm_out_cuda) and delegates.
Tensor addbmm_cuda(const Tensor& self,
                   const Tensor& batch1, const Tensor& batch2,
                   Scalar beta, Scalar alpha)
{
  Tensor result = at::empty({0}, self.options());
  return addbmm_out_cuda(result, self, batch1, batch2, beta, alpha);
}
namespace {

// Validates the operands of dot/vdot: both 1-D, same dtype, same length,
// same device, and sizes/strides small enough for the 32-bit BLAS interface.
inline void dot_check(const Tensor& self, const Tensor& other) {
  TORCH_CHECK(
      self.dim() == 1 && other.dim() == 1,
      "1D tensors expected, but got ",
      self.dim(),
      "D and ",
      other.dim(),
      "D tensors");
  TORCH_CHECK(
      self.scalar_type() == other.scalar_type(),
      "dot : expected both vectors to have same dtype, but found ",
      self.scalar_type(),
      " and ",
      other.scalar_type());
  TORCH_CHECK(
      self.numel() == other.numel(),
      "inconsistent tensor size, expected tensor [",
      self.numel(),
      "] and src [",
      other.numel(),
      "] to have the same number of elements, but got ",
      self.numel(),
      " and ",
      other.numel(),
      " elements respectively");
  TORCH_CHECK(
      self.device() == other.device(),
      "expected all tensors to be on the same device. Found: ",
      self.device(),
      ", ",
      other.device());
  // Fix: TORCH_CHECK streams its arguments, so the old printf-style "%d"
  // placeholder was printed verbatim before INT_MAX; drop it and let the
  // value be streamed directly.
  TORCH_CHECK(
      (self.numel() <= INT_MAX) && (self.stride(0) <= INT_MAX) &&
          (other.stride(0) <= INT_MAX),
      "dot only supports n, incx, incy with the bound [val] <= ",
      INT_MAX);
}

} // anonymous namespace
// Dot product of two 1-D CUDA tensors via the BLAS dot routine.  The result
// is a 0-dim tensor that stays on the device: pointer mode DEVICE makes BLAS
// write the scalar to device memory instead of forcing a host sync.
Tensor dot_cuda(const Tensor& self, const Tensor& other) {
  at::NoNamesGuard guard;
  dot_check(self, other);
  const int n = static_cast<int>(self.numel());
  int incx = static_cast<int>(self.stride(0));
  int incy = static_cast<int>(other.stride(0));
  if (n == 1) {
    // With a single element the stride is irrelevant; normalize it so BLAS
    // never sees a degenerate increment.
    incx = 1;
    incy = 1;
  }
  return AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, self.scalar_type(), "dot", [&] {
    Tensor result = at::empty({}, self.options());
    auto handle = at::cuda::getCurrentCUDABlasHandle();
    at::cuda::blas::PointerModeGuard pointerModeGuard(handle, HIPBLAS_POINTER_MODE_DEVICE);
    at::cuda::blas::dot<scalar_t>(
        handle,
        n,
        self.data_ptr<scalar_t>(),
        incx,
        other.data_ptr<scalar_t>(),
        incy,
        result.data_ptr<scalar_t>());
    return result;
  });
}
// Conjugated dot product: for complex dtypes computes sum(conj(self) * other)
// via the BLAS vdot routine; real dtypes reduce to the plain dot product.
// The 0-dim result stays on the device (pointer mode DEVICE).
Tensor vdot_cuda(const Tensor& self, const Tensor& other) {
  if (!self.is_complex()) {
    return dot_cuda(self, other);
  }
  at::NoNamesGuard guard;
  dot_check(self, other);
  const int n = static_cast<int>(self.numel());
  int incx = static_cast<int>(self.stride(0));
  int incy = static_cast<int>(other.stride(0));
  if (n == 1) {
    // Single element: normalize strides so BLAS never sees a degenerate
    // increment.
    incx = 1;
    incy = 1;
  }
  return AT_DISPATCH_COMPLEX_TYPES(self.scalar_type(), "vdot", [&] {
    Tensor result = at::empty({}, self.options());
    auto handle = at::cuda::getCurrentCUDABlasHandle();
    at::cuda::blas::PointerModeGuard pointerModeGuard(
        handle, HIPBLAS_POINTER_MODE_DEVICE);
    at::cuda::blas::vdot<scalar_t>(
        handle,
        n,
        self.data_ptr<scalar_t>(),
        incx,
        other.data_ptr<scalar_t>(),
        incy,
        result.data_ptr<scalar_t>());
    return result;
  });
}
} }
| 50437ad3ebdedb6db27747a8342a05093a5a3a85.cu | #include <ATen/ATen.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/cuda/CUDABlas.h>
namespace at { namespace native {
// out = beta * self + alpha * (batch1 @ batch2), batched.  `self` is expanded
// to the full (b, n, p) result shape before dispatching to the TH kernel.
Tensor baddbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
  Tensor expanded;
  std::tie(expanded) = expand_size(
      self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm");
  return legacy::cuda::_th_baddbmm(expanded, batch1, batch2, beta, alpha);
}
// Out variant of baddbmm: expand `self` to the batched result shape and let
// the TH kernel write into `result`.
Tensor& baddbmm_out_cuda(Tensor &result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
  Tensor expanded;
  std::tie(expanded) = expand_size(
      self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm_out");
  return legacy::cuda::_th_baddbmm_out(result, expanded, batch1, batch2, beta, alpha);
}
// In-place variant: self is both the accumulator input and the output.
Tensor& baddbmm__cuda(Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
  return baddbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
// Batched matrix multiply into a caller-provided tensor; resizes `result`
// to (b, n, p) and dispatches to the TH kernel.
Tensor& bmm_out_cuda(Tensor &result, const Tensor& batch1, const Tensor& batch2) {
  result.resize_({ batch1.size(0), batch1.size(1), batch2.size(2) });
  return legacy::cuda::_th_bmm_out(result, batch1, batch2);
}
// Functional batched matrix multiply; allocates an empty result that
// bmm_out_cuda resizes to the proper shape.
Tensor bmm_cuda(const Tensor& self, const Tensor& mat2) {
  Tensor out = at::empty({0}, self.options());
  return native::bmm_out_cuda(out, self, mat2);
}
// Returns a tensor usable as a cuBLAS GEMM operand: `tensor` itself when one
// of its two dimensions is unit-stride with a valid leading dimension, or a
// contiguous clone otherwise.  `transpose_tensor` reports whether the
// returned matrix must be treated as transposed (row-major layout) by the
// column-major BLAS call.
Tensor prepare_matrix_for_cublas(Tensor& tensor, bool& transpose_tensor) {
  Tensor tensor_;
  IntArrayRef tensor_strides = tensor.strides();
  IntArrayRef tensor_sizes = tensor.sizes();
  // Column-major: stride(0) == 1 and stride(1) is a valid leading dimension.
  if ((tensor_strides[0] == 1) && (tensor_strides[1] >= std::max<int64_t>(1, tensor_sizes[0]))) {
    tensor_ = tensor;
    transpose_tensor = false;
  // Row-major: stride(1) == 1 — usable as the transpose of a column-major matrix.
  } else if ((tensor_strides[1] == 1) && (tensor_strides[0] >= std::max<int64_t>(1, tensor_sizes[1]))) {
    tensor_ = tensor;
    transpose_tensor = true;
  } else {
    // Arbitrary strides: materialize a contiguous (row-major) copy.
    transpose_tensor = true;
    tensor_ = tensor.clone(at::MemoryFormat::Contiguous);
  }
  return tensor_;
}
namespace {

// Core GEMM driver: result = beta * self + alpha * (mat1 @ mat2).
// cuBLAS is column-major, so operands are normalized via
// prepare_matrix_for_cublas; when the output itself is row-major the whole
// product is computed transposed (mat1/mat2 are swapped) so the GEMM can
// still write a column-major matrix.
Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
  TORCH_CHECK(mat1.dim() == 2 && mat2.dim() == 2, "tensors must be 2-D");
  TensorArg args[]{{result, "out", 0}, {self, "self", 1}, {mat1, "mat1", 2}, {mat2, "mat2", 3}};
  checkAllSameGPU("addmm", args);
  // Broadcast self to the (m, p) result shape unless operating in place.
  Tensor self_;
  if (&result != &self) {
    std::tie(self_) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
  } else {
    self_ = self;
  }
  IntArrayRef mat1_sizes = mat1.sizes();
  IntArrayRef mat2_sizes = mat2.sizes();
  IntArrayRef self__sizes = self_.sizes();
  TORCH_CHECK(mat1_sizes[1] == mat2_sizes[0], "mat1 dim 1 must match mat2 dim 0");
  TORCH_CHECK(self__sizes[0] == mat1_sizes[0], "self_ dim 0 must match mat1 dim 0");
  TORCH_CHECK(self__sizes[1] == mat2_sizes[1], "self_ dim 1 must match mat2 dim 1");
  if (&result != &self) {
    at::native::resize_as_(result, self_);
    // beta == 0: skip the copy so nan/inf in self cannot leak into result.
    if (beta.toComplexDouble() != 0.0) {
      at::native::copy_(result, self_);
    }
  }
  TORCH_CHECK(result.dim() == 2 && self_.dim() == 2, "tensors must be 2-D");
  // Empty output: nothing to compute.
  IntArrayRef result_sizes = result.sizes();
  if ((result_sizes[0] == 0) || (result_sizes[1] == 0)) {
    return result;
  }
  bool transpose_result;
  Tensor result_ = prepare_matrix_for_cublas(result, transpose_result);
  bool transpose_mat1;
  bool transpose_mat2;
  // Row-major result: compute (mat2^T @ mat1^T) instead, which is the
  // transposed product viewed column-major.
  Tensor mat1_ = transpose_result ? mat2 : mat1;
  Tensor mat2_ = transpose_result ? mat1 : mat2;
  mat1_ = prepare_matrix_for_cublas(mat1_, transpose_mat1);
  mat2_ = prepare_matrix_for_cublas(mat2_, transpose_mat2);
  if (transpose_result) {
    transpose_mat1 = !transpose_mat1;
    transpose_mat2 = !transpose_mat2;
    mat1_sizes = mat1_.sizes();
    mat2_sizes = mat2_.sizes();
  }
  // GEMM problem dimensions and leading dimensions in column-major terms.
  int64_t m = mat1_sizes[transpose_result ? 1 : 0];
  int64_t k = mat1_sizes[transpose_result ? 0 : 1];
  int64_t n = mat2_sizes[transpose_result ? 0 : 1];
  int64_t mat1_ld = mat1_.stride((transpose_mat1 == transpose_result) ? 1 : 0);
  int64_t mat2_ld = mat2_.stride((transpose_mat2 == transpose_result) ? 1 : 0);
  int64_t result_ld = result_.stride(transpose_result ? 0 : 1);
  at::ScalarType scalar_type = self_.scalar_type();
  if (mat1.numel() == 0) {
    // k == 0: the product is all zeros, so the result is just beta * self.
    // By definition, when beta==0, values in self should be ignored. nans and infs
    // should not propagate
    if (beta.toComplexDouble() == 0.) {
      return result.zero_();
    }
    return at::native::mul_out(result, self, at::native::scalar_tensor(beta, at::device(at::kCPU).dtype(self.scalar_type())));
  }
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "addmm_cuda", [&] {
    scalar_t alpha_val = alpha.to<scalar_t>();
    scalar_t beta_val = beta.to<scalar_t>();
    scalar_t* mat1_ptr = mat1_.data_ptr<scalar_t>();
    scalar_t* mat2_ptr = mat2_.data_ptr<scalar_t>();
    scalar_t* result_ptr = result_.data_ptr<scalar_t>();
    at::cuda::blas::gemm<scalar_t>(
      transpose_mat1 ? 't' : 'n',
      transpose_mat2 ? 't' : 'n',
      m, n, k,
      alpha_val,
      mat1_ptr, mat1_ld,
      mat2_ptr, mat2_ld,
      beta_val,
      result_ptr, result_ld
    );
  });
  // If prepare_matrix_for_cublas had to clone the output, copy it back.
  if (result.data_ptr() != result_.data_ptr()) {
    result.copy_(result_);
  }
  return result;
}

} // anonymous namespace
// Matrix multiply into a caller-provided tensor: mm is addmm with beta = 0
// and alpha = 1 (self aliases the output, whose values are ignored).
Tensor& mm_out_cuda(Tensor& result, const Tensor& self, const Tensor& mat2) {
  result.resize_({ self.size(0), mat2.size(1) });
  return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
// Functional matrix multiply: allocate the (m, p) output, then run addmm
// with beta = 0 and alpha = 1 (the output aliases the ignored `self` slot).
Tensor mm_cuda(const Tensor& self, const Tensor& mat2) {
  Tensor out = at::empty({self.size(0), mat2.size(1)}, self.options());
  return addmm_out_cuda_impl(out, out, self, mat2, 0, 1);
}
// Public out-variant of addmm: runs the GEMM with named-tensor propagation
// suppressed, then restores names explicitly.
Tensor& addmm_out_cuda(Tensor &out, const Tensor &self,
                       const Tensor &mat1, const Tensor &mat2,
                       Scalar beta, Scalar alpha) {
  {
    // Names are re-derived below, so keep the guard alive only for the
    // actual computation.  (Fix: the returned reference was previously bound
    // to an unused local, which only triggered unused-variable warnings.)
    at::NoNamesGuard guard;
    addmm_out_cuda_impl(out, self, mat1, mat2, beta, alpha);
  }
  at::namedinference::propagate_names_for_addmm(out, mat1, mat2, self);
  return out;
}
// Functional variant of addmm: allocates the result tensor (resized inside
// addmm_out_cuda) and delegates.
Tensor addmm_cuda(const Tensor& self, const Tensor& mat1, const Tensor& mat2,
                  Scalar beta, Scalar alpha) {
  Tensor result = at::empty({0}, self.options());
  return addmm_out_cuda(result, self, mat1, mat2, beta, alpha);
}
// In-place variant: self <- beta * self + alpha * (mat1 @ mat2).
Tensor& addmm__cuda(Tensor& self, const Tensor& mat1, const Tensor& mat2,
                    Scalar beta, Scalar alpha) {
  return addmm_out_cuda(self, self, mat1, mat2, beta, alpha);
}
// Rank-1 update: out = beta * self + alpha * (vec1 ⊗ vec2), via the BLAS GER
// routine.  GER accumulates alpha * x * y^T into a column-major matrix, so the
// branches below pick the argument order matching `out`'s actual layout.
// Only float/double are supported (no half-precision GER in cuBLAS).
template<typename scalar_t>
void addr_impl_ger_cuda(Tensor &out, const Tensor &self,
                        const Tensor& vec1, const Tensor& vec2,
                        scalar_t alpha, scalar_t beta) {
  static_assert(std::is_same<scalar_t, float>::value ||
                std::is_same<scalar_t, double>::value,
                "addr_impl_ger_cuda: only float and double are supported");
  if (&out != &self) {
    // Out-of-place: seed `out` with self so GER can accumulate onto it.
    at::native::resize_as_(out, self);
    at::native::copy_(out, self);
  }
  if (beta == 0.0) {
    // beta == 0: values in self (possibly nan/inf) must not propagate.
    // (Zeroing makes the scaling below unnecessary.)
    at::native::zero_(out);
  } else if (beta != 1.0) {
    at::native::mul_(out, beta);
  }
  if (out.stride(0) == 1) {
    // `out` is column-major: out += alpha * vec1 * vec2^T directly.
    at::cuda::blas::ger<scalar_t>(
      vec1.size(0), vec2.size(0), alpha,
      vec1.data_ptr<scalar_t>(), vec1.stride(0),
      vec2.data_ptr<scalar_t>(), vec2.stride(0),
      out.data_ptr<scalar_t>(), out.stride(1)
    );
  } else if (out.stride(1) == 1) {
    // `out` is row-major: update the transpose, out^T += alpha * vec2 * vec1^T.
    at::cuda::blas::ger<scalar_t>(
      vec2.size(0), vec1.size(0), alpha,
      vec2.data_ptr<scalar_t>(), vec2.stride(0),
      vec1.data_ptr<scalar_t>(), vec1.stride(0),
      out.data_ptr<scalar_t>(), out.stride(0)
    );
  } else {
    // Neither dimension is contiguous, so `out` itself is not a valid GER
    // target.  Run the update on a contiguous (row-major) clone and swap it
    // back in.  Fix: the update is written into `cr`; previously it was
    // written into `out` and then discarded by out.set_(cr), losing the
    // rank-1 contribution entirely.
    Tensor cr = out.clone();
    at::cuda::blas::ger<scalar_t>(
      vec2.size(0), vec1.size(0), alpha,
      vec2.data_ptr<scalar_t>(), vec2.stride(0),
      vec1.data_ptr<scalar_t>(), vec1.stride(0),
      cr.data_ptr<scalar_t>(), cr.stride(0)
    );
    out.set_(cr);
  }
}
// Generic fallback for dtypes without a GER routine (e.g. half/bfloat16):
// express the outer product vec1 ⊗ vec2 as an (n x 1) @ (1 x m) matrix
// product and dispatch to addmm.
template<typename scalar_t>
void addr_impl_cuda(Tensor &out, const Tensor &self,
                    const Tensor& vec1, const Tensor& vec2,
                    scalar_t alpha, scalar_t beta) {
  // currently no Hger/SgerEx in Cublas.
  Tensor column = vec1.reshape({vec1.size(0), 1});
  Tensor row = vec2.reshape({1, vec2.size(0)});
  addmm_out_cuda(out, self, column, row, beta, alpha);
}
// float has a real SGER in cuBLAS, so take the dedicated GER path.
template<>
void addr_impl_cuda<float>(Tensor &out, const Tensor &self,
                           const Tensor& vec1, const Tensor& vec2,
                           float alpha, float beta) {
  addr_impl_ger_cuda<float>(out, self, vec1, vec2, alpha, beta);
}
// double has a real DGER in cuBLAS, so take the dedicated GER path.
template<>
void addr_impl_cuda<double>(Tensor &out, const Tensor &self,
                            const Tensor& vec1, const Tensor& vec2,
                            double alpha, double beta) {
  addr_impl_ger_cuda<double>(out, self, vec1, vec2, alpha, beta);
}
// out = beta * self + alpha * (vec1 ⊗ vec2).
// Validates shapes/devices, broadcasts `self` to (vec1.size(0), vec2.size(0))
// when writing to a distinct output, then dispatches on dtype.
Tensor& addr_out_cuda(Tensor &out, const Tensor& self,
                      const Tensor& vec1, const Tensor& vec2,
                      Scalar beta, Scalar alpha) {
  TORCH_CHECK(vec1.dim() == 1 && vec2.dim() == 1,
              "vec1 and vec2 should be 1-dimensional vectors. Got dimensions ",
              vec1.dim(), " and ", vec2.dim());
  // In-place (&out == &self): use the input as-is; otherwise expand self to
  // the full result shape.
  Tensor self_;
  if (&out != &self) {
    std::tie(self_) = expand_size(self, {vec1.size(0), vec2.size(0)}, "addr");
  } else {
    self_ = self;
  }
  TORCH_CHECK(out.device() == self_.device() &&
              out.device() == vec1.device() &&
              out.device() == vec2.device(),
              "Expected all tensors to be on the same device. Found: ",
              out.device(), ", ", self_.device(), ", ",
              vec1.device(), " and ", vec2.device());
  TORCH_CHECK(self_.dim() == 2,
              "2D tensor expected, got ", self_.dim(), "D tensor for input");
  TORCH_CHECK(self_.size(0) == vec1.size(0) && self_.size(1) == vec2.size(0),
              "size mismatch",
              ", input: ", self_.sizes(),
              ", v1: ", vec1.sizes(),
              ", v2: ", vec2.sizes());
  // Float/double hit the GER specializations; half/bfloat16 fall back to the
  // generic addmm-based implementation.
  AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self_.scalar_type(), "addr_out_cuda", [&] {
    addr_impl_cuda<scalar_t>(out, self_, vec1, vec2,
                             alpha.to<scalar_t>(), beta.to<scalar_t>());
  });
  return out;
}
// In-place variant: self <- beta * self + alpha * (vec1 ⊗ vec2).
Tensor& addr__cuda(Tensor& self,
                   const Tensor& vec1, const Tensor& vec2,
                   Scalar beta, Scalar alpha) {
  return addr_out_cuda(self, self, vec1, vec2, beta, alpha);
}
// Functional variant: allocates the result tensor (resized by addr_out_cuda)
// and delegates.
Tensor addr_cuda(const Tensor& self,
                 const Tensor& vec1, const Tensor& vec2,
                 Scalar beta, Scalar alpha) {
  Tensor result = at::empty({0}, self.options());
  return addr_out_cuda(result, self, vec1, vec2, beta, alpha);
}
// out = beta * self + alpha * sum_i (batch1[i] @ batch2[i]).
// Implemented as `batchnum` sequential addmm calls; after the first call
// beta is forced to 1 so `beta * self` is applied exactly once while the
// remaining products accumulate onto the running sum.
Tensor& addbmm_out_cuda(Tensor& out, const Tensor& self,
                        const Tensor& batch1, const Tensor& batch2,
                        Scalar beta, Scalar alpha) {
  TORCH_CHECK(batch1.dim() == 3 && batch2.dim() == 3,
              "Batch tensors should be 3D, got dimensions ", batch1.dim(),
              " and ", batch2.dim());
  // Broadcast self to the (m, p) result shape unless operating in place.
  Tensor self_;
  if (&out != &self) {
    std::tie(self_) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm");
  } else {
    self_ = self;
  }
  TORCH_CHECK(out.device() == self_.device() &&
              out.device() == batch1.device() &&
              out.device() == batch2.device(),
              "Expected all tensors to be on the same device. Found: ",
              out.device(), ", ", self_.device(), ", ",
              batch1.device(), " and ", batch2.device());
  TORCH_CHECK(self_.dim() == 2,
              "2D tensor expected, got ", self_.dim(), "D tensor for input");
  // batch1 is (b, m, k), batch2 is (b, k, p), input is (m, p).
  int64_t batchnum = batch1.size(0);
  int64_t m1d1 = batch1.size(1);
  int64_t innerdim = batch1.size(2);
  int64_t m2d2 = batch2.size(2);
  TORCH_CHECK(batchnum == batch2.size(0),
              "equal number of batches expected");
  TORCH_CHECK(m1d1 == self_.size(0),
              "first dimension of batch1  must match first dimension of input");
  TORCH_CHECK(m2d2 == self_.size(1),
              "second dimension of batch2 must match second dimension of input");
  TORCH_CHECK(innerdim == batch2.size(1),
              "second dimension of batch1 must match first dimension of batch2");
  if (&out != &self) {
    at::native::resize_as_(out, self_);
    // beta == 0: skip the copy so nan/inf in self cannot leak into out.
    if (beta.to<double>() != 0.0) {
      at::native::copy_(out, self_);
    }
  }
  for (int64_t i=0; i<batchnum; i++) {
    addmm_out_cuda(out, out, batch1[i], batch2[i], beta, alpha);
    beta = 1; // later iterations accumulate onto the partial result
  }
  return out;
}
// In-place variant of addbmm.
Tensor& addbmm__cuda(Tensor& self,
                     const Tensor& batch1, const Tensor& batch2,
                     Scalar beta, Scalar alpha) {
  return addbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
// Functional variant of addbmm: allocates the result tensor (resized by
// addbmm_out_cuda) and delegates.
Tensor addbmm_cuda(const Tensor& self,
                   const Tensor& batch1, const Tensor& batch2,
                   Scalar beta, Scalar alpha)
{
  Tensor result = at::empty({0}, self.options());
  return addbmm_out_cuda(result, self, batch1, batch2, beta, alpha);
}
namespace {

// Validates the operands of dot/vdot: both 1-D, same dtype, same length,
// same device, and sizes/strides small enough for the 32-bit BLAS interface.
inline void dot_check(const Tensor& self, const Tensor& other) {
  TORCH_CHECK(
      self.dim() == 1 && other.dim() == 1,
      "1D tensors expected, but got ",
      self.dim(),
      "D and ",
      other.dim(),
      "D tensors");
  TORCH_CHECK(
      self.scalar_type() == other.scalar_type(),
      "dot : expected both vectors to have same dtype, but found ",
      self.scalar_type(),
      " and ",
      other.scalar_type());
  TORCH_CHECK(
      self.numel() == other.numel(),
      "inconsistent tensor size, expected tensor [",
      self.numel(),
      "] and src [",
      other.numel(),
      "] to have the same number of elements, but got ",
      self.numel(),
      " and ",
      other.numel(),
      " elements respectively");
  TORCH_CHECK(
      self.device() == other.device(),
      "expected all tensors to be on the same device. Found: ",
      self.device(),
      ", ",
      other.device());
  // Fix: TORCH_CHECK streams its arguments, so the old printf-style "%d"
  // placeholder was printed verbatim before INT_MAX; drop it and let the
  // value be streamed directly.
  TORCH_CHECK(
      (self.numel() <= INT_MAX) && (self.stride(0) <= INT_MAX) &&
          (other.stride(0) <= INT_MAX),
      "dot only supports n, incx, incy with the bound [val] <= ",
      INT_MAX);
}

} // anonymous namespace
// Dot product of two 1-D CUDA tensors via cuBLAS.  The result is a 0-dim
// tensor that stays on the device: CUBLAS_POINTER_MODE_DEVICE makes cuBLAS
// write the scalar to device memory instead of forcing a host sync.
Tensor dot_cuda(const Tensor& self, const Tensor& other) {
  at::NoNamesGuard guard;
  dot_check(self, other);
  const int n = static_cast<int>(self.numel());
  int incx = static_cast<int>(self.stride(0));
  int incy = static_cast<int>(other.stride(0));
  if (n == 1) {
    // With a single element the stride is irrelevant; normalize it so BLAS
    // never sees a degenerate increment.
    incx = 1;
    incy = 1;
  }
  return AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, self.scalar_type(), "dot", [&] {
    Tensor result = at::empty({}, self.options());
    auto handle = at::cuda::getCurrentCUDABlasHandle();
    at::cuda::blas::PointerModeGuard pointerModeGuard(handle, CUBLAS_POINTER_MODE_DEVICE);
    at::cuda::blas::dot<scalar_t>(
        handle,
        n,
        self.data_ptr<scalar_t>(),
        incx,
        other.data_ptr<scalar_t>(),
        incy,
        result.data_ptr<scalar_t>());
    return result;
  });
}
// Conjugated dot product: for complex dtypes computes sum(conj(self) * other)
// via cuBLAS; real dtypes reduce to the plain dot product.  The 0-dim result
// stays on the device (CUBLAS_POINTER_MODE_DEVICE).
Tensor vdot_cuda(const Tensor& self, const Tensor& other) {
  if (!self.is_complex()) {
    return dot_cuda(self, other);
  }
  at::NoNamesGuard guard;
  dot_check(self, other);
  const int n = static_cast<int>(self.numel());
  int incx = static_cast<int>(self.stride(0));
  int incy = static_cast<int>(other.stride(0));
  if (n == 1) {
    // Single element: normalize strides so BLAS never sees a degenerate
    // increment.
    incx = 1;
    incy = 1;
  }
  return AT_DISPATCH_COMPLEX_TYPES(self.scalar_type(), "vdot", [&] {
    Tensor result = at::empty({}, self.options());
    auto handle = at::cuda::getCurrentCUDABlasHandle();
    at::cuda::blas::PointerModeGuard pointerModeGuard(
        handle, CUBLAS_POINTER_MODE_DEVICE);
    at::cuda::blas::vdot<scalar_t>(
        handle,
        n,
        self.data_ptr<scalar_t>(),
        incx,
        other.data_ptr<scalar_t>(),
        incy,
        result.data_ptr<scalar_t>());
    return result;
  });
}
} }
|
63546100d7a9d0638ff72040d452577658cda930.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define NUMTHREADS 16
#define THREADWORK 32
// Computes a Kendall-tau style correlation between row blockIdx.x of `a` and
// row blockIdx.y of `b`, one sample pair per block.
// Launch layout: grid (na, nb), block (NUMTHREADS, NUMTHREADS).  Each thread
// counts concordant pairs over a strided subset of (i, j) index pairs, then
// two shared-memory tree reductions collapse the per-thread partial sums.
__global__ void gpuKendall(const float * a, size_t na,
    const float * b, size_t nb, size_t sampleSize, double * results)
{
    size_t
        i, j, tests,
        tx = threadIdx.x, ty = threadIdx.y,
        bx = blockIdx.x, by = blockIdx.y,
        rowa = bx * sampleSize, rowb = by * sampleSize;
    float
        discordant, concordant = 0.f,
        numer, denom;
    __shared__ float threadSums[NUMTHREADS*NUMTHREADS];

    // Count pairs (i, j), i < j, ordered the same way in both rows.
    // NOTE(review): pairs tied in BOTH rows are also counted as concordant
    // here — confirm this matches the intended tie-handling variant of tau.
    for(i = tx; i < sampleSize; i += NUMTHREADS) {
        for(j = i+1+ty; j < sampleSize; j += NUMTHREADS) {
            tests = ((a[rowa+j] > a[rowa+i]) && (b[rowb+j] > b[rowb+i]))
                + ((a[rowa+j] < a[rowa+i]) && (b[rowb+j] < b[rowb+i]))
                + ((a[rowa+j] == a[rowa+i]) && (b[rowb+j] == b[rowb+i]));
            concordant = concordant + (float)tests;
        }
    }
    threadSums[tx*NUMTHREADS+ty] = concordant;

    // Tree-reduce over ty within each tx row, then over tx.  The
    // __syncthreads() calls sit outside the conditionals, so every thread
    // reaches every barrier (no divergent-barrier hazard).
    __syncthreads();
    for(i = NUMTHREADS >> 1; i > 0; i >>= 1) {
        if(ty < i)
            threadSums[tx*NUMTHREADS+ty] += threadSums[tx*NUMTHREADS+ty+i];
        __syncthreads();
    }
    for(i = NUMTHREADS >> 1; i > 0; i >>= 1) {
        if((tx < i) && (ty == 0))
            threadSums[tx*NUMTHREADS] += threadSums[(tx+i)*NUMTHREADS];
        __syncthreads();
    }

    // Thread (0,0) finalizes: discordant = totalPairs - concordant, and
    // tau = (C - D) / (n*(n-1)/2), written to results[by*na + bx].
    if((tx == 0) && (ty == 0)) {
        concordant = threadSums[0];
        denom = (float)sampleSize;
        denom = (denom * (denom - 1.f)) / 2.f; discordant = denom - concordant;
        numer = concordant - discordant;
        results[by*na+bx] = ((double)numer)/((double)denom);
    }
}
| 63546100d7a9d0638ff72040d452577658cda930.cu | #define NUMTHREADS 16
#define THREADWORK 32
// Computes a Kendall-tau style correlation between row blockIdx.x of `a` and
// row blockIdx.y of `b`, one sample pair per block.
// Launch layout: grid (na, nb), block (NUMTHREADS, NUMTHREADS).  Each thread
// counts concordant pairs over a strided subset of (i, j) index pairs, then
// two shared-memory tree reductions collapse the per-thread partial sums.
__global__ void gpuKendall(const float * a, size_t na,
    const float * b, size_t nb, size_t sampleSize, double * results)
{
    size_t
        i, j, tests,
        tx = threadIdx.x, ty = threadIdx.y,
        bx = blockIdx.x, by = blockIdx.y,
        rowa = bx * sampleSize, rowb = by * sampleSize;
    float
        discordant, concordant = 0.f,
        numer, denom;
    __shared__ float threadSums[NUMTHREADS*NUMTHREADS];

    // Count pairs (i, j), i < j, ordered the same way in both rows.
    // NOTE(review): pairs tied in BOTH rows are also counted as concordant
    // here — confirm this matches the intended tie-handling variant of tau.
    for(i = tx; i < sampleSize; i += NUMTHREADS) {
        for(j = i+1+ty; j < sampleSize; j += NUMTHREADS) {
            tests = ((a[rowa+j] > a[rowa+i]) && (b[rowb+j] > b[rowb+i]))
                + ((a[rowa+j] < a[rowa+i]) && (b[rowb+j] < b[rowb+i]))
                + ((a[rowa+j] == a[rowa+i]) && (b[rowb+j] == b[rowb+i]));
            concordant = concordant + (float)tests;
        }
    }
    threadSums[tx*NUMTHREADS+ty] = concordant;

    // Tree-reduce over ty within each tx row, then over tx.  The
    // __syncthreads() calls sit outside the conditionals, so every thread
    // reaches every barrier (no divergent-barrier hazard).
    __syncthreads();
    for(i = NUMTHREADS >> 1; i > 0; i >>= 1) {
        if(ty < i)
            threadSums[tx*NUMTHREADS+ty] += threadSums[tx*NUMTHREADS+ty+i];
        __syncthreads();
    }
    for(i = NUMTHREADS >> 1; i > 0; i >>= 1) {
        if((tx < i) && (ty == 0))
            threadSums[tx*NUMTHREADS] += threadSums[(tx+i)*NUMTHREADS];
        __syncthreads();
    }

    // Thread (0,0) finalizes: discordant = totalPairs - concordant, and
    // tau = (C - D) / (n*(n-1)/2), written to results[by*na + bx].
    if((tx == 0) && (ty == 0)) {
        concordant = threadSums[0];
        denom = (float)sampleSize;
        denom = (denom * (denom - 1.f)) / 2.f; discordant = denom - concordant;
        numer = concordant - discordant;
        results[by*na+bx] = ((double)numer)/((double)denom);
    }
}
|
ff4de01b408b845547a4b5a05a9884d28d6773d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by przemo on 27.12.2019.
//
#include "FuseLayer.h"
namespace NeuralNetworkGPU
{
/*
*
*/
/*
 * Forward kernel: the fused layer's output is the concatenation of the two
 * source layers' outputs; the local delta buffer is cleared for the next
 * backward pass.  One thread per output element.
 */
__global__ void determineOutputFuncFuse(float *t_input1, int *t_inputSize1, float *t_input2, int *t_inputSize2,
        float *t_output,
        float *t_deltas)
{
    long idx = threadIdx.x + blockIdx.x * blockDim.x;
    t_deltas[idx] = 0;
    t_output[idx] = (idx < *t_inputSize1)
            ? t_input1[idx]
            : t_input2[idx - *t_inputSize1];
}
/*
*
*/
/*
 * Backward kernel: routes each delta back to whichever source layer produced
 * the corresponding output element (skipping null delta buffers), then resets
 * the local delta.
 */
__global__ void learnFuncFuse(float *t_input1, int *t_inputSize1, float *t_input2, int *t_inputSize2,
        float *t_output,
        float *t_deltas, float *t_prevDeltas1, float *t_prevDeltas2)
{
    long idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < *t_inputSize1)
    {
        if (t_prevDeltas1 != nullptr)
        {
            t_prevDeltas1[idx] = t_deltas[idx];
        }
    }
    else if (t_prevDeltas2 != nullptr)
    {
        t_prevDeltas2[idx - *t_inputSize1] = t_deltas[idx];
    }
    t_deltas[idx] = 0;
}
/*
*
*/
/*
 * Builds a fuse (concatenation) layer over two source layers: the output is
 * both sources' outputs back-to-back.  Allocates device buffers for the two
 * input sizes, the concatenated output, and the backprop delta buffer, plus
 * host-side mirrors of output/deltas.
 * NOTE(review): hip* return codes are not checked — allocation failures
 * would go unnoticed until the first kernel launch.
 */
FuseLayer::FuseLayer(NeuronsPtr t_prevLayerReference1, NeuronsPtr t_prevLayerReference2)
{
    // Output length = sum of both fused layers' sizes.
    size = t_prevLayerReference1.size + t_prevLayerReference2.size;
    de_input1 = t_prevLayerReference1.inputPtr;
    de_input2 = t_prevLayerReference2.inputPtr;
    idFusedLayer1 = t_prevLayerReference1.id;
    idFusedLayer2 = t_prevLayerReference2.id;

    // Input/output: per-source sizes are kept on the device so the kernels
    // can decide which source an element belongs to.
    hipMalloc( (void **) &d_inputSize1, sizeof(int));
    hipMemcpy(d_inputSize1, &(t_prevLayerReference1.size), sizeof(int), hipMemcpyHostToDevice);
    inputSize1 = t_prevLayerReference1.size;

    hipMalloc( (void **) &d_inputSize2, sizeof(int));
    hipMemcpy(d_inputSize2, &(t_prevLayerReference2.size), sizeof(int), hipMemcpyHostToDevice);
    inputSize2 = t_prevLayerReference2.size;

    hipMalloc( (void **) &d_output, sizeof(float)*size);
    output = (float*) std::malloc(sizeof(float)*size);

    // basic to learn
    hipMalloc( (void **) &d_deltas, sizeof(float)*size);
    deltas = (float*) malloc(sizeof(float)*size);
    de_prevDeltas1 = t_prevLayerReference1.deltaPtr;
    de_prevDeltas2 = t_prevLayerReference2.deltaPtr;

    // split to blocks: search for a factorization
    // numberOfThreads * numberOfBlocks == size with numberOfThreads <= 800;
    // aborts (assert) after 20 candidate block counts.
    numberOfBlocks = 1;
    while(1)
    {
        numberOfThreads = size/numberOfBlocks;
        if(numberOfThreads<=800 && numberOfThreads*numberOfBlocks==size) break;
        numberOfBlocks++;
        if(numberOfBlocks > 20 )
        {
            std::cout << "1 Layer size: "<< t_prevLayerReference1.size << "\n";
            std::cout << "2 Layer size: "<< t_prevLayerReference2.size << "\n";
            assert(numberOfBlocks < 20 && "Could not match thread/block size");
        }
    }
}
/*
*
*/
/*
 * Releases all device and host buffers owned by this layer.
 * NOTE(review): hipFree return codes are not checked.
 */
FuseLayer::~FuseLayer()
{
    hipFree(d_inputSize1);
    hipFree(d_inputSize2);
    hipFree(d_output);
    hipFree(d_deltas);
    free(output);
    free(deltas);
}
/*
*
*/
/*
 * Copies the device output buffer to the host and returns it widened to
 * doubles.  Blocking: hipMemcpy synchronizes with preceding kernels.
 */
std::vector<double> FuseLayer::getOutput()
{
    hipMemcpy(output, d_output, sizeof(float)*size, hipMemcpyDeviceToHost);
    std::vector<double> result(output, output + size);
    return result;
}
/*
 * Forward pass: launches the concatenation kernel over all `size` elements.
 * NOTE(review): numberOfThreads is passed as the grid dimension and
 * numberOfBlocks as the block dimension — the names are swapped relative to
 * the launch semantics, but numberOfThreads * numberOfBlocks == size (see
 * the constructor), so each output element is still covered exactly once.
 */
void FuseLayer::determineOutput()
{
    hipLaunchKernelGGL(( determineOutputFuncFuse), dim3(numberOfThreads) , dim3(numberOfBlocks) , 0, 0, de_input1, d_inputSize1, de_input2, d_inputSize2,
            d_output,
            d_deltas);
}
/*
 * Backward pass (SGD): launches the kernel that routes deltas back to the
 * two source layers and clears the local delta buffer.  A fuse layer has no
 * trainable weights, so "learning" is pure delta propagation.
 */
void FuseLayer::learnSGD()
{
//	int64 timeBefore = cv::getTickCount();
    hipLaunchKernelGGL(( learnFuncFuse), dim3(numberOfThreads) , dim3(numberOfBlocks) , 0, 0, de_input1, d_inputSize1, de_input2, d_inputSize2,
            d_output,
            d_deltas, de_prevDeltas1, de_prevDeltas2);
//	int64 afterBefore = cv::getTickCount();
//	std::cout << "Sigm: " << (afterBefore - timeBefore)/ cv::getTickFrequency() << "\n";
}
/*
 * Backward pass (Adam): identical body to learnSGD — a fuse layer has no
 * weights, so no Adam moment state is needed; only deltas are propagated.
 */
void FuseLayer::learnAdam()
{
//	int64 timeBefore = cv::getTickCount();
    hipLaunchKernelGGL(( learnFuncFuse), dim3(numberOfThreads) , dim3(numberOfBlocks) , 0, 0, de_input1, d_inputSize1, de_input2, d_inputSize2,
            d_output,
            d_deltas, de_prevDeltas1, de_prevDeltas2);
//	int64 afterBefore = cv::getTickCount();
//	std::cout << "Sigm: " << (afterBefore - timeBefore)/ cv::getTickFrequency() << "\n";
}
/*
*
*/
/*
 * Exposes this layer's device output/delta buffers so a following layer can
 * consume them as its input.
 */
NeuronsPtr FuseLayer::getNeuronPtr()
{
    return NeuronsPtr(layerId, d_output,size, d_deltas);
}
/*
*
*/
/*
 * Serializes this layer: its type id followed by the ids of the two fused
 * source layers.  Values are written as space-separated floats to match the
 * network file format (see loadFromFile).
 */
void FuseLayer::saveToFile(std::ofstream &t_file)
{
    t_file << (float) getLayerTypeId() << ' ';
    t_file << (float) idFusedLayer1 << ' ';
    t_file << (float) idFusedLayer2 << ' ';
}
/*
*
*/
/*
 * Deserializes a fuse layer: reads the two source-layer ids (stored as
 * floats by saveToFile) and rebuilds the layer from the already-loaded
 * previous layers.
 * Fix: the ids are now explicitly converted to integral indices (the old
 * code subscripted the vector with a float, a narrowing conversion), and a
 * bounds assert guards against corrupt files.
 */
FuseLayer* FuseLayer::loadFromFile(std::ifstream &t_file, std::vector<NeuronsPtr> &t_prevLayerReferences)
{
    float idFusedLayer1 = 0.f, idFusedLayer2 = 0.f;
    t_file >> idFusedLayer1;
    t_file >> idFusedLayer2;
    std::size_t idx1 = static_cast<std::size_t>(idFusedLayer1);
    std::size_t idx2 = static_cast<std::size_t>(idFusedLayer2);
    assert(idx1 < t_prevLayerReferences.size() && idx2 < t_prevLayerReferences.size());
    return new FuseLayer(t_prevLayerReferences[idx1], t_prevLayerReferences[idx2]);
}
/*
*
*/
/*
 * Prints a one-line summary: this layer's id and the ids of the two layers
 * whose outputs it concatenates.
 */
void FuseLayer::printInfo()
{
    std::cout << "  (" << layerId << ") Fuse <-- " << idFusedLayer1 << "," << idFusedLayer2 << "\n";
}
}
| ff4de01b408b845547a4b5a05a9884d28d6773d7.cu | //
// Created by przemo on 27.12.2019.
//
#include "FuseLayer.h"
namespace NeuralNetworkGPU
{
/*
*
*/
/*
 * Forward kernel: the fused layer's output is the concatenation of the two
 * source layers' outputs; the local delta buffer is cleared for the next
 * backward pass.  One thread per output element.
 */
__global__ void determineOutputFuncFuse(float *t_input1, int *t_inputSize1, float *t_input2, int *t_inputSize2,
        float *t_output,
        float *t_deltas)
{
    long idx = threadIdx.x + blockIdx.x * blockDim.x;
    t_deltas[idx] = 0;
    t_output[idx] = (idx < *t_inputSize1)
            ? t_input1[idx]
            : t_input2[idx - *t_inputSize1];
}
/*
*
*/
/*
 * Backward kernel: routes each delta back to whichever source layer produced
 * the corresponding output element (skipping null delta buffers), then resets
 * the local delta.
 */
__global__ void learnFuncFuse(float *t_input1, int *t_inputSize1, float *t_input2, int *t_inputSize2,
        float *t_output,
        float *t_deltas, float *t_prevDeltas1, float *t_prevDeltas2)
{
    long idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < *t_inputSize1)
    {
        if (t_prevDeltas1 != nullptr)
        {
            t_prevDeltas1[idx] = t_deltas[idx];
        }
    }
    else if (t_prevDeltas2 != nullptr)
    {
        t_prevDeltas2[idx - *t_inputSize1] = t_deltas[idx];
    }
    t_deltas[idx] = 0;
}
/*
*
*/
/*
 * Builds a fuse (concatenation) layer over two source layers: the output is
 * both sources' outputs back-to-back.  Allocates device buffers for the two
 * input sizes, the concatenated output, and the backprop delta buffer, plus
 * host-side mirrors of output/deltas.
 * NOTE(review): cuda* return codes are not checked — allocation failures
 * would go unnoticed until the first kernel launch.
 */
FuseLayer::FuseLayer(NeuronsPtr t_prevLayerReference1, NeuronsPtr t_prevLayerReference2)
{
    // Output length = sum of both fused layers' sizes.
    size = t_prevLayerReference1.size + t_prevLayerReference2.size;
    de_input1 = t_prevLayerReference1.inputPtr;
    de_input2 = t_prevLayerReference2.inputPtr;
    idFusedLayer1 = t_prevLayerReference1.id;
    idFusedLayer2 = t_prevLayerReference2.id;

    // Input/output: per-source sizes are kept on the device so the kernels
    // can decide which source an element belongs to.
    cudaMalloc( (void **) &d_inputSize1, sizeof(int));
    cudaMemcpy(d_inputSize1, &(t_prevLayerReference1.size), sizeof(int), cudaMemcpyHostToDevice);
    inputSize1 = t_prevLayerReference1.size;

    cudaMalloc( (void **) &d_inputSize2, sizeof(int));
    cudaMemcpy(d_inputSize2, &(t_prevLayerReference2.size), sizeof(int), cudaMemcpyHostToDevice);
    inputSize2 = t_prevLayerReference2.size;

    cudaMalloc( (void **) &d_output, sizeof(float)*size);
    output = (float*) std::malloc(sizeof(float)*size);

    // basic to learn
    cudaMalloc( (void **) &d_deltas, sizeof(float)*size);
    deltas = (float*) malloc(sizeof(float)*size);
    de_prevDeltas1 = t_prevLayerReference1.deltaPtr;
    de_prevDeltas2 = t_prevLayerReference2.deltaPtr;

    // split to blocks: search for a factorization
    // numberOfThreads * numberOfBlocks == size with numberOfThreads <= 800;
    // aborts (assert) after 20 candidate block counts.
    numberOfBlocks = 1;
    while(1)
    {
        numberOfThreads = size/numberOfBlocks;
        if(numberOfThreads<=800 && numberOfThreads*numberOfBlocks==size) break;
        numberOfBlocks++;
        if(numberOfBlocks > 20 )
        {
            std::cout << "1 Layer size: "<< t_prevLayerReference1.size << "\n";
            std::cout << "2 Layer size: "<< t_prevLayerReference2.size << "\n";
            assert(numberOfBlocks < 20 && "Could not match thread/block size");
        }
    }
}
/*
*
*/
/*
 * Releases all device and host buffers owned by this layer.
 * NOTE(review): cudaFree return codes are not checked.
 */
FuseLayer::~FuseLayer()
{
    cudaFree(d_inputSize1);
    cudaFree(d_inputSize2);
    cudaFree(d_output);
    cudaFree(d_deltas);
    free(output);
    free(deltas);
}
/*
*
*/
/*
 * Copies the device output buffer to the host and returns it widened to
 * doubles.  Blocking: cudaMemcpy synchronizes with preceding kernels.
 */
std::vector<double> FuseLayer::getOutput()
{
    cudaMemcpy(output, d_output, sizeof(float)*size, cudaMemcpyDeviceToHost);
    std::vector<double> result(output, output + size);
    return result;
}
/*
 * Forward pass: launches the concatenation kernel over all `size` elements.
 * NOTE(review): numberOfThreads is passed as the grid dimension and
 * numberOfBlocks as the block dimension — the names are swapped relative to
 * the <<<grid, block>>> semantics, but numberOfThreads * numberOfBlocks ==
 * size (see the constructor), so each element is still covered exactly once.
 */
void FuseLayer::determineOutput()
{
    determineOutputFuncFuse<<< numberOfThreads , numberOfBlocks >>>(de_input1, d_inputSize1, de_input2, d_inputSize2,
            d_output,
            d_deltas);
}
/*
 * Backward pass (SGD): launches the kernel that routes deltas back to the
 * two source layers and clears the local delta buffer.  A fuse layer has no
 * trainable weights, so "learning" is pure delta propagation.
 */
void FuseLayer::learnSGD()
{
//	int64 timeBefore = cv::getTickCount();
    learnFuncFuse<<< numberOfThreads , numberOfBlocks >>>(de_input1, d_inputSize1, de_input2, d_inputSize2,
            d_output,
            d_deltas, de_prevDeltas1, de_prevDeltas2);
//	int64 afterBefore = cv::getTickCount();
//	std::cout << "Sigm: " << (afterBefore - timeBefore)/ cv::getTickFrequency() << "\n";
}
/*
 * Backward pass (Adam): identical body to learnSGD — a fuse layer has no
 * weights, so no Adam moment state is needed; only deltas are propagated.
 */
void FuseLayer::learnAdam()
{
//	int64 timeBefore = cv::getTickCount();
    learnFuncFuse<<< numberOfThreads , numberOfBlocks >>>(de_input1, d_inputSize1, de_input2, d_inputSize2,
            d_output,
            d_deltas, de_prevDeltas1, de_prevDeltas2);
//	int64 afterBefore = cv::getTickCount();
//	std::cout << "Sigm: " << (afterBefore - timeBefore)/ cv::getTickFrequency() << "\n";
}
/*
*
*/
/*
 * Exposes this layer's device output/delta buffers so a following layer can
 * consume them as its input.
 */
NeuronsPtr FuseLayer::getNeuronPtr()
{
    return NeuronsPtr(layerId, d_output,size, d_deltas);
}
/*
*
*/
/*
 * Serializes this layer: its type id followed by the ids of the two fused
 * source layers.  Values are written as space-separated floats to match the
 * network file format (see loadFromFile).
 */
void FuseLayer::saveToFile(std::ofstream &t_file)
{
    t_file << (float) getLayerTypeId() << ' ';
    t_file << (float) idFusedLayer1 << ' ';
    t_file << (float) idFusedLayer2 << ' ';
}
/*
*
*/
/*
 * Deserializes a fuse layer: reads the two source-layer ids (stored as
 * floats by saveToFile) and rebuilds the layer from the already-loaded
 * previous layers.
 * Fix: the ids are now explicitly converted to integral indices (the old
 * code subscripted the vector with a float, a narrowing conversion), and a
 * bounds assert guards against corrupt files.
 */
FuseLayer* FuseLayer::loadFromFile(std::ifstream &t_file, std::vector<NeuronsPtr> &t_prevLayerReferences)
{
    float idFusedLayer1 = 0.f, idFusedLayer2 = 0.f;
    t_file >> idFusedLayer1;
    t_file >> idFusedLayer2;
    std::size_t idx1 = static_cast<std::size_t>(idFusedLayer1);
    std::size_t idx2 = static_cast<std::size_t>(idFusedLayer2);
    assert(idx1 < t_prevLayerReferences.size() && idx2 < t_prevLayerReferences.size());
    return new FuseLayer(t_prevLayerReferences[idx1], t_prevLayerReferences[idx2]);
}
/*
*
*/
/*
 * Prints a one-line summary of this layer to stdout: its own id and the
 * ids of the two layers it fuses.
 */
void FuseLayer::printInfo()
{
    std::cout << "  (" << layerId << ") Fuse <-- " << idFusedLayer1 << "," << idFusedLayer2 << "\n";
}
}
|
e667e61aede371cef24d81ccc5107ecf8ee65289.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/core_io.h"
#include "cublas_helpers.h"
#include "gemm_operation_profiler.h"
#include "gpu_timer.h"
#include "cutlass/library/singleton.h"
#include "cutlass/library/library.h"
#include "cutlass/library/handle.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
/// Registers the GEMM problem-space arguments (kind, problem shape, operand
/// tensors, epilogue scalars, split-K and batch options) with the base
/// OperationProfiler, and declares cuBLAS as the reference provider.
GemmOperationProfiler::GemmOperationProfiler(Options const &options):
  OperationProfiler(
    options,
    library::OperationKind::kGemm,
    {
      {ArgumentTypeID::kEnumerated, {"gemm_kind"}, "Variant of GEMM (universal, gemm, planar_complex, planar_complex_array)"},
      {ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the GEMM problem space"},
      {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the GEMM problem space"},
      {ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the GEMM problem space"},
      {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
      {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"},
      {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
      {ArgumentTypeID::kTensor, {"D"}, "Tensor storing the D output"},
      {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
      {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
      {ArgumentTypeID::kEnumerated, {"split_k_mode", "split-k-mode"}, "Variant of split K mode(serial, parallel)"},
      {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
      {ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of GEMMs computed in one batch"},
    },
    { library::Provider::kCUBLAS}
  ) {
  description_ = " General matrix-matrix product. D = alpha * A*B + beta * C";
}
/// Destructor -- no explicit teardown; members release their resources in
/// their own destructors.
GemmOperationProfiler::~GemmOperationProfiler() {
}
/// Prints usage statement for the math function: the "GEMM" banner followed
/// by the argument table registered in the constructor (via the base class).
void GemmOperationProfiler::print_usage(std::ostream &out) const {
  out << "GEMM" << "\n\n";
  OperationProfiler::print_usage(out);
}
/// Prints examples
/// Emits a fixed block of command-line invocations demonstrating the GEMM
/// profiler options (problem-size sweeps, split-K, input distributions).
void GemmOperationProfiler::print_examples(std::ostream &out) const {
  out << "\nExamples:\n\n"
    << "Profile a particular problem size:\n"
    << "  $ cutlass_profiler --operation=Gemm --m=1024 --n=1024 --k=128\n\n"
    << "Schmoo over problem size and beta:\n"
    << "  $ cutlass_profiler --operation=Gemm --m=1024:4096:256 --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
    << "Schmoo over accumulator types:\n"
    << "  $ cutlass_profiler --operation=Gemm --accumulator-type=f16,f32\n\n"
    << "Run when A is f16 with column-major and B is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n"
    << "  $ cutlass_profiler --operation=Gemm --A=f16:column --B=*:row\n\n"
    << "Profile a particular problem size with split K and parallel reduction:\n"
    << "  $ cutlass_profiler --operation=Gemm --split_k_mode=parallel --split_k_slices=2 --m=1024 --n=1024 --k=128\n\n"
    << "Using various input value distribution:\n"
    << "  $ cutlass_profiler --operation=Gemm --dist=uniform,min:0,max:3\n"
    << "  $ cutlass_profiler --operation=Gemm --dist=gaussian,mean:0,stddev:3\n"
    << "  $ cutlass_profiler --operation=Gemm --dist=sequential,start:0,delta:1\n\n"
    << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
    << " $ cutlass_profiler --operation=Gemm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
    << "Test your changes to gemm kernels with a quick functional test and save results in functional-test.csv:\n"
    << " $ cutlass_profiler  --operation=Gemm \\ \n"
    << "   --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
    << "   --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
    << "   --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
    << "   --beta=0,1,2 --profiling-iterations=1 \\ \n"
    << "   --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
// used this for debugging: renders a byte vector as a "0x..." hex string,
// printing the highest-indexed byte first (i.e. in reverse index order).
static std::string byte_string(std::vector<uint8_t> const &bytes) {
  std::stringstream ss;
  ss << "0x";
  for (size_t idx = bytes.size(); idx > 0; --idx) {
    ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
  }
  return ss.str();
}
#endif
/// Parses the GEMM problem description from the problem space.
/// Fills in problem dimensions (defaulting m/n/k to 1024), split-K and
/// batch options, epilogue scalars (alpha defaults to 1, beta to 0), and
/// derives packed leading dimensions for A, B, and C.
/// Returns kErrorInvalidProblem for inconsistent or unsatisfiable inputs.
Status GemmOperationProfiler::GemmProblem::parse(
  library::GemmDescription const &operation_desc,
  ProblemSpace const &problem_space,
  ProblemSpace::Problem const &problem) {
  if (!arg_as_int(this->m, "m", problem_space, problem)) {
    // default value
    this->m = 1024;
  }
  if (!arg_as_int(this->n, "n", problem_space, problem)) {
    // default value
    this->n = 1024;
  }
  if (!arg_as_int(this->k, "k", problem_space, problem)) {
    // default value
    this->k = 1024;
  }
  if (!arg_as_SplitKModeID(this->split_k_mode, "split_k_mode", problem_space, problem)) {
    // default value
    this->split_k_mode = library::SplitKMode::kSerial;
  }
  // Universal mode follows the split-K / batch options parsed below.
  // (A redundant earlier assignment of kGemm was removed; this is the
  // single authoritative initialization.)
  this->mode = library::GemmUniversalMode::kGemm;
  if(this->split_k_mode == library::SplitKMode::kParallel) {
    this->mode = library::GemmUniversalMode::kGemmSplitKParallel;
  }
  if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
    // default value
    this->split_k_slices = 1;
  }
  if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
    // default value
    this->batch_count = 1;
  } else if (this->batch_count > 1) {
    this->mode = library::GemmUniversalMode::kBatched;
  }
  if (this->split_k_slices > 1 && this->batch_count > 1) {
    // At least one of these must be one
    return Status::kErrorInvalidProblem;
  }
  // Each operand tensor's element type / layout must satisfy the kernel's.
  if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
    return Status::kErrorInvalidProblem;
  }
  if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) {
    return Status::kErrorInvalidProblem;
  }
  if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
    return Status::kErrorInvalidProblem;
  }
  if (!tensor_description_satisfies(operation_desc.D, "D", problem_space, problem)) {
    return Status::kErrorInvalidProblem;
  }
  // Epilogue scalars: fall back to alpha=1, beta=0 in the epilogue's
  // element type when not specified on the command line.
  if (!arg_as_scalar(
    this->alpha,
    operation_desc.element_epilogue,
    "alpha",
    problem_space,
    problem)) {
    if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
      return Status::kErrorInternal;
    }
  }
  if (!arg_as_scalar(
    this->beta,
    operation_desc.element_epilogue,
    "beta",
    problem_space,
    problem)) {
    if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
      return Status::kErrorInternal;
    }
  }
  // Packed (dense) leading dimensions for each operand's layout.
  this->lda = DeviceAllocation::get_packed_layout(
    operation_desc.A.layout, {int(this->m), int(this->k)}).front();
  this->ldb = DeviceAllocation::get_packed_layout(
    operation_desc.B.layout, {int(this->k), int(this->n)}).front();
  this->ldc = DeviceAllocation::get_packed_layout(
    operation_desc.C.layout, {int(this->m), int(this->n)}).front();
  return Status::kSuccess;
}
/// Total number of bytes loaded
/// Accumulates bytes read (A, B, and -- when beta != 0 -- C) plus bytes
/// written (the m-by-n output), scaled by the batch count.
int64_t GemmOperationProfiler::GemmProblem::bytes(library::GemmDescription const &operation_desc) const {
  // Input bytes read and Output bytes written for the gemm problem
  int64_t total = int64_t(library::sizeof_bits(operation_desc.A.element) * m / 8) * k;
  total += int64_t(library::sizeof_bits(operation_desc.B.element) * n / 8) * k;
  total += int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n;
  // C is also read as a source operand whenever any beta byte is non-zero.
  bool beta_is_nonzero = false;
  for (uint8_t b : beta) {
    if (b != 0) {
      beta_is_nonzero = true;
      break;
    }
  }
  if (beta_is_nonzero) {
    total += int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n;
  }
  return total * batch_count;
}
/// Total number of flops computed
/// Counts 2 * (m*n*k + m*n) real operations per batch entry (multiply-add
/// pairs plus the epilogue), then scales for complex math instructions.
int64_t GemmOperationProfiler::GemmProblem::flops(library::GemmDescription const &operation_desc) const {
  int64_t flops_ = (int64_t(m) * n * k + m * n) * 2 * batch_count;
  // complex-valued support
  switch (operation_desc.tile_description.math_instruction.math_operation) {
  case library::MathOperationID::kMultiplyAddComplex:
  case library::MathOperationID::kMultiplyAddComplexFastF32:
    // One complex MAC costs four real MACs.
    flops_ *= 4;
    break;
  case library::MathOperationID::kMultiplyAddGaussianComplex:
    // Gauss's trick reduces a complex MAC to three real MACs.
    flops_ *= 3;
    break;
  default:
    break;
  }
  return flops_;
}
/// Initializes a performance result
/// Records the problem's argument values (operand descriptions, dimensions,
/// split-K/batch settings, epilogue scalars) into the report row.
void GemmOperationProfiler::GemmProblem::initialize_result(
  PerformanceResult &result,
  library::GemmDescription const &operation_desc,
  ProblemSpace const &problem_space) {
  result.arguments.resize(problem_space.rank());
  set_argument(result, "gemm_kind", problem_space, library::to_string(operation_desc.gemm_kind));
  // Operands are reported as "<element>:<layout>" strings.
  set_argument(result, "A", problem_space,
    std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
  set_argument(result, "B", problem_space,
    std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout));
  set_argument(result, "C", problem_space,
    std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));
  set_argument(result, "D", problem_space,
    std::string(library::to_string(operation_desc.D.element)) + ":" + library::to_string(operation_desc.D.layout));
  set_argument(result, "m", problem_space, m);
  set_argument(result, "n", problem_space, n);
  set_argument(result, "k", problem_space, k);
  set_argument(result, "split_k_mode", problem_space, library::to_string(split_k_mode));
  set_argument(result, "split_k_slices", problem_space, split_k_slices);
  set_argument(result, "batch_count", problem_space, batch_count);
  // Scalars are rendered in the epilogue's element type.
  set_argument(result, "alpha", problem_space,
    library::lexical_cast(alpha, operation_desc.element_epilogue));
  set_argument(result, "beta", problem_space,
    library::lexical_cast(beta, operation_desc.element_epilogue));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
/// Parses the problem into problem_, populates the GEMM configuration and
/// (pointer-less) arguments in gemm_workspace_, sets up the parallel-split-K
/// reduction if requested, and asks the operation whether it can implement
/// the configuration.
Status GemmOperationProfiler::initialize_configuration(
  Options const &options,
  PerformanceReport &report,
  DeviceContext &device_context,
  library::Operation const *operation,
  ProblemSpace const &problem_space,
  ProblemSpace::Problem const &problem) {
  library::GemmDescription const &operation_desc =
    static_cast<library::GemmDescription const &>(operation->description());
  // Only universal GEMM operations are profiled by this profiler.
  if (operation_desc.gemm_kind != library::GemmKind::kUniversal) {
    return Status::kErrorInvalidProblem;
  }
  Status status = problem_.parse(operation_desc, problem_space, problem);
  if (status != Status::kSuccess) {
    return status;
  }
  gemm_workspace_.configuration.mode = problem_.mode;
  gemm_workspace_.configuration.problem_size.m() = int(problem_.m);
  gemm_workspace_.configuration.problem_size.n() = int(problem_.n);
  gemm_workspace_.configuration.problem_size.k() = int(problem_.k);
  gemm_workspace_.configuration.lda = problem_.lda;
  gemm_workspace_.configuration.ldb = problem_.ldb;
  gemm_workspace_.configuration.ldc = problem_.ldc;
  gemm_workspace_.configuration.ldd = problem_.ldc;
  // batch_count doubles as the split-K slice count in non-batched modes.
  if (problem_.mode == library::GemmUniversalMode::kBatched) {
    gemm_workspace_.configuration.batch_count = problem_.batch_count;
  }
  else {
    gemm_workspace_.configuration.batch_count = problem_.split_k_slices;
  }
  // Tensor pointers are bound later, in initialize_workspace()/verify_cutlass().
  gemm_workspace_.arguments.A = nullptr;
  gemm_workspace_.arguments.B = nullptr;
  gemm_workspace_.arguments.C = nullptr;
  gemm_workspace_.arguments.D = nullptr;
  gemm_workspace_.arguments.alpha = problem_.alpha.data();
  gemm_workspace_.arguments.beta = problem_.beta.data();
  gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
  // initialize reduction operation for parallel splitKMode
  if (problem_.split_k_mode == library::SplitKMode::kParallel) {
    if (!initialize_reduction_configuration_(operation, problem)) {
      return Status::kErrorInternal;
    }
  }
  initialize_result_(this->model_result_, options, operation_desc, problem_space);
  return operation->can_implement(&gemm_workspace_.configuration, &gemm_workspace_.arguments);
}
/// Initializes the performance result
/// Seeds a result row for the CUTLASS provider: operation name, problem
/// arguments, and the modeled byte/flop counts used to derive throughput.
void GemmOperationProfiler::initialize_result_(
  PerformanceResult &result,
  Options const &options,
  library::GemmDescription const &operation_desc,
  ProblemSpace const &problem_space) {
  result.provider = library::Provider::kCUTLASS;
  result.disposition = Disposition::kNotRun;
  result.status = Status::kSuccess;
  result.operation_name = operation_desc.name;
  problem_.initialize_result(result, operation_desc, problem_space);
  OperationProfiler::initialize_result_(result, operation_desc, problem_space);
  result.bytes = problem_.bytes(operation_desc);
  result.flops = problem_.flops(operation_desc);
  result.runtime = 0;
}
/// Initialize reduction problem dimensions and library::Operation
/// For parallel split-K: prepares alpha=1/beta=0 scalars for the partial
/// GEMM, configures the n-by-m workspace reduction, and looks up the
/// matching reduction operation in the library. Returns false on failure.
bool GemmOperationProfiler::initialize_reduction_configuration_(
  library::Operation const *operation,
  ProblemSpace::Problem const &problem) {
  library::GemmDescription const &gemm_desc =
    static_cast<library::GemmDescription const&>(operation->description());
  // The partial GEMM runs with alpha=1, beta=0; the user's alpha/beta are
  // applied in the reduction step instead.
  if (!cast_from_double(problem_.alpha_one, gemm_desc.element_epilogue, 1)) {
    return false;
  }
  if (!cast_from_double(problem_.beta_zero, gemm_desc.element_epilogue, 0)) {
    return false;
  }
  /// initialize library::ReductionConfiguration
  gemm_workspace_.reduction_configuration.problem_size     = gemm::GemmCoord(int(problem_.n), int(problem_.m), int(problem_.k)).mn();
  gemm_workspace_.reduction_configuration.partitions       = int(problem_.split_k_slices);
  gemm_workspace_.reduction_configuration.partition_stride = gemm::GemmCoord(int(problem_.n), int(problem_.m), int(problem_.k)).mn().product();
  gemm_workspace_.reduction_configuration.ldw              = problem_.ldc;
  gemm_workspace_.reduction_configuration.lds              = problem_.ldc;
  gemm_workspace_.reduction_configuration.ldd              = problem_.ldc;
  // find reduction operation
  library::ReductionFunctionalKey reduction_key(
    library::Provider::kCUTLASS,
    gemm_desc.tile_description.math_instruction.element_accumulator,  // element workspace
    gemm_desc.tile_description.math_instruction.element_accumulator,  // element accumulator
    gemm_desc.D.element,                                              // element output
    gemm_desc.element_epilogue                                        // element compute
  );
  auto reduction_it = library::Singleton::get().operation_table.reduction_operations.find(reduction_key);
  if (reduction_it == library::Singleton::get().operation_table.reduction_operations.end()) {
    return false;
  }
  // initialize reduction operation required for parallel split-k operator
  reduction_op_ = reduction_it->second;
  // reduction operation found and initialized
  return true;
}
/// Initializes workspace
/// Allocates device tensors for A/B/C and the Computed/Reference outputs
/// (replicated problem_count times to defeat L2 residency), fills in the
/// runtime arguments, initializes the CUTLASS operation's host/device
/// workspaces (and the reduction's for parallel split-K), and pushes a
/// not-yet-run result entry for the CUTLASS provider.
Status GemmOperationProfiler::initialize_workspace(
  Options const &options,
  PerformanceReport &report,
  DeviceContext &device_context,
  library::Operation const *operation,
  ProblemSpace const &problem_space,
  ProblemSpace::Problem const &problem) {
  // For parallel split-K, initialize the variant that writes partials to
  // the device workspace rather than the user-visible output.
  library::Operation const* underlying_operation = operation;
  if (problem_.split_k_mode == library::SplitKMode::kParallel) {
    if (!(underlying_operation = library::find_gemm_operation_for_parallel_reduction(operation))) {
      return Status::kErrorNotSupported;
    }
  }
  library::GemmDescription const &operation_desc =
    static_cast<library::GemmDescription const &>(operation->description());
  // Compute the number of copies of the problem to avoid L2 camping.
  if (!options.profiling.workspace_count) {
    int64_t bytes = problem_.bytes(operation_desc);
    // Size the working set to at least ~3x the L2 so repeated iterations
    // stream from DRAM instead of hitting cached operands.
    if (bytes < 3 * int64_t(options.device.properties.l2CacheSize)) {
      gemm_workspace_.problem_count =
        1 + int((3 * int64_t(options.device.properties.l2CacheSize)) / bytes);
    }
    else {
      gemm_workspace_.problem_count = 1;
    }
  }
  else {
    gemm_workspace_.problem_count = options.profiling.workspace_count;
  }
  bool allocate_device_tensors = options.execution_mode != ExecutionMode::kDryRun;
  if (allocate_device_tensors) {
    // Distinct seed shifts give each operand different random contents.
    int seed_shift = 0;
    gemm_workspace_.A = device_context.allocate_tensor(
      options,
      "A",
      operation_desc.A.element,
      operation_desc.A.layout,
      {int(problem_.m), int(problem_.k)},
      {int(problem_.lda)},
      problem_.batch_count * gemm_workspace_.problem_count,
      seed_shift++
    );
    gemm_workspace_.B = device_context.allocate_tensor(
      options,
      "B",
      operation_desc.B.element,
      operation_desc.B.layout,
      {int(problem_.k), int(problem_.n)},
      {int(problem_.ldb)},
      problem_.batch_count * gemm_workspace_.problem_count,
      seed_shift++
    );
    gemm_workspace_.C = device_context.allocate_tensor(
      options,
      "C",
      operation_desc.C.element,
      operation_desc.C.layout,
      {int(problem_.m), int(problem_.n)},
      {int(problem_.ldc)},
      problem_.batch_count * gemm_workspace_.problem_count,
      seed_shift++
    );
    // Computed / Reference hold the CUTLASS result and the verification
    // result respectively; they are allocated uninitialized (no seed).
    gemm_workspace_.Computed = device_context.allocate_tensor(
      "D",
      operation_desc.D.element,
      operation_desc.D.layout,
      {int(problem_.m), int(problem_.n)},
      {int(problem_.ldc)},
      problem_.batch_count * gemm_workspace_.problem_count
    );
    gemm_workspace_.Reference = device_context.allocate_tensor(
      "Reference",
      operation_desc.D.element,
      operation_desc.D.layout,
      {int(problem_.m), int(problem_.n)},
      {int(problem_.ldc)},
      problem_.batch_count * gemm_workspace_.problem_count
    );
  }
  if (options.execution_mode != ExecutionMode::kDryRun) {
    // NOTE: the leading non-batch strides are duplicated here for 3.0 API kernels
    gemm_workspace_.arguments.problem_size = {int(problem_.m), int(problem_.n), int(problem_.k)};
    gemm_workspace_.arguments.batch_count = problem_.batch_count;
    gemm_workspace_.arguments.lda = problem_.lda;
    gemm_workspace_.arguments.ldb = problem_.ldb;
    gemm_workspace_.arguments.ldc = problem_.ldc;
    gemm_workspace_.arguments.ldd = problem_.ldc;
    gemm_workspace_.arguments.batch_stride_A = problem_.lda;
    gemm_workspace_.arguments.batch_stride_B = problem_.ldb;
    gemm_workspace_.arguments.batch_stride_C = problem_.ldc;
    gemm_workspace_.arguments.batch_stride_D = problem_.ldc;
    /* Query device SM count to pass onto the kernel as an argument, where needed */
    gemm_workspace_.arguments.sm_count = options.device.properties.multiProcessorCount;
  }
  //
  // Initialize the CUTLASS operation
  //
  Status status = Status::kSuccess;
  if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
    if (options.execution_mode != ExecutionMode::kDryRun) {
      uint64_t workspace_size = underlying_operation->get_host_workspace_size(&gemm_workspace_.configuration);
      gemm_workspace_.host_workspace.resize(workspace_size, 0);
      workspace_size = underlying_operation->get_device_workspace_size(&gemm_workspace_.configuration,
                                                                       &gemm_workspace_.arguments);
      gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
      status = underlying_operation->initialize(
        &gemm_workspace_.configuration,
        gemm_workspace_.host_workspace.data(),
        gemm_workspace_.device_workspace.data());
      if (status != Status::kSuccess) {
        return status;
      }
      // The reduction step needs only a host workspace.
      if (problem_.split_k_mode == library::SplitKMode::kParallel) {
        workspace_size = reduction_op_->get_host_workspace_size(&gemm_workspace_.reduction_configuration);
        gemm_workspace_.reduction_host_workspace.resize(workspace_size, 0);
        status = reduction_op_->initialize(
          &gemm_workspace_.reduction_configuration,
          gemm_workspace_.reduction_host_workspace.data(),
          nullptr);
        if (status != Status::kSuccess) {
          return status;
        }
      }
    }
    //
    // If CUTLASS is enabled, generate a result for it
    //
    results_.push_back(model_result_);
    results_.back().provider = library::Provider::kCUTLASS;
    results_.back().op_kind = library::OperationKind::kGemm;
    results_.back().disposition = Disposition::kNotRun;
    for(auto provider : verification_providers_) {
      results_.back().verification_map[provider] = Disposition::kNotRun;
    }
  }
  return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
/// Runs the CUTLASS operation once (plus the reduction for parallel
/// split-K), then checks the Computed tensor against each enabled
/// verification provider. The result's disposition becomes the worst
/// outcome among the providers that ran. Returns true to continue
/// profiling; false aborts this operation.
bool GemmOperationProfiler::verify_cutlass(
  Options const &options,
  PerformanceReport &report,
  DeviceContext &device_context,
  library::Operation const *operation,
  ProblemSpace const &problem_space,
  ProblemSpace::Problem const &problem) {
  if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
    return true;
  }
  if (options.execution_mode == ExecutionMode::kDryRun) {
    return true;
  }
  // Initialize structure containing GEMM arguments
  gemm_workspace_.arguments.A = gemm_workspace_.A->data();
  gemm_workspace_.arguments.B = gemm_workspace_.B->data();
  gemm_workspace_.arguments.C = gemm_workspace_.C->data();
  gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
  gemm_workspace_.arguments.alpha = problem_.alpha.data();
  gemm_workspace_.arguments.beta = problem_.beta.data();
  gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
  gemm_workspace_.arguments.batch_stride_A = gemm_workspace_.A->batch_stride();
  gemm_workspace_.arguments.batch_stride_B = gemm_workspace_.B->batch_stride();
  gemm_workspace_.arguments.batch_stride_C = gemm_workspace_.C->batch_stride();
  gemm_workspace_.arguments.batch_stride_D = gemm_workspace_.Computed->batch_stride();
  // Parallel split-K: the GEMM writes partials (alpha=1, beta=0) into the
  // device workspace; the reduction applies the user's alpha/beta while
  // folding partials into Computed.
  if (problem_.split_k_mode == library::SplitKMode::kParallel) {
    gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data();
    gemm_workspace_.arguments.alpha = problem_.alpha_one.data();
    gemm_workspace_.arguments.beta = problem_.beta_zero.data();
    gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data();
    gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->data();
    gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->data();
    gemm_workspace_.reduction_arguments.alpha = problem_.alpha.data();
    gemm_workspace_.reduction_arguments.beta = problem_.beta.data();
    gemm_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost;
  }
  //
  // Run the CUTLASS operation
  //
  // initialize gemm underlying operation to handle parallel reduction
  library::Operation const * underlying_operation = operation;
  if (problem_.split_k_mode == library::SplitKMode::kParallel) {
    if (!(underlying_operation = library::find_gemm_operation_for_parallel_reduction(operation))) {
      results_.back().disposition = Disposition::kFailed;
      return false;
    }
  }
  results_.back().status = underlying_operation->run(
    &gemm_workspace_.arguments,
    gemm_workspace_.host_workspace.data(),
    gemm_workspace_.device_workspace.data());
  if (results_.back().status != Status::kSuccess) {
    results_.back().disposition = Disposition::kFailed;
    return false;
  }
  // Run parallel reduction kernel for parallel split_k_mode
  if (problem_.split_k_mode == library::SplitKMode::kParallel) {
    results_.back().status = reduction_op_->run(
      &gemm_workspace_.reduction_arguments,
      gemm_workspace_.reduction_host_workspace.data(),
      nullptr);
    if (results_.back().status != Status::kSuccess) {
      results_.back().disposition = Disposition::kFailed;
      return false;
    }
  }
  // Synchronize so that asynchronous execution errors from the launches
  // above surface here rather than during verification.
  hipError_t result = hipDeviceSynchronize();
  if (result != hipSuccess) {
    results_.back().disposition = Disposition::kFailed;
    return false;
  }
  // CUTLASS op ran the but not yet verified against any verification provider
  results_.back().disposition = Disposition::kNotVerified;
  //
  // Run verification providers
  //
  if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUBLAS
    if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
      // Guard against unsupported cases
      auto const & gemm_desc = static_cast<library::GemmDescription const &>(operation->description());
      if (cublas_satisfies(gemm_desc) == Status::kSuccess) {
        // call cublas verification if supported
        verify_with_cublas_(
          options,
          report,
          device_context,
          operation,
          problem_space,
          problem);
        }
      else {
        // set verification map for cublas to not supported
        results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported;
      }
    }
#endif // #if CUTLASS_ENABLE_CUBLAS
    bool verification_status = verify_with_reference_(options, report, device_context, operation, problem_space, problem);
    // Update disposition to worst case verification outcome among all
    // verification providers which are supported
    bool is_any_verification_run_passed = false;
    for (auto &m : results_.back().verification_map) {
      if (m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
        results_.back().disposition = m.second;
        return true;
      }
      if (!is_any_verification_run_passed && m.second == Disposition::kPassed) {
        is_any_verification_run_passed = true;
      }
    }
    if (is_any_verification_run_passed) {
      results_.back().disposition = Disposition::kPassed;
    }
  }
  // if verification.required is set, then return success iff at least one ref-check was run
  if (options.verification.required) {
    bool did_any_verification_run = false;
    for (auto provider : options.verification.providers) {
      did_any_verification_run |= (Disposition::kNotRun != results_.back().verification_map[provider]);
    }
    if (not did_any_verification_run) {
      results_.back().status = Status::kErrorNotSupported;
      return false;
    }
  }
  // Return true means continue profiling
  return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against cuBLAS
/// Dispatches the same GEMM to cublasGemmEx (first selected algorithm),
/// writing into the Reference tensor, then compares Computed vs Reference
/// and records the disposition under the kCUBLAS provider. Always returns
/// true (continue profiling); failures are recorded, not propagated.
bool GemmOperationProfiler::verify_with_cublas_(
  Options const &options,
  PerformanceReport &report,
  DeviceContext &device_context,
  library::Operation const *operation,
  ProblemSpace const &problem_space,
  ProblemSpace::Problem const &problem) {
#if CUTLASS_ENABLE_CUBLAS
  library::GemmDescription const &gemm_desc =
    static_cast<library::GemmDescription const &>(operation->description());
  //
  // Construct cuBLAS operators
  //
  CublasCreate handle;
  hipblasStatus_t status = handle.get_cublas_create_status();
  if (status != HIPBLAS_STATUS_SUCCESS) {
    results_.back().verification_map[library::Provider::kCUBLAS] = get_cutlass_disposition(status);
    return true;
  }
  std::vector<hipblasGemmAlgo_t> algorithms;
  detail::select_cublas_algorithms(
    algorithms,
    options,
    gemm_desc);
  if (algorithms.empty()) {
    // no algorithm selected
    return true;
  }
  //
  // Initialize state
  //
  try {
    //
    // Construct dispatcher to hipblasGemmEx()
    //
    // Initialize structure containing GEMM arguments
    // NOTE: C and D both point at the Reference tensor so cuBLAS writes
    // its result there, leaving Computed (the CUTLASS result) untouched.
    gemm_workspace_.arguments.A = gemm_workspace_.A->data();
    gemm_workspace_.arguments.batch_stride_A = gemm_workspace_.A->batch_stride();
    gemm_workspace_.arguments.B = gemm_workspace_.B->data();
    gemm_workspace_.arguments.batch_stride_B = gemm_workspace_.B->batch_stride();
    gemm_workspace_.arguments.C = gemm_workspace_.Reference->data();
    gemm_workspace_.arguments.batch_stride_C = gemm_workspace_.Reference->batch_stride();
    gemm_workspace_.arguments.D = gemm_workspace_.Reference->data();
    gemm_workspace_.arguments.batch_stride_D = gemm_workspace_.Reference->batch_stride();
    gemm_workspace_.arguments.alpha = problem_.alpha.data();
    gemm_workspace_.arguments.beta = problem_.beta.data();
    gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
    detail::cublasGemmExDispatcher gemm_op(
      gemm_desc,
      gemm_workspace_.configuration,
      gemm_workspace_.arguments,
      algorithms.front()
    );
    if (gemm_op.status != Status::kSuccess) {
      results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun;
      return true;
    }
    results_.back().status = Status::kSuccess;
    status = gemm_op(handle);
    // Handle errors
    if (status != HIPBLAS_STATUS_SUCCESS) {
      results_.back().verification_map[library::Provider::kCUBLAS] = get_cutlass_disposition(status);
      return true;
    }
    //
    // Verify results
    //
    results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors(
      options,
      *gemm_workspace_.Computed,
      *gemm_workspace_.Reference,
      gemm_workspace_.Computed->batch_stride()
    );
    // Save workspace if incorrect
    if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
      results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) {
      save_workspace(
        device_context,
        options,
        gemm_desc,
        library::Provider::kCUTLASS,
        library::Provider::kCUBLAS);
    }
  }
  catch (...) {
    results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
  }
#endif
  // Return true means continue profiling
  return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against host and device references
/// For each enabled reference provider, runs the same GEMM through
/// library::Handle::gemm_universal into the Reference tensor (staging
/// operands through host memory for the host reference), then compares
/// Computed vs Reference and records the per-provider disposition.
/// Always returns true (continue profiling).
bool GemmOperationProfiler::verify_with_reference_(
  Options const &options,
  PerformanceReport &report,
  DeviceContext &device_context,
  library::Operation const *operation,
  ProblemSpace const &problem_space,
  ProblemSpace::Problem const &problem) {
  library::GemmDescription const &gemm_desc =
    static_cast<library::GemmDescription const &>(operation->description());
  //
  // Initialize state
  //
  for (auto provider : options.verification.providers) {
    // Skip providers that are not enabled
    if (!options.verification.provider_enabled(provider)) {
      continue;
    }
    void *ptr_A = gemm_workspace_.A->data();
    void *ptr_B = gemm_workspace_.B->data();
    void *ptr_C = gemm_workspace_.C->data();
    void *ptr_D = gemm_workspace_.Reference->data();
    // To support the host-side reference, conditionally allocate and
    // copy tensors to host memory.
    std::vector<uint8_t> host_data_A;
    std::vector<uint8_t> host_data_B;
    std::vector<uint8_t> host_data_C;
    std::vector<uint8_t> host_data_D;
    if (provider == library::Provider::kReferenceHost) {
      host_data_A.resize(gemm_workspace_.A->bytes());
      ptr_A = host_data_A.data();
      gemm_workspace_.A->copy_to_host(ptr_A);
      host_data_B.resize(gemm_workspace_.B->bytes());
      ptr_B = host_data_B.data();
      gemm_workspace_.B->copy_to_host(ptr_B);
      host_data_C.resize(gemm_workspace_.C->bytes());
      ptr_C = host_data_C.data();
      gemm_workspace_.C->copy_to_host(ptr_C);
      host_data_D.resize(gemm_workspace_.Reference->bytes());
      ptr_D = host_data_D.data();
    }
    //
    // Launch
    //
    library::Handle handle;
    handle.set_provider(provider);
    Status status = handle.gemm_universal(
      problem_.mode,
      gemm_workspace_.configuration.problem_size.m(),
      gemm_workspace_.configuration.problem_size.n(),
      gemm_workspace_.configuration.problem_size.k(),
      gemm_desc.tile_description.math_instruction.element_accumulator,
      gemm_desc.element_epilogue,
      problem_.alpha.data(),
      gemm_desc.A.element,
      gemm_desc.A.layout,
      gemm_desc.transform_A,
      ptr_A,
      int(gemm_workspace_.configuration.lda),
      gemm_desc.B.element,
      gemm_desc.B.layout,
      gemm_desc.transform_B,
      ptr_B,
      int(gemm_workspace_.configuration.ldb),
      problem_.beta.data(),
      gemm_desc.C.element,
      gemm_desc.C.layout,
      ptr_C,
      int(gemm_workspace_.configuration.ldc),
      gemm_desc.D.element,
      gemm_desc.D.layout,
      ptr_D,
      int(gemm_workspace_.configuration.ldd),
      gemm_workspace_.configuration.batch_count,
      gemm_workspace_.A->batch_stride(),
      gemm_workspace_.B->batch_stride(),
      gemm_workspace_.C->batch_stride(),
      gemm_workspace_.Reference->batch_stride());
    if (status != Status::kSuccess) {
      results_.back().verification_map[provider] = Disposition::kNotRun;
      continue;
    }
    results_.back().status = status;
    // Host reference computed into host_data_D: copy it back to the
    // device-resident Reference tensor before comparison.
    if (provider == library::Provider::kReferenceHost) {
      gemm_workspace_.Reference->copy_from_host(ptr_D);
    }
    //
    // Verify results
    //
    results_.back().verification_map[provider] = compare_tensors(
      options,
      *gemm_workspace_.Computed,
      *gemm_workspace_.Reference,
      gemm_workspace_.Computed->batch_stride()
    );
    // Save workspace if incorrect
    if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
      results_.back().verification_map[provider] == Disposition::kIncorrect) {
      save_workspace(
        device_context,
        options,
        gemm_desc,
        library::Provider::kCUTLASS,
        provider);
    }
  }
  return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
///
/// Populates gemm_workspace_.arguments with operand pointers, scalars, and
/// batch strides, then delegates timing to profile_cutlass_(). For parallel
/// split-K, the GEMM writes partial accumulations into the device workspace
/// with alpha = 1 / beta = 0, and a separate reduction kernel applies the
/// user's alpha/beta to produce the final output.
bool GemmOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.C = gemm_workspace_.C->data();
gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
gemm_workspace_.arguments.batch_stride_A = gemm_workspace_.A->batch_stride();
gemm_workspace_.arguments.batch_stride_B = gemm_workspace_.B->batch_stride();
gemm_workspace_.arguments.batch_stride_C = gemm_workspace_.C->batch_stride();
gemm_workspace_.arguments.batch_stride_D = gemm_workspace_.Computed->batch_stride();
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
// GEMM accumulates into the workspace with unit alpha / zero beta; the
// reduction combines partials and applies the user's epilogue scalars.
gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data();
gemm_workspace_.arguments.alpha = problem_.alpha_one.data();
gemm_workspace_.arguments.beta = problem_.beta_zero.data();
gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data();
gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->data();
gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->data();
gemm_workspace_.reduction_arguments.alpha = problem_.alpha.data();
gemm_workspace_.reduction_arguments.beta = problem_.beta.data();
gemm_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost;
}
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&gemm_workspace_.arguments,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data()
);
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Method to profile a CUTLASS Operation
///
/// Runs options.profiling.warmup_iterations un-timed launches followed by
/// options.profiling.iterations timed launches, rotating through the redundant
/// workspace copies of the problem to defeat L2 residency. For parallel
/// split-K, each iteration is the underlying GEMM plus the reduction kernel.
/// On success, `runtime` receives the timer's average per-iteration duration.
///
/// Fixes relative to the previous revision:
///  - `status` is initialized, so zero warmup + zero profiling iterations
///    returns a defined value instead of indeterminate garbage;
///  - the profiling loop passes &gemm_workspace_.arguments (as the warmup
///    loop already did) so the per-iteration operand pointers take effect.
Status GemmOperationProfiler::profile_cutlass_(
  double &runtime,
  Options const &options,
  library::Operation const *operation,
  void *arguments,
  void *host_workspace,
  void *device_workspace) {

  // Retained for interface compatibility; the member structure is authoritative.
  (void)arguments;

  GpuTimer timer;

  // initialize gemm underlying operation to handle parallel reduction
  library::Operation const *underlying_operation = operation;

  if (problem_.split_k_mode == library::SplitKMode::kParallel) {
    if (!(underlying_operation = library::find_gemm_operation_for_parallel_reduction(operation))) {
      return Status::kErrorNotSupported;
    }
  }

  //
  // Optional sleep to limit power consumption and thermals
  //
  sleep(options.profiling.sleep_duration);

  //
  // Warmup loop
  //
  Status status = Status::kSuccess;

  for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) {

    // Select one of the redundant copies of the problem held in the workspace.
    int problem_idx = (iteration % gemm_workspace_.problem_count) * problem_.batch_count;

    gemm_workspace_.arguments.A = gemm_workspace_.A->batch_data(problem_idx);
    gemm_workspace_.arguments.B = gemm_workspace_.B->batch_data(problem_idx);
    gemm_workspace_.arguments.C = gemm_workspace_.C->batch_data(problem_idx);
    gemm_workspace_.arguments.D = gemm_workspace_.Computed->batch_data(problem_idx);

    if (problem_.split_k_mode == library::SplitKMode::kParallel) {
      gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data();

      gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data();
      gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->batch_data(problem_idx);
      gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->batch_data(problem_idx);
    }

    // Execute the CUTLASS operation
    status = underlying_operation->run(
      &gemm_workspace_.arguments,
      host_workspace,
      device_workspace);

    if (status != Status::kSuccess) {
      return status;
    }

    // Run parallel reduction kernel for parallel split_k_mode
    if (problem_.split_k_mode == library::SplitKMode::kParallel) {
      status = reduction_op_->run(
        &gemm_workspace_.reduction_arguments,
        gemm_workspace_.reduction_host_workspace.data(),
        nullptr);

      if (status != Status::kSuccess) {
        return status;
      }
    }
  }

  //
  // Initialize GPU timer
  //
  timer.start();

  //
  // Profiling loop
  //
  int Iterations = options.profiling.iterations;

  int iteration = 0;
  for (; iteration < Iterations; ++iteration) {

    // Iterate over copies of the problem in memory, continuing the rotation
    // begun by the warmup loop.
    int workspace_idx = options.profiling.warmup_iterations + iteration;
    int problem_idx = (workspace_idx % gemm_workspace_.problem_count) * problem_.batch_count;

    gemm_workspace_.arguments.A = gemm_workspace_.A->batch_data(problem_idx);
    gemm_workspace_.arguments.B = gemm_workspace_.B->batch_data(problem_idx);
    gemm_workspace_.arguments.C = gemm_workspace_.C->batch_data(problem_idx);
    gemm_workspace_.arguments.D = gemm_workspace_.Computed->batch_data(problem_idx);

    if (problem_.split_k_mode == library::SplitKMode::kParallel) {
      gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data();

      gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data();
      gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->batch_data(problem_idx);
      gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->batch_data(problem_idx);
    }

    // Pass the member argument structure (matching the warmup loop) so the
    // per-iteration operand pointers written above are actually used.
    status = underlying_operation->run(
      &gemm_workspace_.arguments,
      host_workspace,
      device_workspace);

    if (status != Status::kSuccess) {
      return status;
    }

    // Run parallel reduction kernel for parallel split_k_mode
    if (problem_.split_k_mode == library::SplitKMode::kParallel) {
      status = reduction_op_->run(
        &gemm_workspace_.reduction_arguments,
        gemm_workspace_.reduction_host_workspace.data(),
        nullptr);

      if (status != Status::kSuccess) {
        return status;
      }
    }
  }

  //
  // Wait for completion
  //
  timer.stop_and_wait();

  //
  // Update performance result
  //
  runtime = timer.duration(iteration);

  return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| e667e61aede371cef24d81ccc5107ecf8ee65289.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/core_io.h"
#include "cublas_helpers.h"
#include "gemm_operation_profiler.h"
#include "gpu_timer.h"
#include "cutlass/library/singleton.h"
#include "cutlass/library/library.h"
#include "cutlass/library/handle.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
///
/// Registers the GEMM argument schema with the base OperationProfiler.
/// Each entry is (type, {name and aliases}, help text) and is consumed by
/// the ProblemSpace command-line parser. cuBLAS is the only external
/// verification provider declared for GEMM.
GemmOperationProfiler::GemmOperationProfiler(Options const &options):
OperationProfiler(
options,
library::OperationKind::kGemm,
{
{ArgumentTypeID::kEnumerated, {"gemm_kind"}, "Variant of GEMM (universal, gemm, planar_complex, planar_complex_array)"},
{ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the GEMM problem space"},
{ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the GEMM problem space"},
{ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the GEMM problem space"},
{ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
{ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"},
{ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
{ArgumentTypeID::kTensor, {"D"}, "Tensor storing the D output"},
{ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
{ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
{ArgumentTypeID::kEnumerated, {"split_k_mode", "split-k-mode"}, "Variant of split K mode(serial, parallel)"},
{ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
{ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of GEMMs computed in one batch"},
},
{ library::Provider::kCUBLAS}
) {
description_ = " General matrix-matrix product. D = alpha * A*B + beta * C";
}
/// Destructor — no explicit teardown; members release their own resources.
GemmOperationProfiler::~GemmOperationProfiler() = default;
/// Prints usage statement for the math function
///
/// Emits the operation banner, then delegates to the base class for the
/// option listing common to all profilers.
void GemmOperationProfiler::print_usage(std::ostream &out) const {
  out << "GEMM\n\n";
  OperationProfiler::print_usage(out);
}
/// Prints examples
///
/// Emits ready-to-run cutlass_profiler command lines demonstrating common
/// GEMM workflows: fixed problem sizes, parameter sweeps, split-K with
/// parallel reduction, input distributions, and CSV report generation.
void GemmOperationProfiler::print_examples(std::ostream &out) const {
out << "\nExamples:\n\n"
<< "Profile a particular problem size:\n"
<< " $ cutlass_profiler --operation=Gemm --m=1024 --n=1024 --k=128\n\n"
<< "Schmoo over problem size and beta:\n"
<< " $ cutlass_profiler --operation=Gemm --m=1024:4096:256 --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
<< "Schmoo over accumulator types:\n"
<< " $ cutlass_profiler --operation=Gemm --accumulator-type=f16,f32\n\n"
<< "Run when A is f16 with column-major and B is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n"
<< " $ cutlass_profiler --operation=Gemm --A=f16:column --B=*:row\n\n"
<< "Profile a particular problem size with split K and parallel reduction:\n"
<< " $ cutlass_profiler --operation=Gemm --split_k_mode=parallel --split_k_slices=2 --m=1024 --n=1024 --k=128\n\n"
<< "Using various input value distribution:\n"
<< " $ cutlass_profiler --operation=Gemm --dist=uniform,min:0,max:3\n"
<< " $ cutlass_profiler --operation=Gemm --dist=gaussian,mean:0,stddev:3\n"
<< " $ cutlass_profiler --operation=Gemm --dist=sequential,start:0,delta:1\n\n"
<< "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
<< " $ cutlass_profiler --operation=Gemm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
<< "Test your changes to gemm kernels with a quick functional test and save results in functional-test.csv:\n"
<< " $ cutlass_profiler --operation=Gemm \\ \n"
<< " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
<< " --beta=0,1,2 --profiling-iterations=1 \\ \n"
<< " --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
// used this for debugging
// Renders a byte vector as a hex string ("0x...."), iterating from the last
// byte down to the first so the most-significant byte prints first.
// Compiled out (#if 0); retained for debugging scalar bit patterns.
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
/// Parses the GEMM problem from the problem space, applying defaults for any
/// unspecified arguments (m = n = k = 1024, serial split-K, one slice, one
/// batch, alpha = 1, beta = 0).
///
/// The universal GEMM mode is derived from the split-K / batch settings:
///   split_k_mode == parallel -> kGemmSplitKParallel
///   batch_count > 1          -> kBatched
///   otherwise                -> kGemm
///
/// Returns kErrorInvalidProblem when both split-K slicing and batching are
/// requested, or when an operand's tensor description does not satisfy the
/// operation; kErrorInternal when a default scalar cannot be materialized.
///
/// Note: a redundant duplicate initialization of this->mode (previously
/// assigned twice before first use) has been removed.
Status GemmOperationProfiler::GemmProblem::parse(
  library::GemmDescription const &operation_desc,
  ProblemSpace const &problem_space,
  ProblemSpace::Problem const &problem) {

  if (!arg_as_int(this->m, "m", problem_space, problem)) {
    // default value
    this->m = 1024;
  }

  if (!arg_as_int(this->n, "n", problem_space, problem)) {
    // default value
    this->n = 1024;
  }

  if (!arg_as_int(this->k, "k", problem_space, problem)) {
    // default value
    this->k = 1024;
  }

  if (!arg_as_SplitKModeID(this->split_k_mode, "split_k_mode", problem_space, problem)) {
    // default value
    this->split_k_mode = library::SplitKMode::kSerial;
  }

  // Select the universal GEMM mode from the split-K setting.
  this->mode = library::GemmUniversalMode::kGemm;
  if (this->split_k_mode == library::SplitKMode::kParallel) {
    this->mode = library::GemmUniversalMode::kGemmSplitKParallel;
  }

  if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
    // default value
    this->split_k_slices = 1;
  }

  if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
    // default value
    this->batch_count = 1;
  } else if (this->batch_count > 1) {
    this->mode = library::GemmUniversalMode::kBatched;
  }

  // Split-K slicing and batching are mutually exclusive.
  if (this->split_k_slices > 1 && this->batch_count > 1) {
    // At least one of these must be one
    return Status::kErrorInvalidProblem;
  }

  if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
    return Status::kErrorInvalidProblem;
  }

  if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) {
    return Status::kErrorInvalidProblem;
  }

  if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
    return Status::kErrorInvalidProblem;
  }

  if (!tensor_description_satisfies(operation_desc.D, "D", problem_space, problem)) {
    return Status::kErrorInvalidProblem;
  }

  // Epilogue scalars: fall back to alpha = 1 when not supplied.
  if (!arg_as_scalar(
    this->alpha,
    operation_desc.element_epilogue,
    "alpha",
    problem_space,
    problem)) {

    if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
      return Status::kErrorInternal;
    }
  }

  // Epilogue scalars: fall back to beta = 0 when not supplied.
  if (!arg_as_scalar(
    this->beta,
    operation_desc.element_epilogue,
    "beta",
    problem_space,
    problem)) {

    if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
      return Status::kErrorInternal;
    }
  }

  // Leading dimensions assume densely packed tensors.
  this->lda = DeviceAllocation::get_packed_layout(
    operation_desc.A.layout, {int(this->m), int(this->k)}).front();

  this->ldb = DeviceAllocation::get_packed_layout(
    operation_desc.B.layout, {int(this->k), int(this->n)}).front();

  this->ldc = DeviceAllocation::get_packed_layout(
    operation_desc.C.layout, {int(this->m), int(this->n)}).front();

  return Status::kSuccess;
}
/// Total number of bytes loaded
///
/// Models the data traffic of one problem: read A (m x k), read B (k x n),
/// write the output (m x n), plus an extra read of C when beta is non-zero,
/// all scaled by the batch count.
int64_t GemmOperationProfiler::GemmProblem::bytes(library::GemmDescription const &operation_desc) const {
  int64_t total = int64_t(library::sizeof_bits(operation_desc.A.element) * m / 8) * k;
  total += int64_t(library::sizeof_bits(operation_desc.B.element) * n / 8) * k;
  total += int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n;

  // C must additionally be read whenever beta has any non-zero byte.
  bool const beta_is_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t b) { return b == 0; });

  if (!beta_is_zero) {
    total += int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n;
  }

  return total * batch_count;
}
/// Total number of flops computed
///
/// Counts 2 flops (multiply + add) per MAC for the m*n*k products plus the
/// m*n epilogue, scaled by batch count, then scaled again for complex-valued
/// math instructions that expand to multiple real-valued MACs.
int64_t GemmOperationProfiler::GemmProblem::flops(library::GemmDescription const &operation_desc) const {
  int64_t result = (int64_t(m) * n * k + m * n) * 2 * batch_count;

  switch (operation_desc.tile_description.math_instruction.math_operation) {
  case library::MathOperationID::kMultiplyAddComplex:         // 4 real MACs each
  case library::MathOperationID::kMultiplyAddComplexFastF32:  // same expansion
    result *= 4;
    break;
  case library::MathOperationID::kMultiplyAddGaussianComplex: // Gauss trick: 3 real MACs
    result *= 3;
    break;
  default:
    break;
  }

  return result;
}
/// Initializes a performance result
///
/// Records this problem's argument values (GEMM kind, operand descriptors as
/// "element:layout" pairs, extents, split-K / batch settings, epilogue
/// scalars) into the result's argument vector so they appear in the report.
void GemmOperationProfiler::GemmProblem::initialize_result(
PerformanceResult &result,
library::GemmDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.arguments.resize(problem_space.rank());
set_argument(result, "gemm_kind", problem_space, library::to_string(operation_desc.gemm_kind));
set_argument(result, "A", problem_space,
std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
set_argument(result, "B", problem_space,
std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout));
set_argument(result, "C", problem_space,
std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));
set_argument(result, "D", problem_space,
std::string(library::to_string(operation_desc.D.element)) + ":" + library::to_string(operation_desc.D.layout));
set_argument(result, "m", problem_space, m);
set_argument(result, "n", problem_space, n);
set_argument(result, "k", problem_space, k);
set_argument(result, "split_k_mode", problem_space, library::to_string(split_k_mode));
set_argument(result, "split_k_slices", problem_space, split_k_slices);
set_argument(result, "batch_count", problem_space, batch_count);
// Scalars are rendered through the epilogue element type so the report shows
// the value actually used by the kernel.
set_argument(result, "alpha", problem_space,
library::lexical_cast(alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(beta, operation_desc.element_epilogue));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
///
/// Parses the problem from the problem space, fills
/// gemm_workspace_.configuration, prepares the parallel-reduction operation
/// when split-K parallel mode is requested, and finally asks the operation
/// whether it can implement the configuration. Only universal GEMM kinds are
/// profiled here.
Status GemmOperationProfiler::initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::GemmDescription const &operation_desc =
static_cast<library::GemmDescription const &>(operation->description());
if (operation_desc.gemm_kind != library::GemmKind::kUniversal) {
return Status::kErrorInvalidProblem;
}
Status status = problem_.parse(operation_desc, problem_space, problem);
if (status != Status::kSuccess) {
return status;
}
gemm_workspace_.configuration.mode = problem_.mode;
gemm_workspace_.configuration.problem_size.m() = int(problem_.m);
gemm_workspace_.configuration.problem_size.n() = int(problem_.n);
gemm_workspace_.configuration.problem_size.k() = int(problem_.k);
gemm_workspace_.configuration.lda = problem_.lda;
gemm_workspace_.configuration.ldb = problem_.ldb;
gemm_workspace_.configuration.ldc = problem_.ldc;
gemm_workspace_.configuration.ldd = problem_.ldc; // D shares C's leading dimension
if (problem_.mode == library::GemmUniversalMode::kBatched) {
gemm_workspace_.configuration.batch_count = problem_.batch_count;
}
else {
// In non-batched modes the batch_count field carries the split-K slice count.
gemm_workspace_.configuration.batch_count = problem_.split_k_slices;
}
// Operand pointers are bound later (verify_cutlass / profile).
gemm_workspace_.arguments.A = nullptr;
gemm_workspace_.arguments.B = nullptr;
gemm_workspace_.arguments.C = nullptr;
gemm_workspace_.arguments.D = nullptr;
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
// initialize reduction operation for parallel splitKMode
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
if (!initialize_reduction_configuration_(operation, problem)) {
return Status::kErrorInternal;
}
}
initialize_result_(this->model_result_, options, operation_desc, problem_space);
return operation->can_implement(&gemm_workspace_.configuration, &gemm_workspace_.arguments);
}
/// Initializes the performance result
///
/// Fills the provider / status fields, records the operation name and problem
/// arguments, and computes the modeled bytes / flops used for throughput
/// reporting.
void GemmOperationProfiler::initialize_result_(
PerformanceResult &result,
Options const &options,
library::GemmDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
// Populate the problem's arguments, then the profiler-wide common fields.
problem_.initialize_result(result, operation_desc, problem_space);
OperationProfiler::initialize_result_(result, operation_desc, problem_space);
// Modeled quantities for bandwidth / GFLOP computation.
result.bytes = problem_.bytes(operation_desc);
result.flops = problem_.flops(operation_desc);
result.runtime = 0;
}
/// Initialize reduction problem dimensions and library::Operation
///
/// Prepares the configuration for the parallel split-K reduction kernel:
/// materializes alpha = 1 / beta = 0 scalars for the partial-sum GEMM,
/// fills the reduction configuration, and looks up a matching reduction
/// operation in the singleton operation table. Returns false when the scalar
/// cast or the table lookup fails.
bool GemmOperationProfiler::initialize_reduction_configuration_(
library::Operation const *operation,
ProblemSpace::Problem const &problem) {
library::GemmDescription const &gemm_desc =
static_cast<library::GemmDescription const&>(operation->description());
if (!cast_from_double(problem_.alpha_one, gemm_desc.element_epilogue, 1)) {
return false;
}
if (!cast_from_double(problem_.beta_zero, gemm_desc.element_epilogue, 0)) {
return false;
}
/// initialize library::ReductionConfiguration
// NOTE(review): coordinates are built as (n, m, k) before taking .mn();
// presumably this matches the reduction kernel's workspace layout — confirm
// before changing.
gemm_workspace_.reduction_configuration.problem_size = gemm::GemmCoord(int(problem_.n), int(problem_.m), int(problem_.k)).mn();
gemm_workspace_.reduction_configuration.partitions = int(problem_.split_k_slices);
gemm_workspace_.reduction_configuration.partition_stride = gemm::GemmCoord(int(problem_.n), int(problem_.m), int(problem_.k)).mn().product();
gemm_workspace_.reduction_configuration.ldw = problem_.ldc;
gemm_workspace_.reduction_configuration.lds = problem_.ldc;
gemm_workspace_.reduction_configuration.ldd = problem_.ldc;
// find reduction operation
library::ReductionFunctionalKey reduction_key(
library::Provider::kCUTLASS,
gemm_desc.tile_description.math_instruction.element_accumulator, // element workspace
gemm_desc.tile_description.math_instruction.element_accumulator, // element accumulator
gemm_desc.D.element, // element output
gemm_desc.element_epilogue // element compute
);
auto reduction_it = library::Singleton::get().operation_table.reduction_operations.find(reduction_key);
if (reduction_it == library::Singleton::get().operation_table.reduction_operations.end()) {
return false;
}
// initialize reduction operation required for parallel split-k operator
reduction_op_ = reduction_it->second;
// reduction operation found and initialized
return true;
}
/// Initializes workspace
///
/// Allocates operand tensors (with redundant copies of the problem to defeat
/// L2 residency during profiling), fills the duplicated 3.0-API argument
/// fields, initializes the underlying CUTLASS operation (and the reduction
/// operation for parallel split-K), and appends a pending result entry for
/// the CUTLASS provider. Device allocations and initialization are skipped
/// in dry-run mode.
Status GemmOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
// For parallel split-K, size/initialize the underlying GEMM that emits
// partial accumulations rather than the wrapping operation.
library::Operation const* underlying_operation = operation;
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
if (!(underlying_operation = library::find_gemm_operation_for_parallel_reduction(operation))) {
return Status::kErrorNotSupported;
}
}
library::GemmDescription const &operation_desc =
static_cast<library::GemmDescription const &>(operation->description());
// Compute the number of copies of the problem to avoid L2 camping.
if (!options.profiling.workspace_count) {
int64_t bytes = problem_.bytes(operation_desc);
// Aim for roughly 3x the L2 capacity worth of working set.
if (bytes < 3 * int64_t(options.device.properties.l2CacheSize)) {
gemm_workspace_.problem_count =
1 + int((3 * int64_t(options.device.properties.l2CacheSize)) / bytes);
}
else {
gemm_workspace_.problem_count = 1;
}
}
else {
gemm_workspace_.problem_count = options.profiling.workspace_count;
}
bool allocate_device_tensors = options.execution_mode != ExecutionMode::kDryRun;
if (allocate_device_tensors) {
// Distinct seeds so A/B/C receive different pseudo-random contents.
int seed_shift = 0;
gemm_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.m), int(problem_.k)},
{int(problem_.lda)},
problem_.batch_count * gemm_workspace_.problem_count,
seed_shift++
);
gemm_workspace_.B = device_context.allocate_tensor(
options,
"B",
operation_desc.B.element,
operation_desc.B.layout,
{int(problem_.k), int(problem_.n)},
{int(problem_.ldb)},
problem_.batch_count * gemm_workspace_.problem_count,
seed_shift++
);
gemm_workspace_.C = device_context.allocate_tensor(
options,
"C",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)},
problem_.batch_count * gemm_workspace_.problem_count,
seed_shift++
);
// Outputs: Computed receives the CUTLASS result; Reference receives the
// verification providers' result. Neither is seeded.
gemm_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.D.element,
operation_desc.D.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)},
problem_.batch_count * gemm_workspace_.problem_count
);
gemm_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.D.element,
operation_desc.D.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)},
problem_.batch_count * gemm_workspace_.problem_count
);
}
if (options.execution_mode != ExecutionMode::kDryRun) {
// NOTE: the leading non-batch strides are duplicated here for 3.0 API kernels
gemm_workspace_.arguments.problem_size = {int(problem_.m), int(problem_.n), int(problem_.k)};
gemm_workspace_.arguments.batch_count = problem_.batch_count;
gemm_workspace_.arguments.lda = problem_.lda;
gemm_workspace_.arguments.ldb = problem_.ldb;
gemm_workspace_.arguments.ldc = problem_.ldc;
gemm_workspace_.arguments.ldd = problem_.ldc;
gemm_workspace_.arguments.batch_stride_A = problem_.lda;
gemm_workspace_.arguments.batch_stride_B = problem_.ldb;
gemm_workspace_.arguments.batch_stride_C = problem_.ldc;
gemm_workspace_.arguments.batch_stride_D = problem_.ldc;
/* Query device SM count to pass onto the kernel as an argument, where needed */
gemm_workspace_.arguments.sm_count = options.device.properties.multiProcessorCount;
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
// Host workspace first, then device workspace sized from the arguments.
uint64_t workspace_size = underlying_operation->get_host_workspace_size(&gemm_workspace_.configuration);
gemm_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = underlying_operation->get_device_workspace_size(&gemm_workspace_.configuration,
&gemm_workspace_.arguments);
gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = underlying_operation->initialize(
&gemm_workspace_.configuration,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data());
if (status != Status::kSuccess) {
return status;
}
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
workspace_size = reduction_op_->get_host_workspace_size(&gemm_workspace_.reduction_configuration);
gemm_workspace_.reduction_host_workspace.resize(workspace_size, 0);
status = reduction_op_->initialize(
&gemm_workspace_.reduction_configuration,
gemm_workspace_.reduction_host_workspace.data(),
nullptr);
if (status != Status::kSuccess) {
return status;
}
}
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kGemm;
results_.back().disposition = Disposition::kNotRun;
for(auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool GemmOperationProfiler::verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.C = gemm_workspace_.C->data();
gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
gemm_workspace_.arguments.batch_stride_A = gemm_workspace_.A->batch_stride();
gemm_workspace_.arguments.batch_stride_B = gemm_workspace_.B->batch_stride();
gemm_workspace_.arguments.batch_stride_C = gemm_workspace_.C->batch_stride();
gemm_workspace_.arguments.batch_stride_D = gemm_workspace_.Computed->batch_stride();
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data();
gemm_workspace_.arguments.alpha = problem_.alpha_one.data();
gemm_workspace_.arguments.beta = problem_.beta_zero.data();
gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data();
gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->data();
gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->data();
gemm_workspace_.reduction_arguments.alpha = problem_.alpha.data();
gemm_workspace_.reduction_arguments.beta = problem_.beta.data();
gemm_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost;
}
//
// Run the CUTLASS operation
//
// initialize gemm underlying operation to handle parallel reduction
library::Operation const * underlying_operation = operation;
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
if (!(underlying_operation = library::find_gemm_operation_for_parallel_reduction(operation))) {
results_.back().disposition = Disposition::kFailed;
return false;
}
}
results_.back().status = underlying_operation->run(
&gemm_workspace_.arguments,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// Run parallel reduction kernel for parallel split_k_mode
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
results_.back().status = reduction_op_->run(
&gemm_workspace_.reduction_arguments,
gemm_workspace_.reduction_host_workspace.data(),
nullptr);
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
}
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// CUTLASS op ran the but not yet verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUBLAS
if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
// Guard against unsupported cases
auto const & gemm_desc = static_cast<library::GemmDescription const &>(operation->description());
if (cublas_satisfies(gemm_desc) == Status::kSuccess) {
// call cublas verification if supported
verify_with_cublas_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
else {
// set verification map for cublas to not supported
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUBLAS
bool verification_status = verify_with_reference_(options, report, device_context, operation, problem_space, problem);
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for (auto &m : results_.back().verification_map) {
if (m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if (!is_any_verification_run_passed && m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if (is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// if verification.required is set, then return success iff at least one ref-check was run
if (options.verification.required) {
bool did_any_verification_run = false;
for (auto provider : options.verification.providers) {
did_any_verification_run |= (Disposition::kNotRun != results_.back().verification_map[provider]);
}
if (not did_any_verification_run) {
results_.back().status = Status::kErrorNotSupported;
return false;
}
}
// Return true means continue profiling
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
// Verifies the most recent CUTLASS GEMM result (gemm_workspace_.Computed)
// against cuBLAS: cuBLAS recomputes the product into the Reference tensor and
// the two tensors are compared element-wise.  The outcome lands in
// results_.back().verification_map[kCUBLAS]; the return value of `true`
// always means "continue profiling" - verification problems are recorded as
// dispositions, never treated as fatal profiler errors.
bool GemmOperationProfiler::verify_with_cublas_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
#if CUTLASS_ENABLE_CUBLAS
library::GemmDescription const &gemm_desc =
static_cast<library::GemmDescription const &>(operation->description());
//
// Construct cuBLAS operators
//
CublasCreate handle;
cublasStatus_t status = handle.get_cublas_create_status();
if (status != CUBLAS_STATUS_SUCCESS) {
// Handle creation failed: record the translated status and keep going.
results_.back().verification_map[library::Provider::kCUBLAS] = get_cutlass_disposition(status);
return true;
}
std::vector<cublasGemmAlgo_t> algorithms;
detail::select_cublas_algorithms(
algorithms,
options,
gemm_desc);
if (algorithms.empty()) {
// no algorithm selected - nothing to verify against
return true;
}
//
// Initialize state
//
try {
//
// Construct dispatcher to cublasGemmEx()
//
// Initialize structure containing GEMM arguments.
// Note: C and D both point at the Reference tensor, so cuBLAS reads the
// original C operand and writes its result into Reference, leaving the
// CUTLASS output (Computed) untouched for the comparison below.
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.batch_stride_A = gemm_workspace_.A->batch_stride();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.batch_stride_B = gemm_workspace_.B->batch_stride();
gemm_workspace_.arguments.C = gemm_workspace_.Reference->data();
gemm_workspace_.arguments.batch_stride_C = gemm_workspace_.Reference->batch_stride();
gemm_workspace_.arguments.D = gemm_workspace_.Reference->data();
gemm_workspace_.arguments.batch_stride_D = gemm_workspace_.Reference->batch_stride();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
detail::cublasGemmExDispatcher gemm_op(
gemm_desc,
gemm_workspace_.configuration,
gemm_workspace_.arguments,
algorithms.front() // only the first selected algorithm is used for verification
);
if (gemm_op.status != Status::kSuccess) {
// Dispatcher could not be constructed for this problem shape/type.
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun;
return true;
}
results_.back().status = Status::kSuccess;
status = gemm_op(handle);
// Handle errors
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = get_cutlass_disposition(status);
return true;
}
//
// Verify results: CUTLASS output vs. the cuBLAS-produced reference.
//
results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors(
options,
*gemm_workspace_.Computed,
*gemm_workspace_.Reference,
gemm_workspace_.Computed->batch_stride()
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
gemm_desc,
library::Provider::kCUTLASS,
library::Provider::kCUBLAS);
}
}
catch (...) {
// Any exception from the dispatcher is recorded as a verification failure,
// not propagated as a fatal profiler error.
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
}
#endif
// Return true means continue profiling
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against host and device references
// Verifies the CUTLASS result against the enabled host-side and/or
// device-side reference implementations.  For the host reference the A/B/C
// operands are staged into host memory first and the reference output is
// copied back into the device-side Reference tensor before comparison.
// Per-provider outcomes are recorded in results_.back().verification_map;
// always returns true ("continue profiling").
bool GemmOperationProfiler::verify_with_reference_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::GemmDescription const &gemm_desc =
static_cast<library::GemmDescription const &>(operation->description());
//
// Initialize state
//
for (auto provider : options.verification.providers) {
// Skip providers that are not enabled
if (!options.verification.provider_enabled(provider)) {
continue;
}
// Default to device-resident operands; Reference receives the output.
void *ptr_A = gemm_workspace_.A->data();
void *ptr_B = gemm_workspace_.B->data();
void *ptr_C = gemm_workspace_.C->data();
void *ptr_D = gemm_workspace_.Reference->data();
// To support the host-side reference, conditionally allocate and
// copy tensors to host memory.
std::vector<uint8_t> host_data_A;
std::vector<uint8_t> host_data_B;
std::vector<uint8_t> host_data_C;
std::vector<uint8_t> host_data_D;
if (provider == library::Provider::kReferenceHost) {
host_data_A.resize(gemm_workspace_.A->bytes());
ptr_A = host_data_A.data();
gemm_workspace_.A->copy_to_host(ptr_A);
host_data_B.resize(gemm_workspace_.B->bytes());
ptr_B = host_data_B.data();
gemm_workspace_.B->copy_to_host(ptr_B);
host_data_C.resize(gemm_workspace_.C->bytes());
ptr_C = host_data_C.data();
gemm_workspace_.C->copy_to_host(ptr_C);
// D is the output buffer, so it is sized but not copied from the device.
host_data_D.resize(gemm_workspace_.Reference->bytes());
ptr_D = host_data_D.data();
}
//
// Launch the reference GEMM through the library handle.
//
library::Handle handle;
handle.set_provider(provider);
Status status = handle.gemm_universal(
problem_.mode,
gemm_workspace_.configuration.problem_size.m(),
gemm_workspace_.configuration.problem_size.n(),
gemm_workspace_.configuration.problem_size.k(),
gemm_desc.tile_description.math_instruction.element_accumulator,
gemm_desc.element_epilogue,
problem_.alpha.data(),
gemm_desc.A.element,
gemm_desc.A.layout,
gemm_desc.transform_A,
ptr_A,
int(gemm_workspace_.configuration.lda),
gemm_desc.B.element,
gemm_desc.B.layout,
gemm_desc.transform_B,
ptr_B,
int(gemm_workspace_.configuration.ldb),
problem_.beta.data(),
gemm_desc.C.element,
gemm_desc.C.layout,
ptr_C,
int(gemm_workspace_.configuration.ldc),
gemm_desc.D.element,
gemm_desc.D.layout,
ptr_D,
int(gemm_workspace_.configuration.ldd),
gemm_workspace_.configuration.batch_count,
gemm_workspace_.A->batch_stride(),
gemm_workspace_.B->batch_stride(),
gemm_workspace_.C->batch_stride(),
gemm_workspace_.Reference->batch_stride());
if (status != Status::kSuccess) {
// Provider cannot handle this problem; record and try the next one.
results_.back().verification_map[provider] = Disposition::kNotRun;
continue;
}
results_.back().status = status;
if (provider == library::Provider::kReferenceHost) {
// Move the host-computed reference back to the device for comparison.
gemm_workspace_.Reference->copy_from_host(ptr_D);
}
//
// Verify results
//
results_.back().verification_map[provider] = compare_tensors(
options,
*gemm_workspace_.Computed,
*gemm_workspace_.Reference,
gemm_workspace_.Computed->batch_stride()
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[provider] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
gemm_desc,
library::Provider::kCUTLASS,
provider);
}
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
// Measures CUTLASS performance for the current problem.  The argument
// structure is rebuilt from scratch here because the verification step may
// have repointed C/D at the Reference tensor; the timed loop itself is
// delegated to profile_cutlass_().  Always returns true ("continue
// profiling").
bool GemmOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.C = gemm_workspace_.C->data();
gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
gemm_workspace_.arguments.batch_stride_A = gemm_workspace_.A->batch_stride();
gemm_workspace_.arguments.batch_stride_B = gemm_workspace_.B->batch_stride();
gemm_workspace_.arguments.batch_stride_C = gemm_workspace_.C->batch_stride();
gemm_workspace_.arguments.batch_stride_D = gemm_workspace_.Computed->batch_stride();
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
// Parallel split-K: the GEMM writes partial products into the device
// workspace with alpha=1 / beta=0; the real epilogue (user alpha/beta
// applied against C) is performed by the separate reduction kernel.
gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data();
gemm_workspace_.arguments.alpha = problem_.alpha_one.data();
gemm_workspace_.arguments.beta = problem_.beta_zero.data();
gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data();
gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->data();
gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->data();
gemm_workspace_.reduction_arguments.alpha = problem_.alpha.data();
gemm_workspace_.reduction_arguments.beta = problem_.beta.data();
gemm_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost;
}
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&gemm_workspace_.arguments,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data()
);
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Method to profile a CUTLASS Operation
// Runs the timed profiling loop for a CUTLASS GEMM operation.
//   runtime          - out: average kernel runtime reported by the GPU timer
//   arguments        - pointer to the argument struct passed to run()
//                      (profile() passes &gemm_workspace_.arguments)
//   host_workspace / device_workspace - operation scratch buffers
// Performs warmup iterations first, then times `options.profiling.iterations`
// runs, cycling through the redundant problem copies held in the workspace so
// successive iterations do not hit the same memory.
Status GemmOperationProfiler::profile_cutlass_(
double &runtime,
Options const &options,
library::Operation const *operation,
void *arguments,
void *host_workspace,
void *device_workspace) {
GpuTimer timer;
// initialize gemm underlying operation to handle parallel reduction
library::Operation const * underlying_operation = operation;
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
if (!(underlying_operation = library::find_gemm_operation_for_parallel_reduction(operation))) {
return Status::kErrorNotSupported;
}
}
//
// Optional sleep to limit power consumption and thermals
//
sleep(options.profiling.sleep_duration);
//
// Warmup loop
//
Status status;
for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) {
// Rotate through the workspace's problem copies (stride = batch_count).
int problem_idx = (iteration % gemm_workspace_.problem_count) * problem_.batch_count;
gemm_workspace_.arguments.A = gemm_workspace_.A->batch_data(problem_idx);
gemm_workspace_.arguments.B = gemm_workspace_.B->batch_data(problem_idx);
gemm_workspace_.arguments.C = gemm_workspace_.C->batch_data(problem_idx);
gemm_workspace_.arguments.D = gemm_workspace_.Computed->batch_data(problem_idx);
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data();
gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data();
gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->batch_data(problem_idx);
gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->batch_data(problem_idx);
}
// Execute the CUTLASS operation
status = underlying_operation->run(
&gemm_workspace_.arguments,
host_workspace,
device_workspace);
if (status != Status::kSuccess) {
return status;
}
// Run parallel reduction kernel for parallel split_k_mode
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
status = reduction_op_->run(
&gemm_workspace_.reduction_arguments,
gemm_workspace_.reduction_host_workspace.data(),
nullptr);
if (status != Status::kSuccess) {
return status;
}
}
}
//
// Initialize GPU timer
//
timer.start();
//
// Profiling loop
//
int Iterations = options.profiling.iterations;
int iteration = 0;
for (; iteration < Iterations; ++iteration) {
// Iterate over copies of the problem in memory
int workspace_idx = options.profiling.warmup_iterations + iteration;
int problem_idx = (workspace_idx % gemm_workspace_.problem_count) * problem_.batch_count;
gemm_workspace_.arguments.A = gemm_workspace_.A->batch_data(problem_idx);
gemm_workspace_.arguments.B = gemm_workspace_.B->batch_data(problem_idx);
gemm_workspace_.arguments.C = gemm_workspace_.C->batch_data(problem_idx);
gemm_workspace_.arguments.D = gemm_workspace_.Computed->batch_data(problem_idx);
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data();
gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data();
gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->batch_data(problem_idx);
gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->batch_data(problem_idx);
}
// NOTE(review): the warmup loop passes &gemm_workspace_.arguments while this
// call passes the `arguments` parameter.  The per-iteration pointer updates
// above land in gemm_workspace_.arguments, so this is only correct when the
// caller passes &gemm_workspace_.arguments (profile() does) - confirm before
// reusing this method with a different argument pointer.
status = underlying_operation->run(
arguments,
host_workspace,
device_workspace);
if (status != Status::kSuccess) {
return status;
}
// Run parallel reduction kernel for parallel split_k_mode
if (problem_.split_k_mode == library::SplitKMode::kParallel) {
status = reduction_op_->run(
&gemm_workspace_.reduction_arguments,
gemm_workspace_.reduction_host_workspace.data(),
nullptr);
if (status != Status::kSuccess) {
return status;
}
}
}
//
// Wait for completion
//
timer.stop_and_wait();
//
// Update performance result: average over the timed iterations.
//
runtime = timer.duration(iteration);
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
df49566ae66f91d6bab749bf178c223ae82fbe0f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***********************************************
streamcluster_cuda.cu
: parallelized code of streamcluster
- original code from PARSEC Benchmark Suite
- parallelization with CUDA API has been applied by
Sang-Ha (a.k.a Shawn) Lee - sl4ge@virginia.edu
University of Virginia
Department of Electrical and Computer Engineering
Department of Computer Science
***********************************************/
/* For a given point x, find the cost of the following operation:
* -- open a facility at x if there isn't already one there,
* -- for points y such that the assignment distance of y exceeds dist(y, x),
* make y a member of x,
* -- for facilities y such that reassigning y and all its members to x
* would save cost, realize this closing and reassignment.
*
* If the cost of this operation is negative (i.e., if this entire operation
* saves cost), perform this operation and return the amount of cost saved;
* otherwise, do nothing.
*/
/* numcenters will be updated to reflect the new number of centers */
/* z is the facility cost, x is the number of this point in the array
points */
#include "streamcluster_header.cu"
#include "prof.cu"
using namespace std;
#define THREADS_PER_BLOCK 512
#define MAXBLOCKS 65536
#define PROFILE
/* host memory analogous to device memory */
float *work_mem_h;
static float *coord_h;
float *gl_lower;
Point *p;
/* device memory */
float *work_mem_d;
float *coord_d;
int *center_table_d;
bool *switch_membership_d;
static int c; // counters
/* kernel */
/* One thread per point: computes the cost of attaching this point to a
 * candidate center x, accumulating per-point savings into work_mem_d.
 *   Layout: work_mem_d is num rows of (K+1) floats; column center_table[c]
 *   collects the loss of moving away from center c, column K collects the
 *   total gain of points that would switch to x.
 * Requires dim*sizeof(float) bytes of dynamic shared memory for coord_s.
 * The launch rounds the grid up to a multiple of the block size, so the
 * kernel must discard trailing threads (bounds check below); the original
 * version lacked this guard and read p[] / wrote work_mem_d out of bounds. */
__global__ void
pgain_kernel( int num,
int dim,
long x,
Point *p,
int K,
float *coord_d,
float *work_mem_d,
int *center_table_d,
bool *switch_membership_d
)
{
/* block ID and global thread ID */
const int block_id = blockIdx.x + gridDim.x * blockIdx.y;
const int thread_id = blockDim.x * block_id + threadIdx.x;
extern __shared__ float coord_s[]; // shared memory for coordinate of point[x]
/* coordinate mapping of point[x] to shared mem */
if(threadIdx.x == 0)
for(int i=0; i<dim; i++) { coord_s[i] = coord_d[i*num + x]; }
__syncthreads();
/* Trailing threads beyond the data range exit here - strictly after the
 * barrier, which must be reached by every thread in the block. */
if (thread_id >= num)
return;
/* cost between this point and point[x]: euclidean distance multiplied by weight */
float x_cost = 0.0;
for(int i=0; i<dim; i++)
x_cost += (coord_d[(i*num)+thread_id]-coord_s[i]) * (coord_d[(i*num)+thread_id]-coord_s[i]);
x_cost = x_cost * p[thread_id].weight;
float current_cost = p[thread_id].cost;
/* if computed cost is less then original (it saves), mark it as to reassign */
float *lower = &work_mem_d[thread_id*(K+1)];
if ( x_cost < current_cost ) {
switch_membership_d[thread_id] = 1;
lower[K] += x_cost - current_cost;
}
/* if computed cost is larger, save the difference */
else {
int assign = p[thread_id].assign;
lower[center_table_d[assign]] += current_cost - x_cost;
}
}
/* Print the diagnostic message on its own line, then terminate the whole
 * process with a failure exit code. */
void quit(char *message){
puts(message);
exit(1);
}
/* Allocate every device-side buffer used by pgain().
 *   num  - number of points
 *   dim  - dimensionality of each point
 *   kmax - maximum number of centers (caller passes kmax+1 headroom)
 * Aborts via quit() on the first failed allocation.
 * Fix: '&center_table_d' had been mangled into an HTML entity
 * ('¢er_...'), which does not compile; the address-of is restored. */
void allocDevMem(int num, int dim, int kmax){
if( hipMalloc((void**) &work_mem_d, kmax * num * sizeof(float))!= hipSuccess) quit("error allocating device memory");
if( hipMalloc((void**) &center_table_d, num * sizeof(int))!= hipSuccess) quit("error allocating device memory");
if( hipMalloc((void**) &switch_membership_d, num * sizeof(bool))!= hipSuccess) quit("error allocating device memory");
if( hipMalloc((void**) &p, num * sizeof(Point))!= hipSuccess) quit("error allocating device memory");
if( hipMalloc((void**) &coord_d, num * dim * sizeof(float))!= hipSuccess) quit("error allocating device memory");
}
/* Release every buffer acquired by allocDevMem() plus the host-side staging
 * areas allocated on the first pgain() call.  Frees are independent, so the
 * order is arbitrary. */
void freeDevMem(){
/* device allocations */
hipFree(coord_d);
hipFree(p);
hipFree(switch_membership_d);
hipFree(center_table_d);
hipFree(work_mem_d);
/* host allocations (pinned and pageable) */
hipHostFree(work_mem_h);
free(gl_lower);
free(coord_h);
}
/* Evaluate the gain of opening a facility at point x (PARSEC streamcluster,
 * GPU-accelerated).  Returns the cost that would be saved by opening x
 * (0 if opening saves nothing); when the gain is positive the reassignment
 * and center closings are actually performed, updating points->p[].assign,
 * points->p[].cost, is_center[] and *numcenters.
 *   x          - candidate point to open a center at
 *   points     - all points (host side)
 *   z          - facility (center-opening) cost
 *   numcenters - in/out: current number of centers
 *   kmax       - maximum number of centers (incremented by one below)
 *   serial / cpu_gpu_memcpy / memcpy_back / gpu_malloc / kernel -
 *                cumulative timing accumulators in seconds (PROFILE builds).
 * First invocation (c == 0) allocates device/host buffers and uploads the
 * transposed coordinate array; they persist until freeDevMem(). */
float pgain( long x, Points *points, float z, long int *numcenters, int kmax, bool *is_center, int *center_table, bool *switch_membership,
double *serial, double *cpu_gpu_memcpy, double *memcpy_back, double *gpu_malloc, double *kernel)
{
hipSetDevice(0);
#ifdef PROFILE
double t1 = gettime();
#endif
int K = *numcenters ; // number of centers
int num = points->num; // number of points
int dim = points->dim; // number of dimension
kmax++;
/***** build center index table *****/
// Maps each current center's point index to a dense index in [0, K).
int count = 0;
for( int i=0; i<num; i++){
if( is_center[i] )
center_table[i] = count++;
}
#ifdef PROFILE
double t2 = gettime();
*serial += t2 - t1;
#endif
/***** initial memory allocation and preparation for transfer : execute once *****/
if( c == 0 ) {
#ifdef PROFILE
double t3 = gettime();
#endif
allocDevMem(num, dim, kmax);
#ifdef PROFILE
double t4 = gettime();
*gpu_malloc += t4 - t3;
#endif
coord_h = (float*) malloc( num * dim * sizeof(float)); // coordinates (host)
gl_lower = (float*) malloc( kmax * sizeof(float) );
// Pinned host buffer for the per-point savings table (fast copy-back).
hipHostMalloc( (void**)&work_mem_h, kmax * num * sizeof(float) );
/* prepare mapping for point coordinates */
// Transposed to dimension-major layout so threads with consecutive ids
// read consecutive addresses in the kernel.
for(int i=0; i<dim; i++){
for(int j=0; j<num; j++)
coord_h[ (num*i)+j ] = points->p[j].coord[i];
}
#ifdef PROFILE
double t5 = gettime();
*serial += t5 - t4;
#endif
/* copy coordinate to device memory */
hipMemcpy( switch_membership_d, switch_membership, num*sizeof(bool), hipMemcpyHostToDevice);
hipMemcpy( coord_d, coord_h, num*dim*sizeof(float), hipMemcpyHostToDevice);
#ifdef PROFILE
double t6 = gettime();
*cpu_gpu_memcpy += t6 - t5;
#endif
}
#ifdef PROFILE
double t7 = gettime();
#endif
/***** memory transfer from host to device *****/
/* copy to device memory */
hipMemcpy( center_table_d, center_table, num*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( p, points->p, num * sizeof(Point), hipMemcpyHostToDevice);
/* initialize device memory */
hipMemset( switch_membership_d, 0, num * sizeof(bool) );
hipMemset( work_mem_d, 0, kmax * num * sizeof(float) );
#ifdef PROFILE
double t8 = gettime();
*cpu_gpu_memcpy += t8 - t7;
#endif
/***** kernel execution *****/
/* Determine the number of thread blocks in the x- and y-dimension */
// Blocks beyond 65536 spill into the grid's y-dimension.
int num_blocks = (int) ((float) (num + THREADS_PER_BLOCK - 1) / (float) THREADS_PER_BLOCK);
int num_blocks_y = (int) ((float) (num_blocks + MAXBLOCKS - 1) / (float) MAXBLOCKS);
int num_blocks_x = (int) ((float) (num_blocks+num_blocks_y - 1) / (float) num_blocks_y);
dim3 grid_size(num_blocks_x, num_blocks_y, 1);
// Dynamic shared memory holds the dim coordinates of point[x].
size_t smSize = dim * sizeof(float);
#ifdef PROFILE
double t9 = gettime();
#endif
GpuProfiling::prepareProfiling( grid_size, THREADS_PER_BLOCK, smSize );
hipLaunchKernelGGL(( pgain_kernel), dim3(grid_size), dim3(THREADS_PER_BLOCK), smSize, 0,
num, // in: # of data
dim, // in: dimension of point coordinates
x, // in: point to open a center at
p, // out: data point array
K, // in: number of centers
coord_d, // in: array of point coordinates
work_mem_d, // out: cost and lower field array
center_table_d, // in: center index table
switch_membership_d // out: changes in membership
);
GpuProfiling::addResults("pgain_kernel");
hipDeviceSynchronize();
#ifdef PROFILE
double t10 = gettime();
*kernel += t10 - t9;
#endif
/***** copy back to host for CPU side work *****/
// Only the first K+1 columns per point are meaningful this iteration.
hipMemcpy(work_mem_h, work_mem_d, (K+1) *num*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(switch_membership, switch_membership_d, num * sizeof(bool), hipMemcpyDeviceToHost);
#ifdef PROFILE
double t11 = gettime();
*memcpy_back += t11 - t10;
#endif
/****** cpu side work *****/
int numclose = 0;
float gl_cost = z;
/* compute the number of centers to close if we are to open i */
for(int i=0; i < num; i++){
if( is_center[i] ) {
float low = z;
// Sum, over all points, the loss of detaching them from center i.
for( int j = 0; j < num; j++ )
low += work_mem_h[ j*(K+1) + center_table[i] ];
gl_lower[center_table[i]] = low;
if ( low > 0 ) {
// Closing center i is profitable; fold its saving into row i's total.
numclose++;
work_mem_h[i*(K+1)+K] -= low;
}
}
gl_cost += work_mem_h[i*(K+1)+K];
}
/* if opening a center at x saves cost (i.e. cost is negative) do so
otherwise, do nothing */
if ( gl_cost < 0 ) {
for(int i=0; i<num; i++){
bool close_center = gl_lower[center_table[points->p[i].assign]] > 0 ;
if ( switch_membership[i] || close_center ) {
points->p[i].cost = points->p[i].weight * dist(points->p[i], points->p[x], points->dim);
points->p[i].assign = x;
}
}
for(int i=0; i<num; i++){
if( is_center[i] && gl_lower[center_table[i]] > 0 )
is_center[i] = false;
}
is_center[x] = true;
*numcenters = *numcenters +1 - numclose;
}
else
gl_cost = 0; // opening x would not save cost, so report zero gain
#ifdef PROFILE
double t12 = gettime();
*serial += t12 - t11;
#endif
c++;
return -gl_cost;
}
| df49566ae66f91d6bab749bf178c223ae82fbe0f.cu | /***********************************************
streamcluster_cuda.cu
: parallelized code of streamcluster
- original code from PARSEC Benchmark Suite
- parallelization with CUDA API has been applied by
Sang-Ha (a.k.a Shawn) Lee - sl4ge@virginia.edu
University of Virginia
Department of Electrical and Computer Engineering
Department of Computer Science
***********************************************/
/* For a given point x, find the cost of the following operation:
* -- open a facility at x if there isn't already one there,
* -- for points y such that the assignment distance of y exceeds dist(y, x),
* make y a member of x,
* -- for facilities y such that reassigning y and all its members to x
* would save cost, realize this closing and reassignment.
*
* If the cost of this operation is negative (i.e., if this entire operation
* saves cost), perform this operation and return the amount of cost saved;
* otherwise, do nothing.
*/
/* numcenters will be updated to reflect the new number of centers */
/* z is the facility cost, x is the number of this point in the array
points */
#include "streamcluster_header.cu"
#include "prof.cu"
using namespace std;
#define THREADS_PER_BLOCK 512
#define MAXBLOCKS 65536
#define PROFILE
/* host memory analogous to device memory */
float *work_mem_h;
static float *coord_h;
float *gl_lower;
Point *p;
/* device memory */
float *work_mem_d;
float *coord_d;
int *center_table_d;
bool *switch_membership_d;
static int c; // counters
/* kernel */
/* One thread per point: computes the cost of attaching this point to a
 * candidate center x, accumulating per-point savings into work_mem_d.
 *   Layout: work_mem_d is num rows of (K+1) floats; column center_table[c]
 *   collects the loss of moving away from center c, column K collects the
 *   total gain of points that would switch to x.
 * Requires dim*sizeof(float) bytes of dynamic shared memory for coord_s.
 * The launch rounds the grid up to a multiple of the block size, so the
 * kernel must discard trailing threads (bounds check below); the original
 * version lacked this guard and read p[] / wrote work_mem_d out of bounds. */
__global__ void
pgain_kernel( int num,
int dim,
long x,
Point *p,
int K,
float *coord_d,
float *work_mem_d,
int *center_table_d,
bool *switch_membership_d
)
{
/* block ID and global thread ID */
const int block_id = blockIdx.x + gridDim.x * blockIdx.y;
const int thread_id = blockDim.x * block_id + threadIdx.x;
extern __shared__ float coord_s[]; // shared memory for coordinate of point[x]
/* coordinate mapping of point[x] to shared mem */
if(threadIdx.x == 0)
for(int i=0; i<dim; i++) { coord_s[i] = coord_d[i*num + x]; }
__syncthreads();
/* Trailing threads beyond the data range exit here - strictly after the
 * barrier, which must be reached by every thread in the block. */
if (thread_id >= num)
return;
/* cost between this point and point[x]: euclidean distance multiplied by weight */
float x_cost = 0.0;
for(int i=0; i<dim; i++)
x_cost += (coord_d[(i*num)+thread_id]-coord_s[i]) * (coord_d[(i*num)+thread_id]-coord_s[i]);
x_cost = x_cost * p[thread_id].weight;
float current_cost = p[thread_id].cost;
/* if computed cost is less then original (it saves), mark it as to reassign */
float *lower = &work_mem_d[thread_id*(K+1)];
if ( x_cost < current_cost ) {
switch_membership_d[thread_id] = 1;
lower[K] += x_cost - current_cost;
}
/* if computed cost is larger, save the difference */
else {
int assign = p[thread_id].assign;
lower[center_table_d[assign]] += current_cost - x_cost;
}
}
/* Print the diagnostic message on its own line, then terminate the whole
 * process with a failure exit code. */
void quit(char *message){
puts(message);
exit(1);
}
/* Allocate every device-side buffer used by pgain().
 *   num  - number of points
 *   dim  - dimensionality of each point
 *   kmax - maximum number of centers (caller passes kmax+1 headroom)
 * Aborts via quit() on the first failed allocation.
 * Fix: '&center_table_d' had been mangled into an HTML entity
 * ('¢er_...'), which does not compile; the address-of is restored. */
void allocDevMem(int num, int dim, int kmax){
if( cudaMalloc((void**) &work_mem_d, kmax * num * sizeof(float))!= cudaSuccess) quit("error allocating device memory");
if( cudaMalloc((void**) &center_table_d, num * sizeof(int))!= cudaSuccess) quit("error allocating device memory");
if( cudaMalloc((void**) &switch_membership_d, num * sizeof(bool))!= cudaSuccess) quit("error allocating device memory");
if( cudaMalloc((void**) &p, num * sizeof(Point))!= cudaSuccess) quit("error allocating device memory");
if( cudaMalloc((void**) &coord_d, num * dim * sizeof(float))!= cudaSuccess) quit("error allocating device memory");
}
/* Release every buffer acquired by allocDevMem() plus the host-side staging
 * areas allocated on the first pgain() call.  Frees are independent, so the
 * order is arbitrary. */
void freeDevMem(){
/* device allocations */
cudaFree(coord_d);
cudaFree(p);
cudaFree(switch_membership_d);
cudaFree(center_table_d);
cudaFree(work_mem_d);
/* host allocations (pinned and pageable) */
cudaFreeHost(work_mem_h);
free(gl_lower);
free(coord_h);
}
/* Evaluate the gain of opening a facility at point x (PARSEC streamcluster,
 * GPU-accelerated).  Returns the cost that would be saved by opening x
 * (0 if opening saves nothing); when the gain is positive the reassignment
 * and center closings are actually performed, updating points->p[].assign,
 * points->p[].cost, is_center[] and *numcenters.
 *   x          - candidate point to open a center at
 *   points     - all points (host side)
 *   z          - facility (center-opening) cost
 *   numcenters - in/out: current number of centers
 *   kmax       - maximum number of centers (incremented by one below)
 *   serial / cpu_gpu_memcpy / memcpy_back / gpu_malloc / kernel -
 *                cumulative timing accumulators in seconds (PROFILE builds).
 * First invocation (c == 0) allocates device/host buffers and uploads the
 * transposed coordinate array; they persist until freeDevMem().
 * Fix: deprecated cudaThreadSynchronize() replaced with
 * cudaDeviceSynchronize() (same semantics; matches the HIP translation). */
float pgain( long x, Points *points, float z, long int *numcenters, int kmax, bool *is_center, int *center_table, bool *switch_membership,
double *serial, double *cpu_gpu_memcpy, double *memcpy_back, double *gpu_malloc, double *kernel)
{
cudaSetDevice(0);
#ifdef PROFILE
double t1 = gettime();
#endif
int K = *numcenters ; // number of centers
int num = points->num; // number of points
int dim = points->dim; // number of dimension
kmax++;
/***** build center index table *****/
// Maps each current center's point index to a dense index in [0, K).
int count = 0;
for( int i=0; i<num; i++){
if( is_center[i] )
center_table[i] = count++;
}
#ifdef PROFILE
double t2 = gettime();
*serial += t2 - t1;
#endif
/***** initial memory allocation and preparation for transfer : execute once *****/
if( c == 0 ) {
#ifdef PROFILE
double t3 = gettime();
#endif
allocDevMem(num, dim, kmax);
#ifdef PROFILE
double t4 = gettime();
*gpu_malloc += t4 - t3;
#endif
coord_h = (float*) malloc( num * dim * sizeof(float)); // coordinates (host)
gl_lower = (float*) malloc( kmax * sizeof(float) );
// Pinned host buffer for the per-point savings table (fast copy-back).
cudaMallocHost( (void**)&work_mem_h, kmax * num * sizeof(float) );
/* prepare mapping for point coordinates */
// Transposed to dimension-major layout so threads with consecutive ids
// read consecutive addresses in the kernel.
for(int i=0; i<dim; i++){
for(int j=0; j<num; j++)
coord_h[ (num*i)+j ] = points->p[j].coord[i];
}
#ifdef PROFILE
double t5 = gettime();
*serial += t5 - t4;
#endif
/* copy coordinate to device memory */
cudaMemcpy( switch_membership_d, switch_membership, num*sizeof(bool), cudaMemcpyHostToDevice);
cudaMemcpy( coord_d, coord_h, num*dim*sizeof(float), cudaMemcpyHostToDevice);
#ifdef PROFILE
double t6 = gettime();
*cpu_gpu_memcpy += t6 - t5;
#endif
}
#ifdef PROFILE
double t7 = gettime();
#endif
/***** memory transfer from host to device *****/
/* copy to device memory */
cudaMemcpy( center_table_d, center_table, num*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( p, points->p, num * sizeof(Point), cudaMemcpyHostToDevice);
/* initialize device memory */
cudaMemset( switch_membership_d, 0, num * sizeof(bool) );
cudaMemset( work_mem_d, 0, kmax * num * sizeof(float) );
#ifdef PROFILE
double t8 = gettime();
*cpu_gpu_memcpy += t8 - t7;
#endif
/***** kernel execution *****/
/* Determine the number of thread blocks in the x- and y-dimension */
// Blocks beyond 65536 spill into the grid's y-dimension.
int num_blocks = (int) ((float) (num + THREADS_PER_BLOCK - 1) / (float) THREADS_PER_BLOCK);
int num_blocks_y = (int) ((float) (num_blocks + MAXBLOCKS - 1) / (float) MAXBLOCKS);
int num_blocks_x = (int) ((float) (num_blocks+num_blocks_y - 1) / (float) num_blocks_y);
dim3 grid_size(num_blocks_x, num_blocks_y, 1);
// Dynamic shared memory holds the dim coordinates of point[x].
size_t smSize = dim * sizeof(float);
#ifdef PROFILE
double t9 = gettime();
#endif
GpuProfiling::prepareProfiling( grid_size, THREADS_PER_BLOCK, smSize );
pgain_kernel<<< grid_size, THREADS_PER_BLOCK, smSize>>>(
num, // in: # of data
dim, // in: dimension of point coordinates
x, // in: point to open a center at
p, // out: data point array
K, // in: number of centers
coord_d, // in: array of point coordinates
work_mem_d, // out: cost and lower field array
center_table_d, // in: center index table
switch_membership_d // out: changes in membership
);
GpuProfiling::addResults("pgain_kernel");
cudaDeviceSynchronize();
#ifdef PROFILE
double t10 = gettime();
*kernel += t10 - t9;
#endif
/***** copy back to host for CPU side work *****/
// Only the first K+1 columns per point are meaningful this iteration.
cudaMemcpy(work_mem_h, work_mem_d, (K+1) *num*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(switch_membership, switch_membership_d, num * sizeof(bool), cudaMemcpyDeviceToHost);
#ifdef PROFILE
double t11 = gettime();
*memcpy_back += t11 - t10;
#endif
/****** cpu side work *****/
int numclose = 0;
float gl_cost = z;
/* compute the number of centers to close if we are to open i */
for(int i=0; i < num; i++){
if( is_center[i] ) {
float low = z;
// Sum, over all points, the loss of detaching them from center i.
for( int j = 0; j < num; j++ )
low += work_mem_h[ j*(K+1) + center_table[i] ];
gl_lower[center_table[i]] = low;
if ( low > 0 ) {
// Closing center i is profitable; fold its saving into row i's total.
numclose++;
work_mem_h[i*(K+1)+K] -= low;
}
}
gl_cost += work_mem_h[i*(K+1)+K];
}
/* if opening a center at x saves cost (i.e. cost is negative) do so
otherwise, do nothing */
if ( gl_cost < 0 ) {
for(int i=0; i<num; i++){
bool close_center = gl_lower[center_table[points->p[i].assign]] > 0 ;
if ( switch_membership[i] || close_center ) {
points->p[i].cost = points->p[i].weight * dist(points->p[i], points->p[x], points->dim);
points->p[i].assign = x;
}
}
for(int i=0; i<num; i++){
if( is_center[i] && gl_lower[center_table[i]] > 0 )
is_center[i] = false;
}
is_center[x] = true;
*numcenters = *numcenters +1 - numclose;
}
else
gl_cost = 0; // opening x would not save cost, so report zero gain
#ifdef PROFILE
double t12 = gettime();
*serial += t12 - t11;
#endif
c++;
return -gl_cost;
}
|
df9607e5ca5710f203e4b68435bb27ff57b063cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <stdio.h>
#include <math.h> /* ceil */
// Max Threads per block in GeForce 210
#define TxB 512
//Kernel binarizacin de imagen
// Binarize an RGBA image: one thread per pixel.  Each pixel is converted to
// a grey value using the .299/.587/.114 luma weights and the output byte is
// 255 when that grey value exceeds `threshold`, otherwise 0.
// uchar4 component mapping: .x -> R, .y -> G, .z -> B, .w -> A; the alpha
// channel is ignored.
__global__
void image_binarization_kernel(const uchar4* const rgbaImage,
unsigned char* const outputImage,
int numRows, int numCols, int threshold)
{
const int pixel = blockIdx.x * blockDim.x + threadIdx.x;
const int total = numRows * numCols;
if (pixel >= total)
return; // grid is rounded up; trailing threads own no pixel
uchar4 rgba = rgbaImage[pixel];
// Greyscale conversion first ...
unsigned char grey = .299f * rgba.x +
.587f * rgba.y +
.114f * rgba.z;
// ... then threshold: (grey > threshold ? 255 : 0)
outputImage[pixel] = (grey > threshold) ? 255 : 0;
}
// Host wrapper: binarizes a device RGBA image into a device greyscale mask.
// d_rgbaImage / d_outputImage must be device pointers with numRows*numCols
// elements each; threshold is the black/white cut-off.
// NOTE(review): the kernel takes int numRows/numCols while this wrapper
// takes size_t -- implicit narrowing for very large images; confirm limits.
void image_binarization(uchar4* const d_rgbaImage,
                        unsigned char* const d_outputImage,
                        size_t numRows, size_t numCols, int threshold)
{
    // Pixel position is irrelevant to this algorithm, so the launch strategy
    // is simply to cover every pixel with threads along the X axis.
    long long int total_px = numRows * numCols; // total pixels
    // BUG FIX: `ceil(total_px / TxB)` performed *integer* division before the
    // (then no-op) ceil, so up to TxB-1 trailing pixels were never processed
    // whenever total_px was not a multiple of TxB. Use integer ceiling
    // division instead.
    long int grids_n = (total_px + TxB - 1) / TxB; // grid block count
    const dim3 blockSize(TxB, 1, 1);
    const dim3 gridSize(grids_n, 1, 1);
    hipLaunchKernelGGL(( image_binarization_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_outputImage, numRows, numCols, threshold);
    hipDeviceSynchronize();
    checkCudaErrors(hipGetLastError());
} | df9607e5ca5710f203e4b68435bb27ff57b063cf.cu | #include "utils.h"
#include <stdio.h>
#include <math.h> /* ceil */
// Max Threads per block in GeForce 210
#define TxB 512
// Image binarization kernel: converts each RGBA pixel to greyscale and
// thresholds it to pure black (0) or pure white (255).
// One thread per pixel; launched as a 1-D grid of 1-D blocks.
__global__
void image_binarization_kernel(const uchar4* const rgbaImage,
                               unsigned char* const outputImage,
                               int numRows, int numCols, int threshold)
{
    // uchar4 component mapping: .x -> R, .y -> G, .z -> B, .w -> A
    // (the alpha channel is ignored)
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= numRows * numCols)
        return;  // guard: the grid may contain more threads than pixels
    const uchar4 px = rgbaImage[idx];
    // Greyscale conversion first.
    const unsigned char grey = .299f * px.x +
                               .587f * px.y +
                               .114f * px.z;
    // Binarize: (grey > threshold ? 255 : 0)
    outputImage[idx] = (grey > threshold) ? 255 : 0;
}
// Host wrapper: binarizes a device RGBA image into a device greyscale mask.
// d_rgbaImage / d_outputImage must be device pointers with numRows*numCols
// elements each; threshold is the black/white cut-off.
// NOTE(review): the kernel takes int numRows/numCols while this wrapper
// takes size_t -- implicit narrowing for very large images; confirm limits.
void image_binarization(uchar4* const d_rgbaImage,
                        unsigned char* const d_outputImage,
                        size_t numRows, size_t numCols, int threshold)
{
    // Pixel position is irrelevant to this algorithm, so the launch strategy
    // is simply to cover every pixel with threads along the X axis.
    long long int total_px = numRows * numCols; // total pixels
    // BUG FIX: `ceil(total_px / TxB)` performed *integer* division before the
    // (then no-op) ceil, so up to TxB-1 trailing pixels were never processed
    // whenever total_px was not a multiple of TxB. Use integer ceiling
    // division instead.
    long int grids_n = (total_px + TxB - 1) / TxB; // grid block count
    const dim3 blockSize(TxB, 1, 1);
    const dim3 gridSize(grids_n, 1, 1);
    image_binarization_kernel<<<gridSize, blockSize>>>(d_rgbaImage, d_outputImage, numRows, numCols, threshold);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
} |
4f35c544095d765ca1733c0aabb39489990f2427.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "nvstrings/NVStrings.h"
#include "nvstrings/NVText.h"
#include "../custring_view.cuh"
#include "../util.h"
// Device functor: computes the Porter-stemmer "measure" of one string per
// invocation -- the number of (vowel-run -> consonant) transitions, i.e. the
// count m of VC pairs in the word's [C](VC)^m[V] decomposition.
struct porter_stemmer_measure_fn
{
    custring_view_array d_strings; // device array of string pointers; entries may be null
    custring_view* d_vowels;       // characters treated as vowels
    Char y_char;                   // character given the special Porter 'y' treatment
    unsigned int* d_results;       // out: one measure value per string
    // True when the character at `index` behaves as a consonant.
    // Non-vowel, non-'y' characters are always consonants; 'y' is a
    // consonant at position 0, or when the preceding character is a vowel.
    __device__ bool is_consonant( custring_view* dstr, int index )
    {
        Char ch = dstr->at(index);
        if( d_vowels->find(ch) >= 0 )
            return false;
        if( (ch != y_char) || (index==0) )
            return true;
        ch = dstr->at(index-1); // only if previous char
        return d_vowels->find(ch)>=0; // is not a consonant
    }
    // Called once per string index; scans the string counting the number of
    // times a vowel run is terminated by a consonant.
    __device__ void operator()(unsigned int idx)
    {
        custring_view* dstr = d_strings[idx];
        if( !dstr )
            return; // NOTE(review): null strings leave d_results[idx] unwritten -- caller must pre-initialize the buffer; confirm
        unsigned int vcs = 0;                    // VC sequences seen so far
        bool vowel_run = !is_consonant(dstr,0);  // currently inside a vowel run?
        for( auto itr=dstr->begin(); itr!=dstr->end(); itr++ )
        {
            if( is_consonant(dstr,itr.position()) )
            {
                if( vowel_run ) // a vowel run just ended: one more VC pair
                    vcs++;
                vowel_run = false;
            }
            else
                vowel_run = true;
        }
        d_results[idx] = vcs;
    }
};
// Computes the Porter-stemmer measure for every string in `strs`.
//   vowels : characters treated as vowels (defaults to "aeiou" when null)
//   y_char : character given the Porter 'y' rule (defaults to "y" when null)
//   results: output buffer of strs.size() values; device memory when
//            bdevmem is true, otherwise a host buffer the results are
//            copied back into
// NOTE(review): this function always returns 0 on every path -- callers
// must not interpret the return value as a count; confirm intended.
unsigned int NVText::porter_stemmer_measure(NVStrings& strs, const char* vowels, const char* y_char, unsigned int* results, bool bdevmem )
{
    unsigned int count = strs.size();
    if( count==0 )
        return 0; // nothing to do
    auto execpol = rmm::exec_policy(0);
    // setup results vector
    unsigned int* d_results = results;
    if( !bdevmem ) // caller gave a host buffer: stage results in device memory
        d_results = device_alloc<unsigned int>(count,0);
    if( vowels==nullptr )
        vowels = "aeiou";
    custring_view* d_vowels = custring_from_host(vowels);
    if( y_char==nullptr )
        y_char = "y";
    Char char_y;
    custring_view::char_to_Char(y_char,char_y);
    // get the string pointers
    rmm::device_vector<custring_view*> strings(count,nullptr);
    custring_view** d_strings = strings.data().get();
    strs.create_custring_index(d_strings);
    // do the measure: one functor invocation per string index
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        porter_stemmer_measure_fn{d_strings,d_vowels,char_y,d_results});
    // done
    if( !bdevmem )
    {
        // copy staged results back into the caller's host buffer
        // NOTE(review): async copy on stream 0 followed by a stream-ordered
        // free -- presumably safe under RMM's stream-ordered semantics; verify.
        CUDA_TRY( hipMemcpyAsync(results,d_results,count*sizeof(unsigned int),hipMemcpyDeviceToHost))
        RMM_FREE(d_results,0);
    }
    RMM_FREE(d_vowels,0);
    return 0;
} | 4f35c544095d765ca1733c0aabb39489990f2427.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "nvstrings/NVStrings.h"
#include "nvstrings/NVText.h"
#include "../custring_view.cuh"
#include "../util.h"
// Device functor: computes the Porter-stemmer "measure" of one string per
// invocation -- the number of (vowel-run -> consonant) transitions, i.e. the
// count m of VC pairs in the word's [C](VC)^m[V] decomposition.
struct porter_stemmer_measure_fn
{
    custring_view_array d_strings; // device array of string pointers; entries may be null
    custring_view* d_vowels;       // characters treated as vowels
    Char y_char;                   // character given the special Porter 'y' treatment
    unsigned int* d_results;       // out: one measure value per string
    // True when the character at `index` behaves as a consonant.
    // Non-vowel, non-'y' characters are always consonants; 'y' is a
    // consonant at position 0, or when the preceding character is a vowel.
    __device__ bool is_consonant( custring_view* dstr, int index )
    {
        Char ch = dstr->at(index);
        if( d_vowels->find(ch) >= 0 )
            return false;
        if( (ch != y_char) || (index==0) )
            return true;
        ch = dstr->at(index-1); // only if previous char
        return d_vowels->find(ch)>=0; // is not a consonant
    }
    // Called once per string index; scans the string counting the number of
    // times a vowel run is terminated by a consonant.
    __device__ void operator()(unsigned int idx)
    {
        custring_view* dstr = d_strings[idx];
        if( !dstr )
            return; // NOTE(review): null strings leave d_results[idx] unwritten -- caller must pre-initialize the buffer; confirm
        unsigned int vcs = 0;                    // VC sequences seen so far
        bool vowel_run = !is_consonant(dstr,0);  // currently inside a vowel run?
        for( auto itr=dstr->begin(); itr!=dstr->end(); itr++ )
        {
            if( is_consonant(dstr,itr.position()) )
            {
                if( vowel_run ) // a vowel run just ended: one more VC pair
                    vcs++;
                vowel_run = false;
            }
            else
                vowel_run = true;
        }
        d_results[idx] = vcs;
    }
};
// Computes the Porter-stemmer measure for every string in `strs`.
//   vowels : characters treated as vowels (defaults to "aeiou" when null)
//   y_char : character given the Porter 'y' rule (defaults to "y" when null)
//   results: output buffer of strs.size() values; device memory when
//            bdevmem is true, otherwise a host buffer the results are
//            copied back into
// NOTE(review): this function always returns 0 on every path -- callers
// must not interpret the return value as a count; confirm intended.
unsigned int NVText::porter_stemmer_measure(NVStrings& strs, const char* vowels, const char* y_char, unsigned int* results, bool bdevmem )
{
    unsigned int count = strs.size();
    if( count==0 )
        return 0; // nothing to do
    auto execpol = rmm::exec_policy(0);
    // setup results vector
    unsigned int* d_results = results;
    if( !bdevmem ) // caller gave a host buffer: stage results in device memory
        d_results = device_alloc<unsigned int>(count,0);
    if( vowels==nullptr )
        vowels = "aeiou";
    custring_view* d_vowels = custring_from_host(vowels);
    if( y_char==nullptr )
        y_char = "y";
    Char char_y;
    custring_view::char_to_Char(y_char,char_y);
    // get the string pointers
    rmm::device_vector<custring_view*> strings(count,nullptr);
    custring_view** d_strings = strings.data().get();
    strs.create_custring_index(d_strings);
    // do the measure: one functor invocation per string index
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        porter_stemmer_measure_fn{d_strings,d_vowels,char_y,d_results});
    // done
    if( !bdevmem )
    {
        // copy staged results back into the caller's host buffer
        // NOTE(review): async copy on stream 0 followed by a stream-ordered
        // free -- presumably safe under RMM's stream-ordered semantics; verify.
        CUDA_TRY( cudaMemcpyAsync(results,d_results,count*sizeof(unsigned int),cudaMemcpyDeviceToHost))
        RMM_FREE(d_results,0);
    }
    RMM_FREE(d_vowels,0);
    return 0;
} |
9f09bc161ee5c8b8ff98ae6bd3be504aaa903ead.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_bfs.cu
*
* @brief Simple test driver program for breadth-first search.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <algorithm>
#include <iostream>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// BFS includes
#include <gunrock/app/bfs/bfs_enactor.cuh>
#include <gunrock/app/bfs/bfs_problem.cuh>
#include <gunrock/app/bfs/bfs_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
using namespace gunrock;
using namespace gunrock::app;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::bfs;
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
// Prints the command-line usage/help text for this BFS tester to stdout.
void Usage()
{
    printf(
        "test <graph-type> [graph-type-arguments]\n"
        "Graph type and graph type arguments:\n"
        " market <matrix-market-file-name>\n"
        " Reads a Matrix-Market coordinate-formatted graph of\n"
        " directed/undirected edges from STDIN (or from the\n"
        " optionally-specified file).\n"
        " rmat (default: rmat_scale = 10, a = 0.57, b = c = 0.19)\n"
        " Generate R-MAT graph as input\n"
        " --rmat_scale=<vertex-scale>\n"
        " --rmat_nodes=<number-nodes>\n"
        " --rmat_edgefactor=<edge-factor>\n"
        " --rmat_edges=<number-edges>\n"
        " --rmat_a=<factor> --rmat_b=<factor> --rmat_c=<factor>\n"
        " --rmat_seed=<seed>\n"
        " rgg (default: rgg_scale = 10, rgg_thfactor = 0.55)\n"
        " Generate Random Geometry Graph as input\n"
        " --rgg_scale=<vertex-scale>\n"
        " --rgg_nodes=<number-nodes>\n"
        " --rgg_thfactor=<threshold-factor>\n"
        " --rgg_threshold=<threshold>\n"
        " --rgg_vmultipiler=<vmultipiler>\n"
        " --rgg_seed=<seed>\n\n"
        "Optional arguments:\n"
        "[--device=<device_index>] Set GPU(s) for testing (Default: 0).\n"
        "[--undirected] Treat the graph as undirected (symmetric).\n"
        "[--idempotence] Whether or not to enable idempotent operation.\n"
        "[--instrumented] Keep kernels statics [Default: Disable].\n"
        " total_queued, search_depth and barrier duty.\n"
        " (a relative indicator of load imbalance.)\n"
        "[--src=<Vertex-ID|randomize|largestdegree>]\n"
        " Begins traversal from the source (Default: 0).\n"
        " If randomize: from a random source vertex.\n"
        " If largestdegree: from largest degree vertex.\n"
        "[--quick] Skip the CPU reference validation process.\n"
        "[--mark-pred] Keep both label info and predecessor info.\n"
        "[--disable-size-check] Disable frontier queue size check.\n"
        "[--grid-size=<grid size>] Maximum allowed grid size setting.\n"
        "[--queue-sizing=<factor>] Allocates a frontier queue sized at: \n"
        " (graph-edges * <factor>). (Default: 1.0)\n"
        "[--in-sizing=<in/out_queue_scale_factor>]\n"
        " Allocates a frontier queue sized at: \n"
        " (graph-edges * <factor>). (Default: 1.0)\n"
        "[--v] Print verbose per iteration debug info.\n"
        "[--iteration-num=<num>] Number of runs to perform the test.\n"
        "[--traversal-mode=<0|1>] Set traversal strategy, 0 for Load-Balanced\n"
        " 1 for Dynamic-Cooperative (Default: dynamic\n"
        " determine based on average degree).\n"
        "[--partition_method=<random|biasrandom|clustered|metis>]\n"
        " Choose partitioner (Default use random).\n"
        "[--quiet] No output (unless --json is specified).\n"
        "[--json] Output JSON-format statistics to STDOUT.\n"
        "[--jsonfile=<name>] Output JSON-format statistics to file <name>\n"
        "[--jsondir=<dir>] Output JSON-format statistics to <dir>/name,\n"
        " where name is auto-generated.\n"
    );
}
/**
* @brief Displays the BFS result (i.e., distance from source)
*
* @tparam VertexId
* @tparam SizeT
* @tparam MARK_PREDECESSORS
* @tparam ENABLE_IDEMPOTENCE
*
* @param[in] labels Search depth from the source for each node.
* @param[in] preds Predecessor node id for each node.
* @param[in] num_nodes Number of nodes in the graph.
* @param[in] quiet Don't print out anything to stdout
*/
template <
    typename VertexId,
    typename SizeT,
    bool MARK_PREDECESSORS,
    bool ENABLE_IDEMPOTENCE >
void DisplaySolution(
    VertexId *labels,
    VertexId *preds,
    SizeT num_nodes,
    bool quiet = false)
{
    if (quiet) { return; }
    // Only the first 40 vertices are printed.
    SizeT shown = (num_nodes > 40) ? 40 : num_nodes;
    printf("\nFirst %d labels of the GPU result:\n", shown);
    printf("[");
    for (VertexId v = 0; v < shown; ++v)
    {
        // Print "vertex:label" and, when predecessors are tracked (and
        // idempotence is off), ",predecessor".
        PrintValue(v);
        printf(":");
        PrintValue(labels[v]);
        if (MARK_PREDECESSORS && !ENABLE_IDEMPOTENCE)
        {
            printf(",");
            PrintValue(preds[v]);
        }
        printf(" ");
    }
    printf("]\n");
}
/******************************************************************************
* BFS Testing Routines
*****************************************************************************/
/**
* @brief A simple CPU-based reference BFS ranking implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam MARK_PREDECESSORS
* @tparam ENABLE_IDEMPOTENCE
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] source_path Host-side vector to store CPU computed labels for each node
* @param[in] predecessor Host-side vector to store CPU computed predecessor for each node
* @param[in] src Source node where BFS starts
* @param[in] quiet Don't print out anything to stdout
*/
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool MARK_PREDECESSORS,
    bool ENABLE_IDEMPOTENCE >
void ReferenceBFS(
    const Csr<VertexId, Value, SizeT> *graph,
    VertexId *source_path,
    VertexId *predecessor,
    VertexId src,
    bool quiet = false)
{
    // Initialize labels: -1 ("undiscovered") in idempotent mode, otherwise
    // MaxValue-1 so that any computed distance compares smaller.
    for (VertexId i = 0; i < graph->nodes; ++i)
    {
        source_path[i] = ENABLE_IDEMPOTENCE ? -1 : util::MaxValue<VertexId>() - 1;
        if (MARK_PREDECESSORS)
        {
            predecessor[i] = -1;
        }
    }
    source_path[src] = 0;
    VertexId search_depth = 0;
    // Initialize queue for managing previously-discovered nodes
    std::deque<VertexId> frontier;
    frontier.push_back(src);
    // Perform BFS
    CpuTimer cpu_timer;
    cpu_timer.Start();
    while (!frontier.empty())
    {
        // Dequeue node from frontier
        VertexId dequeued_node = frontier.front();
        frontier.pop_front();
        VertexId neighbor_dist = source_path[dequeued_node] + 1;
        // Locate adjacency list
        SizeT edges_begin = graph->row_offsets[dequeued_node];
        SizeT edges_end = graph->row_offsets[dequeued_node + 1];
        for (SizeT edge = edges_begin; edge < edges_end; ++edge)
        {
            //Lookup neighbor and enqueue if undiscovered
            // (the == -1 test covers the idempotent init scheme, the > test
            // covers the MaxValue-1 scheme)
            VertexId neighbor = graph->column_indices[edge];
            if (source_path[neighbor] > neighbor_dist || source_path[neighbor] == -1)
            {
                source_path[neighbor] = neighbor_dist;
                if (MARK_PREDECESSORS)
                {
                    predecessor[neighbor] = dequeued_node;
                }
                if (search_depth < neighbor_dist)
                {
                    search_depth = neighbor_dist;
                }
                frontier.push_back(neighbor);
            }
        }
    }
    if (MARK_PREDECESSORS)
    {
        // The source has no predecessor by definition.
        predecessor[src] = -1;
    }
    cpu_timer.Stop();
    float elapsed = cpu_timer.ElapsedMillis();
    search_depth++;
    if (!quiet)
    {
        printf("CPU BFS finished in %lf msec. cpu_search_depth: %d\n",
               elapsed, search_depth);
    }
}
/**
* @brief Run BFS tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
* @tparam DEBUG
* @tparam SIZE_CHECK
* @tparam MARK_PREDECESSORS
* @tparam ENABLE_IDEMPOTENCE
*
* @param[in] info Pointer to info contains parameters and statistics.
*/
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT,
    bool DEBUG,
    bool SIZE_CHECK,
    bool MARK_PREDECESSORS,
    bool ENABLE_IDEMPOTENCE >
void RunTests(Info<VertexId, Value, SizeT> *info)
{
    typedef BFSProblem < VertexId,
            SizeT,
            Value,
            MARK_PREDECESSORS,
            ENABLE_IDEMPOTENCE,
            (MARK_PREDECESSORS && ENABLE_IDEMPOTENCE) >
            BfsProblem; // does not use double buffer
    typedef BFSEnactor < BfsProblem,
            INSTRUMENT,
            DEBUG,
            SIZE_CHECK >
            BfsEnactor;
    // parse configurations from mObject info
    Csr<VertexId, Value, SizeT> *graph = info->csr_ptr;
    VertexId src = info->info["source_vertex"].get_int64();
    int max_grid_size = info->info["max_grid_size"].get_int();
    int num_gpus = info->info["num_gpus"].get_int();
    double max_queue_sizing = info->info["max_queue_sizing"].get_real();
    double max_queue_sizing1 = info->info["max_queue_sizing1"].get_real();
    double max_in_sizing = info->info["max_in_sizing"].get_real();
    std::string partition_method = info->info["partition_method"].get_str();
    double partition_factor = info->info["partition_factor"].get_real();
    int partition_seed = info->info["partition_seed"].get_int();
    bool quiet_mode = info->info["quiet_mode"].get_bool();
    bool quick_mode = info->info["quick_mode"].get_bool();
    bool stream_from_host = info->info["stream_from_host"].get_bool();
    int traversal_mode = info->info["traversal_mode"].get_int();
    int iterations = 1; //disable since doesn't support mgpu stop condition. info->info["num_iteration"].get_int();
    json_spirit::mArray device_list = info->info["device_list"].get_array();
    // NOTE(review): gpu_idx is never freed in this function -- possible leak
    // unless the enactor/problem takes ownership; confirm.
    int* gpu_idx = new int[num_gpus];
    for (int i = 0; i < num_gpus; i++) gpu_idx[i] = device_list[i].get_int();
    // TODO: remove after merge mgpu-cq
    ContextPtr *context = (ContextPtr*) info->context;
    hipStream_t *streams = (hipStream_t*)info->streams;
    // allocate host-side label array (for both reference and GPU results)
    VertexId *reference_labels = new VertexId[graph->nodes];
    VertexId *reference_preds = new VertexId[graph->nodes];
    VertexId *h_labels = new VertexId[graph->nodes];
    VertexId *reference_check_label = (quick_mode) ? NULL : reference_labels;
    VertexId *reference_check_preds = NULL;
    VertexId *h_preds = NULL;
    if (MARK_PREDECESSORS)
    {
        h_preds = new VertexId[graph->nodes];
        if (!quick_mode)
        {
            reference_check_preds = reference_preds;
        }
    }
    // Record the free-memory baseline of each GPU for the usage report below.
    size_t *org_size = new size_t[num_gpus];
    for (int gpu = 0; gpu < num_gpus; gpu++)
    {
        size_t dummy;
        hipSetDevice(gpu_idx[gpu]);
        hipMemGetInfo(&(org_size[gpu]), &dummy);
    }
    BfsEnactor* enactor = new BfsEnactor(num_gpus, gpu_idx);  // enactor map
    BfsProblem* problem = new BfsProblem;  // allocate problem on GPU
    util::GRError(problem->Init(
                      stream_from_host,
                      graph,
                      NULL,
                      num_gpus,
                      gpu_idx,
                      partition_method,
                      streams,
                      max_queue_sizing,
                      max_in_sizing,
                      partition_factor,
                      partition_seed),
                  "BFS Problem Init failed", __FILE__, __LINE__);
    util::GRError(enactor->Init(
                      context, problem, max_grid_size, traversal_mode),
                  "BFS Enactor Init failed", __FILE__, __LINE__);
    // compute reference CPU BFS solution for source-distance
    if (reference_check_label != NULL)
    {
        if (!quiet_mode)
        {
            printf("Computing reference value ...\n");
        }
        ReferenceBFS<VertexId, Value, SizeT,
                     MARK_PREDECESSORS, ENABLE_IDEMPOTENCE>(
                         graph, reference_check_label,
                         reference_check_preds, src, quiet_mode);
        if (!quiet_mode)
        {
            printf("\n");
        }
    }
    // perform BFS (timed with a CPU wall-clock timer, averaged below)
    double elapsed = 0.0f;
    CpuTimer cpu_timer;
    for (int iter = 0; iter < iterations; ++iter)
    {
        util::GRError(problem->Reset(
                          src, enactor->GetFrontierType(),
                          max_queue_sizing, max_queue_sizing1),
                      "BFS Problem Data Reset Failed", __FILE__, __LINE__);
        util::GRError(enactor->Reset(),
                      "BFS Enactor Reset failed", __FILE__, __LINE__);
        util::GRError("Error before Enact", __FILE__, __LINE__);
        if (!quiet_mode)
        {
            printf("__________________________\n"); fflush(stdout);
        }
        cpu_timer.Start();
        util::GRError(enactor->Enact(src, traversal_mode),
                      "BFS Problem Enact Failed", __FILE__, __LINE__);
        cpu_timer.Stop();
        if (!quiet_mode)
        {
            printf("--------------------------\n"); fflush(stdout);
        }
        elapsed += cpu_timer.ElapsedMillis();
    }
    elapsed /= iterations;
    // copy out results
    util::GRError(problem->Extract(h_labels, h_preds),
                  "BFS Problem Data Extraction Failed", __FILE__, __LINE__);
    // verify the result (labels are only comparable against the CPU
    // reference when idempotence is off, or when predecessors are off)
    if (reference_check_label != NULL)
    {
        if (!ENABLE_IDEMPOTENCE)
        {
            if (!quiet_mode)
            {
                printf("Label Validity: ");
            }
            int error_num = CompareResults(
                                h_labels, reference_check_label,
                                graph->nodes, true, quiet_mode);
            if (error_num > 0)
            {
                if (!quiet_mode)
                {
                    printf("%d errors occurred.\n", error_num);
                }
            }
        }
        else
        {
            if (!MARK_PREDECESSORS)
            {
                if (!quiet_mode)
                {
                    printf("Label Validity: ");
                }
                int error_num = CompareResults(
                                    h_labels, reference_check_label,
                                    graph->nodes, true, quiet_mode);
                if (error_num > 0)
                {
                    if (!quiet_mode)
                    {
                        printf("%d errors occurred.\n", error_num);
                    }
                }
            }
        }
    }
    // display Solution
    if (!quiet_mode)
    {
        DisplaySolution<VertexId, SizeT, MARK_PREDECESSORS, ENABLE_IDEMPOTENCE>
        (h_labels, h_preds, graph->nodes, quiet_mode);
    }
    info->ComputeTraversalStats(  // compute running statistics
        enactor->enactor_stats.GetPointer(), elapsed, h_labels);
    if (!quiet_mode)
    {
        info->DisplayStats();  // display collected statistics
    }
    info->CollectInfo();  // collected all the info and put into JSON mObject
    // Per-GPU memory-usage / queue-sizing report.
    if (!quiet_mode)
    {
        printf("\n\tMemory Usage(B)\t");
        for (int gpu = 0; gpu < num_gpus; gpu++)
            if (num_gpus > 1)
            {
                if (gpu != 0)
                {
                    printf(" #keys%d,0\t #keys%d,1\t #ins%d,0\t #ins%d,1",
                           gpu, gpu, gpu, gpu);
                }
                else
                {
                    printf(" #keys%d,0\t #keys%d,1", gpu, gpu);
                }
            }
            else
            {
                printf(" #keys%d,0\t #keys%d,1", gpu, gpu);
            }
        if (num_gpus > 1)
        {
            printf(" #keys%d", num_gpus);
        }
        printf("\n");
        double max_queue_sizing_[2] = {0, 0 }, max_in_sizing_ = 0;
        for (int gpu = 0; gpu < num_gpus; gpu++)
        {
            size_t gpu_free, dummy;
            hipSetDevice(gpu_idx[gpu]);
            hipMemGetInfo(&gpu_free, &dummy);
            printf("GPU_%d\t %ld", gpu_idx[gpu], org_size[gpu] - gpu_free);
            for (int i = 0; i < num_gpus; i++)
            {
                for (int j = 0; j < 2; j++)
                {
                    SizeT x = problem->data_slices[gpu]->frontier_queues[i].keys[j].GetSize();
                    printf("\t %lld", (long long) x);
                    double factor = 1.0 * x / (num_gpus > 1 ? problem->graph_slices[gpu]->in_counter[i] : problem->graph_slices[gpu]->nodes);
                    if (factor > max_queue_sizing_[j])
                    {
                        max_queue_sizing_[j] = factor;
                    }
                }
                if (num_gpus > 1 && i != 0 )
                {
                    for (int t = 0; t < 2; t++)
                    {
                        SizeT x = problem->data_slices[gpu][0].keys_in[t][i].GetSize();
                        printf("\t %lld", (long long) x);
                        double factor = 1.0 * x / problem->graph_slices[gpu]->in_counter[i];
                        if (factor > max_in_sizing_)
                        {
                            max_in_sizing_ = factor;
                        }
                    }
                }
            }
            if (num_gpus > 1)
            {
                printf("\t %lld", (long long)(problem->data_slices[gpu]->frontier_queues[num_gpus].keys[0].GetSize()));
            }
            printf("\n");
        }
        printf("\t queue_sizing =\t %lf \t %lf", max_queue_sizing_[0], max_queue_sizing_[1]);
        if (num_gpus > 1)
        {
            printf("\t in_sizing =\t %lf", max_in_sizing_);
        }
        printf("\n");
    }
    // Clean up
    if (org_size        ) {delete[] org_size        ; org_size         = NULL;}
    if (enactor         ) {delete   enactor         ; enactor          = NULL;}
    if (problem         ) {delete   problem         ; problem          = NULL;}
    if (reference_labels) {delete[] reference_labels; reference_labels = NULL;}
    if (reference_preds ) {delete[] reference_preds ; reference_preds  = NULL;}
    if (h_labels        ) {delete[] h_labels        ; h_labels         = NULL;}
    if (h_preds         ) {delete[] h_preds         ; h_preds          = NULL;}
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
* @tparam DEBUG
* @tparam SIZE_CHECK
* @tparam MARK_PREDECESSORS
*
* @param[in] info Pointer to info contains parameters and statistics.
*/
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT,
    bool DEBUG,
    bool SIZE_CHECK,
    bool MARK_PREDECESSORS >
void RunTests_enable_idempotence(Info<VertexId, Value, SizeT> *info)
{
    // Lift the runtime "idempotent" flag into the compile-time
    // ENABLE_IDEMPOTENCE template argument.
    const bool idempotent = info->info["idempotent"].get_bool();
    if (!idempotent)
    {
        RunTests<VertexId, Value, SizeT, INSTRUMENT, DEBUG, SIZE_CHECK,
                 MARK_PREDECESSORS, false>(info);
    }
    else
    {
        RunTests<VertexId, Value, SizeT, INSTRUMENT, DEBUG, SIZE_CHECK,
                 MARK_PREDECESSORS, true>(info);
    }
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
* @tparam DEBUG
* @tparam SIZE_CHECK
*
* @param[in] info Pointer to info contains parameters and statistics.
*/
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT,
    bool DEBUG,
    bool SIZE_CHECK >
void RunTests_mark_predecessors(Info<VertexId, Value, SizeT> *info)
{
    // Lift the runtime "mark_predecessors" flag into the compile-time
    // MARK_PREDECESSORS template argument.
    const bool mark_pred = info->info["mark_predecessors"].get_bool();
    if (!mark_pred)
    {
        RunTests_enable_idempotence<VertexId, Value, SizeT, INSTRUMENT,
                                    DEBUG, SIZE_CHECK, false>(info);
    }
    else
    {
        RunTests_enable_idempotence<VertexId, Value, SizeT, INSTRUMENT,
                                    DEBUG, SIZE_CHECK, true>(info);
    }
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
* @tparam DEBUG
*
* @param[in] info Pointer to info contains parameters and statistics.
*/
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT,
    bool DEBUG >
void RunTests_size_check(Info<VertexId, Value, SizeT> *info)
{
    // Lift the runtime "size_check" flag into the compile-time
    // SIZE_CHECK template argument.
    const bool size_check = info->info["size_check"].get_bool();
    if (!size_check)
    {
        RunTests_mark_predecessors<VertexId, Value, SizeT, INSTRUMENT,
                                   DEBUG, false>(info);
    }
    else
    {
        RunTests_mark_predecessors<VertexId, Value, SizeT, INSTRUMENT,
                                   DEBUG, true>(info);
    }
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] info Pointer to info contains parameters and statistics.
*/
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT >
void RunTests_debug(Info<VertexId, Value, SizeT> *info)
{
    // Lift the runtime "debug_mode" flag into the compile-time
    // DEBUG template argument.
    const bool debug = info->info["debug_mode"].get_bool();
    if (!debug)
    {
        RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, false>(info);
    }
    else
    {
        RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, true>(info);
    }
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] info Pointer to info contains parameters and statistics.
*/
template <
    typename VertexId,
    typename Value,
    typename SizeT >
void RunTests_instrumented(Info<VertexId, Value, SizeT> *info)
{
    // Lift the runtime "instrument" flag into the compile-time
    // INSTRUMENT template argument.
    const bool instrument = info->info["instrument"].get_bool();
    if (!instrument)
    {
        RunTests_debug<VertexId, Value, SizeT, false>(info);
    }
    else
    {
        RunTests_debug<VertexId, Value, SizeT, true>(info);
    }
}
/**
 * @brief Writes the CSR graph held by \p info to an edge-list (.gr) text file.
 *
 * The file name comes from --output-filename; --include-header prepends a
 * header line; --keep-num switches to 1-based vertex ids. Edge values are
 * appended when the CSR stores them.
 *
 * @param[in] info Pointer to the Info object holding the CSR graph (csr_ptr).
 * @param[in] args Parsed command-line arguments.
 */
template <
    typename VertexId,
    typename Value,
    typename SizeT >
void Write_gr(Info<VertexId, Value, SizeT> *info, CommandLineArgs *args)
{
    Csr<VertexId, Value, SizeT> *graph = info -> csr_ptr;
    std::string filename = "";
    std::ofstream gr_file;
    SizeT edge_counter = 0;
    bool keep_num = args->CheckCmdLineFlag("keep-num");
    args->GetCmdLineArgument("output-filename", filename);
    gr_file.open(filename.c_str());
    // Fix: previously a missing/unwritable output file failed silently and
    // the success message below was still printed.
    if (!gr_file.is_open())
    {
        fprintf(stderr, "Error: cannot open output file '%s' for writing\n",
                filename.c_str());
        return;
    }
    if (args->CheckCmdLineFlag("include-header"))
        // NOTE(review): header writes the node count twice ("nodes nodes
        // edges"), presumably matrix-market style rows/cols/nnz -- confirm.
        gr_file << graph -> nodes << " " << graph->nodes << " "
                << graph->edges << std::endl;
    for (VertexId u = 0; u < graph->nodes; u++)
    {
        // Walk u's adjacency list in the CSR layout.
        for (SizeT i = graph -> row_offsets[u];
                i < graph -> row_offsets[u+1]; i++)
        {
            VertexId v = graph -> column_indices[i];
            if (keep_num)
                gr_file << u+1 << " " << v+1;  // 1-based ids
            else
                gr_file << u << " " << v;      // 0-based ids
            if (graph -> edge_values != NULL)
                gr_file << " " << graph -> edge_values[i];
            gr_file << std::endl;
            edge_counter ++;
        }
    }
    gr_file.close();
    printf("%lld nodes and %lld edges written into gr file %s\n",
           (long long)graph -> nodes, (long long)edge_counter,
           filename.c_str());
}
/******************************************************************************
* Main
******************************************************************************/
// Entry point: parses the command line, loads/generates the graph into a CSR,
// and writes it out as a .gr edge list.
// NOTE(review): the actual BFS test run (RunTests_instrumented) is commented
// out below, so this build only performs the graph conversion -- confirm
// this is intentional.
int main(int argc, char** argv)
{
    CommandLineArgs args(argc, argv);
    int graph_args = argc - args.ParsedArgc() - 1;
    if (argc < 2 || graph_args < 1 || args.CheckCmdLineFlag("help"))
    {
        Usage();
        return 1;
    }
    typedef int VertexId;  // Use int as the vertex identifier
    typedef int Value;     // Use int as the value type
    typedef int SizeT;     // Use int as the graph size type
    Csr<VertexId, Value, SizeT> csr(false);  // graph we process on
    // NOTE(review): info is never deleted -- leaked until process exit.
    Info<VertexId, Value, SizeT> *info = new Info<VertexId, Value, SizeT>;
    // graph construction or generation related parameters
    info->info["undirected"] = args.CheckCmdLineFlag("undirected");
    info->info["edge_value"] = args.CheckCmdLineFlag("edge_value");
    info->Init("BFS", args, csr);  // initialize Info structure
    //RunTests_instrumented<VertexId, Value, SizeT>(info);  // run test
    Write_gr<VertexId, Value, SizeT>(info, &args);
    return 0;
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| 9f09bc161ee5c8b8ff98ae6bd3be504aaa903ead.cu | // ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_bfs.cu
*
* @brief Simple test driver program for breadth-first search.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <algorithm>
#include <iostream>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// BFS includes
#include <gunrock/app/bfs/bfs_enactor.cuh>
#include <gunrock/app/bfs/bfs_problem.cuh>
#include <gunrock/app/bfs/bfs_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
using namespace gunrock;
using namespace gunrock::app;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::bfs;
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
// Prints the command-line usage/help text for this BFS tester to stdout.
void Usage()
{
    printf(
        "test <graph-type> [graph-type-arguments]\n"
        "Graph type and graph type arguments:\n"
        " market <matrix-market-file-name>\n"
        " Reads a Matrix-Market coordinate-formatted graph of\n"
        " directed/undirected edges from STDIN (or from the\n"
        " optionally-specified file).\n"
        " rmat (default: rmat_scale = 10, a = 0.57, b = c = 0.19)\n"
        " Generate R-MAT graph as input\n"
        " --rmat_scale=<vertex-scale>\n"
        " --rmat_nodes=<number-nodes>\n"
        " --rmat_edgefactor=<edge-factor>\n"
        " --rmat_edges=<number-edges>\n"
        " --rmat_a=<factor> --rmat_b=<factor> --rmat_c=<factor>\n"
        " --rmat_seed=<seed>\n"
        " rgg (default: rgg_scale = 10, rgg_thfactor = 0.55)\n"
        " Generate Random Geometry Graph as input\n"
        " --rgg_scale=<vertex-scale>\n"
        " --rgg_nodes=<number-nodes>\n"
        " --rgg_thfactor=<threshold-factor>\n"
        " --rgg_threshold=<threshold>\n"
        " --rgg_vmultipiler=<vmultipiler>\n"
        " --rgg_seed=<seed>\n\n"
        "Optional arguments:\n"
        "[--device=<device_index>] Set GPU(s) for testing (Default: 0).\n"
        "[--undirected] Treat the graph as undirected (symmetric).\n"
        "[--idempotence] Whether or not to enable idempotent operation.\n"
        "[--instrumented] Keep kernels statics [Default: Disable].\n"
        " total_queued, search_depth and barrier duty.\n"
        " (a relative indicator of load imbalance.)\n"
        "[--src=<Vertex-ID|randomize|largestdegree>]\n"
        " Begins traversal from the source (Default: 0).\n"
        " If randomize: from a random source vertex.\n"
        " If largestdegree: from largest degree vertex.\n"
        "[--quick] Skip the CPU reference validation process.\n"
        "[--mark-pred] Keep both label info and predecessor info.\n"
        "[--disable-size-check] Disable frontier queue size check.\n"
        "[--grid-size=<grid size>] Maximum allowed grid size setting.\n"
        "[--queue-sizing=<factor>] Allocates a frontier queue sized at: \n"
        " (graph-edges * <factor>). (Default: 1.0)\n"
        "[--in-sizing=<in/out_queue_scale_factor>]\n"
        " Allocates a frontier queue sized at: \n"
        " (graph-edges * <factor>). (Default: 1.0)\n"
        "[--v] Print verbose per iteration debug info.\n"
        "[--iteration-num=<num>] Number of runs to perform the test.\n"
        "[--traversal-mode=<0|1>] Set traversal strategy, 0 for Load-Balanced\n"
        " 1 for Dynamic-Cooperative (Default: dynamic\n"
        " determine based on average degree).\n"
        "[--partition_method=<random|biasrandom|clustered|metis>]\n"
        " Choose partitioner (Default use random).\n"
        "[--quiet] No output (unless --json is specified).\n"
        "[--json] Output JSON-format statistics to STDOUT.\n"
        "[--jsonfile=<name>] Output JSON-format statistics to file <name>\n"
        "[--jsondir=<dir>] Output JSON-format statistics to <dir>/name,\n"
        " where name is auto-generated.\n"
    );
}
/**
 * @brief Prints a preview of the GPU BFS result to stdout.
 *
 * At most the first 40 entries are shown as "vertex:label" pairs; when
 * predecessors are tracked (MARK_PREDECESSORS on, idempotence off) the
 * predecessor is appended as ",pred".
 *
 * @tparam VertexId
 * @tparam SizeT
 * @tparam MARK_PREDECESSORS
 * @tparam ENABLE_IDEMPOTENCE
 *
 * @param[in] labels    Search depth from the source for each node.
 * @param[in] preds     Predecessor node id for each node.
 * @param[in] num_nodes Number of nodes in the graph.
 * @param[in] quiet     Suppress all output when true.
 */
template <
    typename VertexId,
    typename SizeT,
    bool MARK_PREDECESSORS,
    bool ENABLE_IDEMPOTENCE >
void DisplaySolution(
    VertexId *labels,
    VertexId *preds,
    SizeT num_nodes,
    bool quiet = false)
{
    if (quiet) { return; }
    // Cap the preview at 40 entries to keep the output readable.
    SizeT display_count = (num_nodes > 40) ? (SizeT)40 : num_nodes;
    printf("\nFirst %d labels of the GPU result:\n", display_count);
    printf("[");
    for (VertexId v = 0; v < display_count; ++v)
    {
        PrintValue(v);
        printf(":");
        PrintValue(labels[v]);
        // Predecessors are only meaningful when idempotence is disabled.
        if (MARK_PREDECESSORS && !ENABLE_IDEMPOTENCE)
        {
            printf(",");
            PrintValue(preds[v]);
        }
        printf(" ");
    }
    printf("]\n");
}
/******************************************************************************
* BFS Testing Routines
*****************************************************************************/
/**
 * @brief Serial CPU BFS used as the gold reference for validating the GPU
 *        result.
 *
 * Unvisited vertices are marked with -1 in idempotent mode and with
 * util::MaxValue<VertexId>() - 1 otherwise; the relaxation condition below
 * handles both sentinels.
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 * @tparam MARK_PREDECESSORS
 * @tparam ENABLE_IDEMPOTENCE
 *
 * @param[in]  graph       CSR graph to traverse.
 * @param[out] source_path Per-node search depth from the source.
 * @param[out] predecessor Per-node predecessor id (only if MARK_PREDECESSORS).
 * @param[in]  src         Source vertex.
 * @param[in]  quiet       Suppress the timing printout when true.
 */
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool MARK_PREDECESSORS,
    bool ENABLE_IDEMPOTENCE >
void ReferenceBFS(
    const Csr<VertexId, Value, SizeT> *graph,
    VertexId *source_path,
    VertexId *predecessor,
    VertexId src,
    bool quiet = false)
{
    // Sentinel for "not yet reached".
    const VertexId unvisited =
        ENABLE_IDEMPOTENCE ? -1 : util::MaxValue<VertexId>() - 1;
    for (VertexId v = 0; v < graph->nodes; ++v)
    {
        source_path[v] = unvisited;
        if (MARK_PREDECESSORS) predecessor[v] = -1;
    }
    source_path[src] = 0;
    VertexId search_depth = 0;

    // FIFO frontier of discovered-but-unexpanded vertices.
    std::deque<VertexId> frontier;
    frontier.push_back(src);

    CpuTimer cpu_timer;
    cpu_timer.Start();
    while (!frontier.empty())
    {
        VertexId current = frontier.front();
        frontier.pop_front();
        VertexId next_depth = source_path[current] + 1;
        // Scan the adjacency list of `current`.
        for (SizeT e = graph->row_offsets[current];
             e < graph->row_offsets[current + 1]; ++e)
        {
            VertexId neighbor = graph->column_indices[e];
            // Relax when the neighbor is unvisited (-1) or reachable at a
            // smaller depth.
            if (source_path[neighbor] > next_depth ||
                source_path[neighbor] == -1)
            {
                source_path[neighbor] = next_depth;
                if (MARK_PREDECESSORS) predecessor[neighbor] = current;
                if (search_depth < next_depth) search_depth = next_depth;
                frontier.push_back(neighbor);
            }
        }
    }
    // The source has no predecessor by definition.
    if (MARK_PREDECESSORS) predecessor[src] = -1;
    cpu_timer.Stop();
    float elapsed = cpu_timer.ElapsedMillis();
    search_depth++;
    if (!quiet)
    {
        printf("CPU BFS finished in %lf msec. cpu_search_depth: %d\n",
               elapsed, search_depth);
    }
}
/**
* @brief Run BFS tests
*
* Allocates the BFS problem/enactor on the selected GPUs, optionally computes
* a CPU reference solution, runs the GPU BFS, validates and displays the
* result, reports per-GPU memory statistics, and releases all resources.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
* @tparam DEBUG
* @tparam SIZE_CHECK
* @tparam MARK_PREDECESSORS
* @tparam ENABLE_IDEMPOTENCE
*
* @param[in] info Pointer to info contains parameters and statistics.
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool DEBUG,
bool SIZE_CHECK,
bool MARK_PREDECESSORS,
bool ENABLE_IDEMPOTENCE >
void RunTests(Info<VertexId, Value, SizeT> *info)
{
typedef BFSProblem < VertexId,
SizeT,
Value,
MARK_PREDECESSORS,
ENABLE_IDEMPOTENCE,
(MARK_PREDECESSORS && ENABLE_IDEMPOTENCE) >
BfsProblem; // does not use double buffer
typedef BFSEnactor < BfsProblem,
INSTRUMENT,
DEBUG,
SIZE_CHECK >
BfsEnactor;
// parse configurations from mObject info
Csr<VertexId, Value, SizeT> *graph = info->csr_ptr;
VertexId src = info->info["source_vertex"].get_int64();
int max_grid_size = info->info["max_grid_size"].get_int();
int num_gpus = info->info["num_gpus"].get_int();
double max_queue_sizing = info->info["max_queue_sizing"].get_real();
double max_queue_sizing1 = info->info["max_queue_sizing1"].get_real();
double max_in_sizing = info->info["max_in_sizing"].get_real();
std::string partition_method = info->info["partition_method"].get_str();
double partition_factor = info->info["partition_factor"].get_real();
int partition_seed = info->info["partition_seed"].get_int();
bool quiet_mode = info->info["quiet_mode"].get_bool();
bool quick_mode = info->info["quick_mode"].get_bool();
bool stream_from_host = info->info["stream_from_host"].get_bool();
int traversal_mode = info->info["traversal_mode"].get_int();
int iterations = 1; //disable since doesn't support mgpu stop condition. info->info["num_iteration"].get_int();
json_spirit::mArray device_list = info->info["device_list"].get_array();
int* gpu_idx = new int[num_gpus];
for (int i = 0; i < num_gpus; i++) gpu_idx[i] = device_list[i].get_int();
// TODO: remove after merge mgpu-cq
ContextPtr *context = (ContextPtr*) info->context;
cudaStream_t *streams = (cudaStream_t*)info->streams;
// allocate host-side label array (for both reference and GPU results)
VertexId *reference_labels = new VertexId[graph->nodes];
VertexId *reference_preds = new VertexId[graph->nodes];
VertexId *h_labels = new VertexId[graph->nodes];
// NULL reference pointer disables CPU validation (--quick).
VertexId *reference_check_label = (quick_mode) ? NULL : reference_labels;
VertexId *reference_check_preds = NULL;
VertexId *h_preds = NULL;
if (MARK_PREDECESSORS)
{
h_preds = new VertexId[graph->nodes];
if (!quick_mode)
{
reference_check_preds = reference_preds;
}
}
// Snapshot free device memory per GPU so usage deltas can be reported later.
size_t *org_size = new size_t[num_gpus];
for (int gpu = 0; gpu < num_gpus; gpu++)
{
size_t dummy;
cudaSetDevice(gpu_idx[gpu]);
cudaMemGetInfo(&(org_size[gpu]), &dummy);
}
BfsEnactor* enactor = new BfsEnactor(num_gpus, gpu_idx); // enactor map
BfsProblem* problem = new BfsProblem; // allocate problem on GPU
util::GRError(problem->Init(
stream_from_host,
graph,
NULL,
num_gpus,
gpu_idx,
partition_method,
streams,
max_queue_sizing,
max_in_sizing,
partition_factor,
partition_seed),
"BFS Problem Init failed", __FILE__, __LINE__);
util::GRError(enactor->Init(
context, problem, max_grid_size, traversal_mode),
"BFS Enactor Init failed", __FILE__, __LINE__);
// compute reference CPU BFS solution for source-distance
if (reference_check_label != NULL)
{
if (!quiet_mode)
{
printf("Computing reference value ...\n");
}
ReferenceBFS<VertexId, Value, SizeT,
MARK_PREDECESSORS, ENABLE_IDEMPOTENCE>(
graph, reference_check_label,
reference_check_preds, src, quiet_mode);
if (!quiet_mode)
{
printf("\n");
}
}
// perform BFS
double elapsed = 0.0f;
CpuTimer cpu_timer;
for (int iter = 0; iter < iterations; ++iter)
{
util::GRError(problem->Reset(
src, enactor->GetFrontierType(),
max_queue_sizing, max_queue_sizing1),
"BFS Problem Data Reset Failed", __FILE__, __LINE__);
util::GRError(enactor->Reset(),
"BFS Enactor Reset failed", __FILE__, __LINE__);
// NOTE(review): message-only GRError presumably checks the last CUDA
// error before the Enact call — confirm this overload's semantics.
util::GRError("Error before Enact", __FILE__, __LINE__);
if (!quiet_mode)
{
printf("__________________________\n"); fflush(stdout);
}
cpu_timer.Start();
util::GRError(enactor->Enact(src, traversal_mode),
"BFS Problem Enact Failed", __FILE__, __LINE__);
cpu_timer.Stop();
if (!quiet_mode)
{
printf("--------------------------\n"); fflush(stdout);
}
elapsed += cpu_timer.ElapsedMillis();
}
// Report the mean wall-clock time across iterations.
elapsed /= iterations;
// copy out results
util::GRError(problem->Extract(h_labels, h_preds),
"BFS Problem Data Extraction Failed", __FILE__, __LINE__);
// verify the result
if (reference_check_label != NULL)
{
if (!ENABLE_IDEMPOTENCE)
{
if (!quiet_mode)
{
printf("Label Validity: ");
}
int error_num = CompareResults(
h_labels, reference_check_label,
graph->nodes, true, quiet_mode);
if (error_num > 0)
{
if (!quiet_mode)
{
printf("%d errors occurred.\n", error_num);
}
}
}
else
{
// With idempotence on, labels are only comparable when predecessors
// are not folded into them.
if (!MARK_PREDECESSORS)
{
if (!quiet_mode)
{
printf("Label Validity: ");
}
int error_num = CompareResults(
h_labels, reference_check_label,
graph->nodes, true, quiet_mode);
if (error_num > 0)
{
if (!quiet_mode)
{
printf("%d errors occurred.\n", error_num);
}
}
}
}
}
// display Solution
if (!quiet_mode)
{
DisplaySolution<VertexId, SizeT, MARK_PREDECESSORS, ENABLE_IDEMPOTENCE>
(h_labels, h_preds, graph->nodes, quiet_mode);
}
info->ComputeTraversalStats( // compute running statistics
enactor->enactor_stats.GetPointer(), elapsed, h_labels);
if (!quiet_mode)
{
info->DisplayStats(); // display collected statistics
}
info->CollectInfo(); // collected all the info and put into JSON mObject
// Per-GPU frontier/queue memory statistics (sizes and sizing factors).
if (!quiet_mode)
{
printf("\n\tMemory Usage(B)\t");
for (int gpu = 0; gpu < num_gpus; gpu++)
if (num_gpus > 1)
{
if (gpu != 0)
{
printf(" #keys%d,0\t #keys%d,1\t #ins%d,0\t #ins%d,1",
gpu, gpu, gpu, gpu);
}
else
{
printf(" #keys%d,0\t #keys%d,1", gpu, gpu);
}
}
else
{
printf(" #keys%d,0\t #keys%d,1", gpu, gpu);
}
if (num_gpus > 1)
{
printf(" #keys%d", num_gpus);
}
printf("\n");
double max_queue_sizing_[2] = {0, 0 }, max_in_sizing_ = 0;
for (int gpu = 0; gpu < num_gpus; gpu++)
{
size_t gpu_free, dummy;
cudaSetDevice(gpu_idx[gpu]);
cudaMemGetInfo(&gpu_free, &dummy);
printf("GPU_%d\t %ld", gpu_idx[gpu], org_size[gpu] - gpu_free);
for (int i = 0; i < num_gpus; i++)
{
for (int j = 0; j < 2; j++)
{
SizeT x = problem->data_slices[gpu]->frontier_queues[i].keys[j].GetSize();
printf("\t %lld", (long long) x);
double factor = 1.0 * x / (num_gpus > 1 ? problem->graph_slices[gpu]->in_counter[i] : problem->graph_slices[gpu]->nodes);
if (factor > max_queue_sizing_[j])
{
max_queue_sizing_[j] = factor;
}
}
if (num_gpus > 1 && i != 0 )
{
for (int t = 0; t < 2; t++)
{
SizeT x = problem->data_slices[gpu][0].keys_in[t][i].GetSize();
printf("\t %lld", (long long) x);
double factor = 1.0 * x / problem->graph_slices[gpu]->in_counter[i];
if (factor > max_in_sizing_)
{
max_in_sizing_ = factor;
}
}
}
}
if (num_gpus > 1)
{
printf("\t %lld", (long long)(problem->data_slices[gpu]->frontier_queues[num_gpus].keys[0].GetSize()));
}
printf("\n");
}
printf("\t queue_sizing =\t %lf \t %lf", max_queue_sizing_[0], max_queue_sizing_[1]);
if (num_gpus > 1)
{
printf("\t in_sizing =\t %lf", max_in_sizing_);
}
printf("\n");
}
// Clean up
// NOTE(review): gpu_idx is never freed here — minor host-side leak.
if (org_size ) {delete[] org_size ; org_size = NULL;}
if (enactor ) {delete enactor ; enactor = NULL;}
if (problem ) {delete problem ; problem = NULL;}
if (reference_labels) {delete[] reference_labels; reference_labels = NULL;}
if (reference_preds ) {delete[] reference_preds ; reference_preds = NULL;}
if (h_labels ) {delete[] h_labels ; h_labels = NULL;}
if (h_preds ) {delete[] h_preds ; h_preds = NULL;}
}
/**
 * @brief Dispatch layer: reads the "idempotent" runtime flag and forwards
 *        to RunTests with the matching ENABLE_IDEMPOTENCE template argument.
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 * @tparam INSTRUMENT
 * @tparam DEBUG
 * @tparam SIZE_CHECK
 * @tparam MARK_PREDECESSORS
 *
 * @param[in] info Pointer to info contains parameters and statistics.
 */
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT,
    bool DEBUG,
    bool SIZE_CHECK,
    bool MARK_PREDECESSORS >
void RunTests_enable_idempotence(Info<VertexId, Value, SizeT> *info)
{
    const bool idempotent = info->info["idempotent"].get_bool();
    if (idempotent)
    {
        RunTests<VertexId, Value, SizeT, INSTRUMENT, DEBUG, SIZE_CHECK,
                 MARK_PREDECESSORS, true>(info);
        return;
    }
    RunTests<VertexId, Value, SizeT, INSTRUMENT, DEBUG, SIZE_CHECK,
             MARK_PREDECESSORS, false>(info);
}
/**
 * @brief Dispatch layer: reads the "mark_predecessors" runtime flag and
 *        forwards to the next dispatcher with the matching
 *        MARK_PREDECESSORS template argument.
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 * @tparam INSTRUMENT
 * @tparam DEBUG
 * @tparam SIZE_CHECK
 *
 * @param[in] info Pointer to info contains parameters and statistics.
 */
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT,
    bool DEBUG,
    bool SIZE_CHECK >
void RunTests_mark_predecessors(Info<VertexId, Value, SizeT> *info)
{
    const bool mark_pred = info->info["mark_predecessors"].get_bool();
    if (mark_pred)
    {
        RunTests_enable_idempotence<VertexId, Value, SizeT, INSTRUMENT,
                                    DEBUG, SIZE_CHECK, true>(info);
        return;
    }
    RunTests_enable_idempotence<VertexId, Value, SizeT, INSTRUMENT,
                                DEBUG, SIZE_CHECK, false>(info);
}
/**
 * @brief Dispatch layer: reads the "size_check" runtime flag and forwards
 *        with the matching SIZE_CHECK template argument.
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 * @tparam INSTRUMENT
 * @tparam DEBUG
 *
 * @param[in] info Pointer to info contains parameters and statistics.
 */
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT,
    bool DEBUG >
void RunTests_size_check(Info<VertexId, Value, SizeT> *info)
{
    const bool size_check = info->info["size_check"].get_bool();
    if (size_check)
    {
        RunTests_mark_predecessors<VertexId, Value, SizeT, INSTRUMENT,
                                   DEBUG, true>(info);
        return;
    }
    RunTests_mark_predecessors<VertexId, Value, SizeT, INSTRUMENT,
                               DEBUG, false>(info);
}
/**
 * @brief Dispatch layer: reads the "debug_mode" runtime flag and forwards
 *        with the matching DEBUG template argument.
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 * @tparam INSTRUMENT
 *
 * @param[in] info Pointer to info contains parameters and statistics.
 */
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT >
void RunTests_debug(Info<VertexId, Value, SizeT> *info)
{
    const bool debug = info->info["debug_mode"].get_bool();
    if (debug)
    {
        RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, true>(info);
        return;
    }
    RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, false>(info);
}
/**
 * @brief Top dispatch layer: reads the "instrument" runtime flag and
 *        forwards with the matching INSTRUMENT template argument.
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 *
 * @param[in] info Pointer to info contains parameters and statistics.
 */
template <
    typename VertexId,
    typename Value,
    typename SizeT >
void RunTests_instrumented(Info<VertexId, Value, SizeT> *info)
{
    const bool instrument = info->info["instrument"].get_bool();
    if (instrument)
    {
        RunTests_debug<VertexId, Value, SizeT, true>(info);
        return;
    }
    RunTests_debug<VertexId, Value, SizeT, false>(info);
}
/**
 * @brief Dumps the CSR graph as an edge-list ".gr" text file.
 *
 * The output file is named by the --output-filename command line argument.
 * Each line is "u v" (plus the edge weight when the graph carries edge
 * values); --keep-num switches to 1-based vertex ids and --include-header
 * prepends a "nodes nodes edges" header line.
 *
 * Fix vs. the original: a failed file open (e.g. missing --output-filename)
 * was silently ignored and a bogus success message was still printed; it now
 * reports the failure and returns early.
 *
 * @param[in] info Info object holding the CSR graph to dump.
 * @param[in] args Parsed command line arguments.
 */
template <
    typename VertexId,
    typename Value,
    typename SizeT >
void Write_gr(Info<VertexId, Value, SizeT> *info, CommandLineArgs *args)
{
    Csr<VertexId, Value, SizeT> *graph = info -> csr_ptr;
    std::string filename = "";
    std::ofstream gr_file;
    SizeT edge_counter = 0;
    bool keep_num = args->CheckCmdLineFlag("keep-num");
    args->GetCmdLineArgument("output-filename", filename);
    gr_file.open(filename.c_str());
    // Fail loudly instead of silently writing nowhere.
    if (!gr_file.is_open())
    {
        printf("Error: could not open output file '%s' for writing.\n",
               filename.c_str());
        return;
    }
    if (args->CheckCmdLineFlag("include-header"))
        gr_file << graph -> nodes << " " << graph->nodes << " "
                << graph->edges << std::endl;
    for (VertexId u = 0; u < graph->nodes; u++)
    {
        for (SizeT i = graph -> row_offsets[u];
             i < graph -> row_offsets[u+1]; i++)
        {
            VertexId v = graph -> column_indices[i];
            if (keep_num)
                gr_file << u+1 << " " << v+1;   // 1-based vertex ids
            else gr_file << u << " " << v;
            // Append the edge weight when the graph has edge values.
            if (graph -> edge_values != NULL)
                gr_file << " " << graph -> edge_values[i];
            gr_file << std::endl;
            edge_counter++;
        }
    }
    gr_file.close();
    printf("%lld nodes and %lld edges written into gr file %s\n",
           (long long)graph -> nodes, (long long)edge_counter,
           filename.c_str());
}
/******************************************************************************
 * Main
 ******************************************************************************/
int main(int argc, char** argv)
{
    CommandLineArgs args(argc, argv);
    int graph_args = argc - args.ParsedArgc() - 1;
    // Require at least one graph argument; --help prints usage and exits.
    bool show_help = args.CheckCmdLineFlag("help");
    if (argc < 2 || graph_args < 1 || show_help)
    {
        Usage();
        return 1;
    }
    typedef int VertexId;   // vertex identifier type
    typedef int Value;      // attribute value type
    typedef int SizeT;      // graph size type
    Csr<VertexId, Value, SizeT> csr(false);  // graph we process on
    Info<VertexId, Value, SizeT> *info = new Info<VertexId, Value, SizeT>;
    // Graph construction / generation flags must be set before Init().
    info->info["undirected"] = args.CheckCmdLineFlag("undirected");
    info->info["edge_value"] = args.CheckCmdLineFlag("edge_value");
    info->Init("BFS", args, csr);
    //RunTests_instrumented<VertexId, Value, SizeT>(info); // run test
    Write_gr<VertexId, Value, SizeT>(info, &args);
    return 0;
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
b0f2ca28baaaefc348b4b8c3332c8fc712d6913c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2020 by Contributors
* \file array/cuda/csr_sort.cc
* \brief Sort COO index
*/
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"
#include "./utils.h"
namespace dgl {
using runtime::NDArray;
namespace aten {
namespace impl {
/*!
 * \brief Per-row sortedness check: flags[r] = 1 iff the column indices of
 *        row r are in non-decreasing order. Rows are distributed over
 *        threads with a grid-stride loop.
 */
template <typename IdType>
__global__ void _SegmentIsSorted(
    const IdType* indptr, const IdType* indices,
    int64_t num_rows, int8_t* flags) {
  const int stride = gridDim.x * blockDim.x;
  for (int row = blockIdx.x * blockDim.x + threadIdx.x;
       row < num_rows; row += stride) {
    bool sorted = true;
    // Short-circuits as soon as one out-of-order pair is found.
    for (IdType i = indptr[row] + 1; sorted && i < indptr[row + 1]; ++i) {
      sorted = (indices[i - 1] <= indices[i]);
    }
    flags[row] = static_cast<int8_t>(sorted);
  }
}
// Host-side driver: launches _SegmentIsSorted over all rows on the thread's
// HIP stream, then reduces the per-row flags with cuda::AllTrue. Returns
// true iff every row of `csr` has non-decreasing column indices.
template <DLDeviceType XPU, typename IdType>
bool CSRIsSorted(CSRMatrix csr) {
const auto& ctx = csr.indptr->ctx;
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
auto device = runtime::DeviceAPI::Get(ctx);
// We allocate a workspace of num_rows bytes. It wastes a little bit memory but should
// be fine.
int8_t* flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, csr.num_rows));
// Ceil-divide the rows over blocks of nt threads each.
const int nt = cuda::FindNumThreads(csr.num_rows);
const int nb = (csr.num_rows + nt - 1) / nt;
hipLaunchKernelGGL(( _SegmentIsSorted), dim3(nb), dim3(nt), 0, thr_entry->stream,
csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(),
csr.num_rows, flags);
bool ret = cuda::AllTrue(flags, csr.num_rows, ctx);
device->FreeWorkspace(ctx, flags);
return ret;
}
// Explicit instantiations for the supported GPU index widths.
template bool CSRIsSorted<kDLGPU, int32_t>(CSRMatrix csr);
template bool CSRIsSorted<kDLGPU, int64_t>(CSRMatrix csr);
// In-place row-wise sort of a CSR matrix's column indices using
// hipsparseXcsrsort; csr->data receives (or already holds) the permutation
// applied by the sort. Only 32-bit indices are supported at runtime.
template <DLDeviceType XPU, typename IdType>
void CSRSort_(CSRMatrix* csr) {
// NOTE(review): an int64_t instantiation exists below, but it would trip
// this runtime CHECK — confirm callers only reach it with 32-bit ids.
CHECK(sizeof(IdType) == 4) << "CUDA CSRSort_ does not support int64.";
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
auto device = runtime::DeviceAPI::Get(csr->indptr->ctx);
// allocate cusparse handle if needed
if (!thr_entry->cusparse_handle) {
CUSPARSE_CALL(hipsparseCreate(&(thr_entry->cusparse_handle)));
}
CUSPARSE_CALL(hipsparseSetStream(thr_entry->cusparse_handle, thr_entry->stream));
NDArray indptr = csr->indptr;
NDArray indices = csr->indices;
const auto& ctx = indptr->ctx;
const int64_t nnz = indices->shape[0];
// Materialize an identity data array if absent so the sort's permutation
// is recorded in csr->data.
if (!aten::CSRHasData(*csr))
csr->data = aten::Range(0, nnz, indices->dtype.bits, ctx);
NDArray data = csr->data;
// Query, then allocate, the temporary buffer csrsort requires.
size_t workspace_size = 0;
CUSPARSE_CALL(hipsparseXcsrsort_bufferSizeExt(
thr_entry->cusparse_handle,
csr->num_rows, csr->num_cols, nnz,
indptr.Ptr<int32_t>(), indices.Ptr<int32_t>(),
&workspace_size));
void* workspace = device->AllocWorkspace(ctx, workspace_size);
hipsparseMatDescr_t descr;
CUSPARSE_CALL(hipsparseCreateMatDescr(&descr));
CUSPARSE_CALL(hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
CUSPARSE_CALL(hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO));
CUSPARSE_CALL(hipsparseXcsrsort(
thr_entry->cusparse_handle,
csr->num_rows, csr->num_cols, nnz,
descr,
indptr.Ptr<int32_t>(), indices.Ptr<int32_t>(),
data.Ptr<int32_t>(),
workspace));
csr->sorted = true;
// free resources
CUSPARSE_CALL(hipsparseDestroyMatDescr(descr));
device->FreeWorkspace(ctx, workspace);
}
template void CSRSort_<kDLGPU, int32_t>(CSRMatrix* csr);
template void CSRSort_<kDLGPU, int64_t>(CSRMatrix* csr);
} // namespace impl
} // namespace aten
} // namespace dgl
| b0f2ca28baaaefc348b4b8c3332c8fc712d6913c.cu | /*!
* Copyright (c) 2020 by Contributors
* \file array/cuda/csr_sort.cc
* \brief Sort COO index
*/
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"
#include "./utils.h"
namespace dgl {
using runtime::NDArray;
namespace aten {
namespace impl {
/*!
 * \brief Writes flags[r] = 1 when row r of the CSR matrix has its column
 *        indices in non-decreasing order, 0 otherwise. Grid-stride over rows.
 */
template <typename IdType>
__global__ void _SegmentIsSorted(
    const IdType* indptr, const IdType* indices,
    int64_t num_rows, int8_t* flags) {
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  const int step = gridDim.x * blockDim.x;
  while (row < num_rows) {
    int8_t ok = 1;
    const IdType row_end = indptr[row + 1];
    // Stop scanning this row at the first out-of-order pair.
    for (IdType i = indptr[row] + 1; i < row_end; ++i) {
      if (indices[i - 1] > indices[i]) { ok = 0; break; }
    }
    flags[row] = ok;
    row += step;
  }
}
// Host-side driver: launches _SegmentIsSorted over all rows on the thread's
// CUDA stream, then reduces the per-row flags with cuda::AllTrue. Returns
// true iff every row of `csr` has non-decreasing column indices.
template <DLDeviceType XPU, typename IdType>
bool CSRIsSorted(CSRMatrix csr) {
const auto& ctx = csr.indptr->ctx;
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
auto device = runtime::DeviceAPI::Get(ctx);
// We allocate a workspace of num_rows bytes. It wastes a little bit memory but should
// be fine.
int8_t* flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, csr.num_rows));
// Ceil-divide the rows over blocks of nt threads each.
const int nt = cuda::FindNumThreads(csr.num_rows);
const int nb = (csr.num_rows + nt - 1) / nt;
_SegmentIsSorted<<<nb, nt, 0, thr_entry->stream>>>(
csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(),
csr.num_rows, flags);
bool ret = cuda::AllTrue(flags, csr.num_rows, ctx);
device->FreeWorkspace(ctx, flags);
return ret;
}
// Explicit instantiations for the supported GPU index widths.
template bool CSRIsSorted<kDLGPU, int32_t>(CSRMatrix csr);
template bool CSRIsSorted<kDLGPU, int64_t>(CSRMatrix csr);
// In-place row-wise sort of a CSR matrix's column indices using
// cusparseXcsrsort; csr->data receives (or already holds) the permutation
// applied by the sort. Only 32-bit indices are supported at runtime.
template <DLDeviceType XPU, typename IdType>
void CSRSort_(CSRMatrix* csr) {
// NOTE(review): an int64_t instantiation exists below, but it would trip
// this runtime CHECK — confirm callers only reach it with 32-bit ids.
CHECK(sizeof(IdType) == 4) << "CUDA CSRSort_ does not support int64.";
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
auto device = runtime::DeviceAPI::Get(csr->indptr->ctx);
// allocate cusparse handle if needed
if (!thr_entry->cusparse_handle) {
CUSPARSE_CALL(cusparseCreate(&(thr_entry->cusparse_handle)));
}
CUSPARSE_CALL(cusparseSetStream(thr_entry->cusparse_handle, thr_entry->stream));
NDArray indptr = csr->indptr;
NDArray indices = csr->indices;
const auto& ctx = indptr->ctx;
const int64_t nnz = indices->shape[0];
// Materialize an identity data array if absent so the sort's permutation
// is recorded in csr->data.
if (!aten::CSRHasData(*csr))
csr->data = aten::Range(0, nnz, indices->dtype.bits, ctx);
NDArray data = csr->data;
// Query, then allocate, the temporary buffer csrsort requires.
size_t workspace_size = 0;
CUSPARSE_CALL(cusparseXcsrsort_bufferSizeExt(
thr_entry->cusparse_handle,
csr->num_rows, csr->num_cols, nnz,
indptr.Ptr<int32_t>(), indices.Ptr<int32_t>(),
&workspace_size));
void* workspace = device->AllocWorkspace(ctx, workspace_size);
cusparseMatDescr_t descr;
CUSPARSE_CALL(cusparseCreateMatDescr(&descr));
CUSPARSE_CALL(cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL));
CUSPARSE_CALL(cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO));
CUSPARSE_CALL(cusparseXcsrsort(
thr_entry->cusparse_handle,
csr->num_rows, csr->num_cols, nnz,
descr,
indptr.Ptr<int32_t>(), indices.Ptr<int32_t>(),
data.Ptr<int32_t>(),
workspace));
csr->sorted = true;
// free resources
CUSPARSE_CALL(cusparseDestroyMatDescr(descr));
device->FreeWorkspace(ctx, workspace);
}
template void CSRSort_<kDLGPU, int32_t>(CSRMatrix* csr);
template void CSRSort_<kDLGPU, int64_t>(CSRMatrix* csr);
} // namespace impl
} // namespace aten
} // namespace dgl
|
95fd47f8fc2ab83e41ef991f9cbd491f6db7757e.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "Thinning.h"
#include "ErrorCode.h"
#include "Image.h"
using namespace std;
#define LOOP 100
/**
 * Entry point: loads an image, binarizes it, then benchmarks four thinning
 * variants (thin, thinPt, thinFour, thinPtFour), printing the average time
 * over LOOP runs of each and writing every result image to disk.
 *
 * Fixes vs. the original: the HIP timing events are created once and
 * destroyed at exit (they were re-created before every section and never
 * destroyed, leaking event handles), and two allocated-but-unused output
 * images (outimg5/outimg6) were removed.
 */
int main(int argc, char const **argv)
{
    if (argc < 2)
    {
        cout << "Please input image!" << endl;
        return 0;
    }
    Thinning thin;

    // Load the input image; abort on read failure.
    Image *inimg;
    ImageBasicOp::newImage(&inimg);
    int errcode = ImageBasicOp::readFromFile(argv[1], inimg);
    if (errcode != NO_ERROR) {
        cout << "error: " << errcode << endl;
        return 0;
    }

    // Binarize: every non-zero pixel becomes foreground (255).
    for (int i = 0; i < inimg->width * inimg->height; i++)
        if (inimg->imgData[i] != 0)
            inimg->imgData[i] = 255;

    // One output image per thinning variant under test.
    Image *outimg1;
    ImageBasicOp::newImage(&outimg1);
    ImageBasicOp::makeAtHost(outimg1, inimg->width, inimg->height);
    Image *outimg2;
    ImageBasicOp::newImage(&outimg2);
    ImageBasicOp::makeAtHost(outimg2, inimg->width, inimg->height);
    Image *outimg3;
    ImageBasicOp::newImage(&outimg3);
    ImageBasicOp::makeAtHost(outimg3, inimg->width, inimg->height);
    Image *outimg4;
    ImageBasicOp::newImage(&outimg4);
    ImageBasicOp::makeAtHost(outimg4, inimg->width, inimg->height);

    // Create the timing events once; destroyed after all measurements.
    hipEvent_t start, stop;
    float runTime;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    // --- thin(): baseline parallel implementation ---
    hipEventRecord(start, 0);
    for (int i = 0; i < LOOP; i++)
        thin.thin(inimg, outimg1);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&runTime, start, stop);
    cout << "thin() time is " << (runTime) / LOOP << " ms" << endl;
    ImageBasicOp::copyToHost(outimg1);
    ImageBasicOp::writeToFile("thin_outimg.bmp", outimg1);

    // --- thinPt(): pattern-table variant ---
    hipEventRecord(start, 0);
    for (int i = 0; i < LOOP; i++)
        thin.thinPt(inimg, outimg2);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&runTime, start, stop);
    cout << "thinPt() time is " << (runTime) / LOOP << " ms" << endl;
    ImageBasicOp::copyToHost(outimg2);
    ImageBasicOp::writeToFile("thinPt_outimg.bmp", outimg2);

    // --- thinFour(): four-pixels-per-thread variant ---
    hipEventRecord(start, 0);
    for (int i = 0; i < LOOP; i++)
        thin.thinFour(inimg, outimg3);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&runTime, start, stop);
    cout << "thinFour() time is " << (runTime) / LOOP << " ms" << endl;
    ImageBasicOp::copyToHost(outimg3);
    ImageBasicOp::writeToFile("thinFour_outimg.bmp", outimg3);

    // --- thinPtFour(): pattern table + four pixels per thread ---
    hipEventRecord(start, 0);
    for (int i = 0; i < LOOP; i++)
        thin.thinPtFour(inimg, outimg4);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&runTime, start, stop);
    cout << "thinPtFour() time is " << (runTime) / LOOP << " ms" << endl;
    ImageBasicOp::copyToHost(outimg4);
    ImageBasicOp::writeToFile("thinPtFour_outimg.bmp", outimg4);

    // Release timing events and all images.
    hipEventDestroy(start);
    hipEventDestroy(stop);
    ImageBasicOp::deleteImage(inimg);
    ImageBasicOp::deleteImage(outimg1);
    ImageBasicOp::deleteImage(outimg2);
    ImageBasicOp::deleteImage(outimg3);
    ImageBasicOp::deleteImage(outimg4);
    return 0;
}
| 95fd47f8fc2ab83e41ef991f9cbd491f6db7757e.cu | #include <iostream>
#include "Thinning.h"
#include "ErrorCode.h"
#include "Image.h"
using namespace std;
#define LOOP 100
// Entry point: loads an image, binarizes it, then benchmarks four thinning
// variants (thin, thinPt, thinFour, thinPtFour), printing the average time
// over LOOP runs and writing each result image to disk.
// NOTE(review): cudaEventCreate is re-invoked on the same handles before
// several sections without a matching cudaEventDestroy — event handles leak;
// outimg5/outimg6 are allocated but never used.
int main(int argc, char const **argv)
{
if(argc < 2)
{
cout << "Please input image!" << endl;
return 0;
}
Thinning thin;
// Load the input image; abort on read failure.
Image *inimg;
ImageBasicOp::newImage(&inimg);
int errcode;
errcode = ImageBasicOp::readFromFile(argv[1], inimg);
if (errcode != NO_ERROR) {
cout << "error: " << errcode << endl;
return 0;
}
// Binarize: every non-zero pixel becomes foreground (255).
for(int i = 0; i < inimg->width * inimg->height; i++)
if(inimg->imgData[i] != 0)
inimg->imgData[i] = 255;
// One output image per thinning variant (5 and 6 are unused).
Image *outimg1;
ImageBasicOp::newImage(&outimg1);
ImageBasicOp::makeAtHost(outimg1, inimg->width, inimg->height);
Image *outimg2;
ImageBasicOp::newImage(&outimg2);
ImageBasicOp::makeAtHost(outimg2, inimg->width, inimg->height);
Image *outimg3;
ImageBasicOp::newImage(&outimg3);
ImageBasicOp::makeAtHost(outimg3, inimg->width, inimg->height);
Image *outimg4;
ImageBasicOp::newImage(&outimg4);
ImageBasicOp::makeAtHost(outimg4, inimg->width, inimg->height);
Image *outimg5;
ImageBasicOp::newImage(&outimg5);
ImageBasicOp::makeAtHost(outimg5, inimg->width, inimg->height);
Image *outimg6;
ImageBasicOp::newImage(&outimg6);
ImageBasicOp::makeAtHost(outimg6, inimg->width, inimg->height);
cudaEvent_t start, stop;
float runTime;
// Direct parallel version
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
for (int i = 0; i < LOOP; i++)
thin.thin(inimg, outimg1);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&runTime, start, stop);
cout << "thin() time is " << (runTime) / LOOP << " ms" << endl;
ImageBasicOp::copyToHost(outimg1);
ImageBasicOp::writeToFile("thin_outimg.bmp", outimg1);
// Pattern-table method; the pattern table resides in global memory
cudaEventCreate(&start);
cudaEventCreate(&stop);
// float runTime;
cudaEventRecord(start, 0);
for (int i = 0; i < LOOP; i++)
thin.thinPt(inimg, outimg2);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&runTime, start, stop);
cout << "thinPt() time is " << (runTime) / LOOP << " ms" << endl;
ImageBasicOp::copyToHost(outimg2);
ImageBasicOp::writeToFile("thinPt_outimg.bmp", outimg2);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
for (int i = 0; i < LOOP; i++)
thin.thinFour(inimg, outimg3);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&runTime, start, stop);
cout << "thinFour() time is " << (runTime) / LOOP << " ms" << endl;
ImageBasicOp::copyToHost(outimg3);
ImageBasicOp::writeToFile("thinFour_outimg.bmp", outimg3);
// Direct parallel version, one thread processes four pixels
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
for (int i = 0; i < LOOP; i++)
thin.thinPtFour(inimg, outimg4);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&runTime, start, stop);
cout << "thinPtFour() time is " << (runTime) / LOOP << " ms" << endl;
ImageBasicOp::copyToHost(outimg4);
ImageBasicOp::writeToFile("thinPtFour_outimg.bmp", outimg4);
// Release all images.
ImageBasicOp::deleteImage(inimg);
ImageBasicOp::deleteImage(outimg1);
ImageBasicOp::deleteImage(outimg2);
ImageBasicOp::deleteImage(outimg3);
ImageBasicOp::deleteImage(outimg4);
ImageBasicOp::deleteImage(outimg5);
ImageBasicOp::deleteImage(outimg6);
return 0;
}
|
83ac25cde453d9250e9070c4a33ae13d0e892894.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Flattened global thread id for a 3-D grid of 3-D blocks:
// (linear block rank) * (threads per block) + (thread rank within block).
__device__ unsigned int getGid3d3d(){
    unsigned int block_rank = blockIdx.x
        + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    unsigned int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
    unsigned int local_rank = threadIdx.x
        + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    return block_rank * threads_per_block + local_rank;
}
// Elementwise Euclidean norm: out[i] = sqrt(in1[i]^2 + in2[i]^2).
// NOTE(review): no bounds check — assumes the launch configuration covers
// exactly the array length; confirm at the call sites.
__global__ void l2_norm(double *in1, double *in2, double *out){
int gid = getGid3d3d();
out[gid] = sqrt(in1[gid]*in1[gid] + in2[gid]*in2[gid]);
} | 83ac25cde453d9250e9070c4a33ae13d0e892894.cu | #include "includes.h"
// Flattened global thread id for a 3-D grid of 3-D blocks:
// linear block rank times threads-per-block, plus the thread's rank
// within its own block.
__device__ unsigned int getGid3d3d(){
int blockId = blockIdx.x + blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.y * blockDim.x)
+ (threadIdx.z * (blockDim.x * blockDim.y)) + threadIdx.x;
return threadId;
}
// Elementwise Euclidean norm: out[i] = sqrt(in1[i]^2 + in2[i]^2).
// NOTE(review): no bounds check — assumes the launch configuration covers
// exactly the array length; confirm at the call sites.
__global__ void l2_norm(double *in1, double *in2, double *out){
int gid = getGid3d3d();
out[gid] = sqrt(in1[gid]*in1[gid] + in2[gid]*in2[gid]);
} |
e6781152bd660bf196d1deb74e49987e06172296.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
/**
 * Computes mr = m1 x m2, one thread per output element.
 * m1 is fila1 x columna1, m2 is fila2 x columna2 (caller guarantees
 * columna1 == fila2), so the result mr is fila1 x columna2.
 *
 * BUGFIX: the bounds guard used to be (fila_r < fila2 && columna_r <
 * columna1), which only covers the correct output range for square-ish
 * inputs; the result matrix is fila1 x columna2, matching the mr index
 * fila_r * columna2 + columna_r below.
 */
__global__ void MultiplicarMatrices(float *m1, float *m2, float *mr, int columna1, int fila1, int columna2, int fila2)
{
    int fila_r = blockIdx.y*blockDim.y+threadIdx.y;    // output row
    int columna_r = blockIdx.x*blockDim.x+threadIdx.x; // output column
    if (fila_r < fila1 && columna_r < columna2) {
        float tmp_mult = 0.0f;
        // Dot product of row fila_r of m1 with column columna_r of m2;
        // the shared dimension is fila2 (== columna1).
        for (int i = 0; i < fila2; i++) {
            tmp_mult += m1[fila_r*columna1 + i] * m2[i*columna2 + columna_r];
        }
        mr[fila_r*columna2 + columna_r] = tmp_mult;
    }
}
/* Reads fila*columna comma-separated floats from `archivo` into `matriz`
 * and returns the same buffer so the call can be chained. */
float* LlenaMatriz(int fila, int columna, FILE *archivo, float *matriz){
    const int total = fila * columna;
    float *cursor = matriz;
    for (int leidos = 0; leidos < total; ++leidos) {
        fscanf(archivo, "%f,", cursor);
        ++cursor;
    }
    return matriz;
}
/**
 * Loads two matrices from the files named by argv[1]/argv[2] (header: rows
 * then cols, then comma-separated floats) and multiplies them on the GPU.
 *
 * Fixes vs. the original:
 *  - every malloc/hipMalloc/hipMemcpy size omitted "* sizeof(float)";
 *  - the result buffer d_mr was allocated with host malloc and passed to
 *    the kernel (illegal device access) — it now uses hipMalloc;
 *  - the result size is fila1 x columna2 (was columna1 x fila2);
 *  - grid dimensions are ceil-divided per axis (ceil(numOper/blockSize)
 *    used integer division and a single flat count);
 *  - dead duplicated allocation code after the multiply was removed;
 *  - host/device buffers are freed and the input files closed.
 */
int main(int argc, char const *argv[]) {
    FILE *archivo1;
    FILE *archivo2;
    int fila1, columna1, fila2, columna2;
    const int blockSize = 32;          // threads per block edge (32x32 tile)
    float *h_m1, *h_m2, *h_mr;         // host buffers
    float *d_m1, *d_m2, *d_mr;         // device buffers
    archivo1 = fopen(argv[1], "r");
    archivo2 = fopen(argv[2], "r");
    if (archivo1 != NULL && archivo2 != NULL) {
        fscanf(archivo1, "%d", &fila1);
        fscanf(archivo1, "%d", &columna1);
        fscanf(archivo2, "%d", &fila2);
        fscanf(archivo2, "%d", &columna2);
        // The product is only defined when the inner dimensions agree.
        if (columna1 == fila2) {
            size_t bytes1 = (size_t)fila1 * columna1 * sizeof(float);
            size_t bytes2 = (size_t)fila2 * columna2 * sizeof(float);
            size_t bytesR = (size_t)fila1 * columna2 * sizeof(float);
            // Matrix 1: host buffer, device buffer, fill, upload.
            h_m1 = (float*)malloc(bytes1);
            hipMalloc(&d_m1, bytes1);
            h_m1 = LlenaMatriz(fila1, columna1, archivo1, h_m1);
            hipMemcpy(d_m1, h_m1, bytes1, hipMemcpyHostToDevice);
            // Matrix 2: same sequence.
            h_m2 = (float*)malloc(bytes2);
            hipMalloc(&d_m2, bytes2);
            h_m2 = LlenaMatriz(fila2, columna2, archivo2, h_m2);
            hipMemcpy(d_m2, h_m2, bytes2, hipMemcpyHostToDevice);
            // Result buffers.
            h_mr = (float*)malloc(bytesR);
            hipMalloc(&d_mr, bytesR);
            // One 32x32 thread block per output tile, ceil-divided per axis.
            dim3 dimBlock(blockSize, blockSize, 1);
            dim3 dimGrid((columna2 + blockSize - 1) / blockSize,
                         (fila1 + blockSize - 1) / blockSize, 1);
            hipLaunchKernelGGL(MultiplicarMatrices, dimGrid, dimBlock, 0, 0,
                d_m1, d_m2, d_mr, columna1, fila1, columna2, fila2);
            hipMemcpy(h_mr, d_mr, bytesR, hipMemcpyDeviceToHost);
            // Release all buffers.
            free(h_m1);
            free(h_m2);
            free(h_mr);
            hipFree(d_m1);
            hipFree(d_m2);
            hipFree(d_mr);
        }
        fclose(archivo1);
        fclose(archivo2);
    }
}
| e6781152bd660bf196d1deb74e49987e06172296.cu | #include <stdio.h>
#include <stdlib.h>
// Matrix-multiply kernel: mr = m1 * m2, row-major storage.
// m1 is fila1 x columna1, m2 is fila2 x columna2; the caller guarantees
// columna1 == fila2 (the shared inner dimension is fila2).
// Launch with a 2D grid: x indexes the result column, y the result row.
__global__ void MultiplicarMatrices(float *m1, float *m2, float *mr, int columna1, int fila1, int columna2, int fila2)
{
  int fila_r = blockIdx.y*blockDim.y+threadIdx.y;    // result row
  int columna_r = blockIdx.x*blockDim.x+threadIdx.x; // result column
  float tmp_mult = 0;
  // Guard against the tail of the grid using the RESULT dimensions
  // (fila1 x columna2).  The original checked (fila_r < fila2 &&
  // columna_r < columna1), which is only correct for square operands.
  if ((fila_r < fila1) && (columna_r < columna2)) {
    for (int i = 0; i < fila2 ; i++) {
      tmp_mult += m1[i+columna1*fila_r]*m2[i*columna2+columna_r];
    }
    mr[fila_r*columna2+columna_r]= tmp_mult;
  }
}
// Reads fila*columna comma-separated floats from `archivo` into `matriz`
// and returns the same buffer for call-chaining convenience.
float* LlenaMatriz(int fila,int columna, FILE *archivo, float *matriz){
  const int total = fila * columna;
  for (int idx = 0; idx < total; ++idx) {
    fscanf(archivo, "%f,", matriz + idx);
  }
  return matriz;
}
// Reads two matrices from the files named in argv[1]/argv[2] (first two
// integers are rows and columns, then comma-separated floats), multiplies
// them on the GPU and copies the result back to the host.
int main(int argc, char const *argv[]) {
  FILE *archivo1;
  FILE *archivo2;
  int fila1, columna1, fila2, columna2, blockSize = 32;
  // Matrices entrada Host
  float *h_m1, *h_m2;
  // Matriz salida Host
  float *h_mr;
  // Matrices entrada Device
  float *d_m1, *d_m2;
  // Matriz de salida Device
  float *d_mr;
  archivo1 = fopen(argv[1],"r");
  archivo2 = fopen(argv[2],"r");
  if (archivo1 != NULL && archivo2 != NULL) {
    fscanf(archivo1,"%d",&fila1);
    fscanf(archivo1,"%d",&columna1);
    fscanf(archivo2,"%d",&fila2);
    fscanf(archivo2,"%d",&columna2);
    if (columna1 == fila2) {
      // Buffer sizes in BYTES: the original passed raw element counts to
      // malloc/cudaMalloc/cudaMemcpy (missing * sizeof(float)).
      size_t bytes1 = (size_t)fila1 * columna1 * sizeof(float);
      size_t bytes2 = (size_t)fila2 * columna2 * sizeof(float);
      size_t bytesR = (size_t)fila1 * columna2 * sizeof(float); // result is fila1 x columna2
      // Reservando y llenado de la matriz 1
      h_m1 = (float*)malloc(bytes1);
      cudaMalloc(&d_m1, bytes1);
      h_m1 = LlenaMatriz(fila1, columna1, archivo1, h_m1);
      cudaMemcpy(d_m1, h_m1, bytes1, cudaMemcpyHostToDevice);
      // Reservando y llenado de la matriz 2
      h_m2 = (float*)malloc(bytes2);
      cudaMalloc(&d_m2, bytes2);
      h_m2 = LlenaMatriz(fila2, columna2, archivo2, h_m2);
      cudaMemcpy(d_m2, h_m2, bytes2, cudaMemcpyHostToDevice);
      // d_mr must be DEVICE memory: the original allocated it with host
      // malloc, so the kernel wrote through a host pointer.
      h_mr = (float*)malloc(bytesR);
      cudaMalloc(&d_mr, bytesR);
      // One thread per result element; ceil-divide so partial tiles are
      // covered (the kernel bounds-checks the tail).  The original passed
      // the 32x32 block dims as the GRID and the grid dims as the block.
      dim3 dimBlock(blockSize, blockSize, 1);
      dim3 dimGrid((columna2 + blockSize - 1) / blockSize,
                   (fila1 + blockSize - 1) / blockSize, 1);
      MultiplicarMatrices<<<dimGrid, dimBlock>>>(d_m1, d_m2, d_mr, columna1, fila1, columna2, fila2);
      cudaMemcpy(h_mr, d_mr, bytesR, cudaMemcpyDeviceToHost);
      // Liberar memoria (the original leaked everything and re-ran a dead
      // duplicate of the matrix-1 setup after this block).
      cudaFree(d_m1); cudaFree(d_m2); cudaFree(d_mr);
      free(h_m1); free(h_m2); free(h_mr);
    }
    fclose(archivo1);
    fclose(archivo2);
  }
}
|
208d5b810209dc55575b3f37d3cf97c650e56b75.hip | // !!! This is a file automatically generated by hipify!!!
// Automatically generated CU for E:\GitHub\NeuroGPU\Figures\FigureS4_mainen./runModel.hoc
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "AllModels_hip.cuh"
// Universals:
#define PI (3.1415927f)
#define R (8.31441f)
#define FARADAY (96485.309f)
#define ktf (1000.*8.3134*(celsius + 273.15)/FARADAY)
#define DEF_vrest -65.
#define DEF_nai 10.
#define DEF_nao 140.
#define DEF_ena (115. + DEF_vrest)
#define DEF_ki 54.4
#define DEF_ko 2.5
#define DEF_ek (-12. + DEF_vrest)
#include <math.h>
#define DEF_cai 5.e-5
#define DEF_cao 2.
#define DEF_eca 12.5 *log(DEF_cao / DEF_cai)
// GGlobals:
#define celsius (37.0)
#define stoprun (0.0)
#define clamp_resist (0.001)
#define secondorder (0.0)
// NGlobals:
#define q10_ca (2.3)
#define temp_ca (23.0)
#define tadj_ca (3.2093639532679714)
#define vmin_ca (-120.0)
#define vmax_ca (100.0)
#define vshift_ca (0.0)
#define depth_cad (0.09334562733124982)
#define cainf_cad (0.0001)
#define taur_cad (200.0)
#define q10_kca (2.3)
#define temp_kca (23.0)
#define tadj_kca (3.2093639532679714)
#define vmin_kca (-120.0)
#define vmax_kca (100.0)
#define q10_km (2.3)
#define temp_km (23.0)
#define tadj_km (3.2093639532679714)
#define vmin_km (-120.0)
#define vmax_km (100.0)
#define q10_kv (2.3)
#define temp_kv (23.0)
#define tadj_kv (3.2093639532679714)
#define vmin_kv (-120.0)
#define vmax_kv (100.0)
#define q10_na (2.3)
#define temp_na (23.0)
#define tadj_na (3.2093639532679714)
#define vmin_na (-120.0)
#define vmax_na (100.0)
#define vshift_na (-5.0)
// Reversals:
#define ena (60.0f)
#define ek (-90.0f)
#define DEF_eca2 (140.0f)
// Declarations:
__device__ void Cutrates_ca(MYFTYPE v ,MYFTYPE gbar_ca,MYFTYPE cao_ca,MYFTYPE &hinf,MYFTYPE &htau,MYFTYPE &minf,MYFTYPE &mtau);
__device__ void Curates_ca(MYFTYPE vm ,MYFTYPE gbar_ca,MYFTYPE cao_ca,MYFTYPE &hinf,MYFTYPE &htau,MYFTYPE &minf,MYFTYPE &mtau);
__device__ void Curates_kca(MYFTYPE cai,MYFTYPE gbar_kca,MYFTYPE caix_kca,MYFTYPE Ra_kca,MYFTYPE Rb_kca,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau);
__device__ void Cutrates_km(MYFTYPE v ,MYFTYPE gbar_km,MYFTYPE tha_km,MYFTYPE qa_km,MYFTYPE Ra_km,MYFTYPE Rb_km,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau);
__device__ void Curates_km(MYFTYPE v ,MYFTYPE gbar_km,MYFTYPE tha_km,MYFTYPE qa_km,MYFTYPE Ra_km,MYFTYPE Rb_km,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau);
__device__ void Cutrates_kv(MYFTYPE v ,MYFTYPE gbar_kv,MYFTYPE tha_kv,MYFTYPE qa_kv,MYFTYPE Ra_kv,MYFTYPE Rb_kv,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau);
__device__ void Curates_kv(MYFTYPE v ,MYFTYPE gbar_kv,MYFTYPE tha_kv,MYFTYPE qa_kv,MYFTYPE Ra_kv,MYFTYPE Rb_kv,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau);
__device__ void Cutrates_na(MYFTYPE v,MYFTYPE gbar_na,MYFTYPE tha_na,MYFTYPE qa_na,MYFTYPE Ra_na,MYFTYPE Rb_na,MYFTYPE thi1_na,MYFTYPE thi2_na,MYFTYPE qi_na,MYFTYPE thinf_na,MYFTYPE qinf_na,MYFTYPE Rg_na,MYFTYPE Rd_na,MYFTYPE &hinf,MYFTYPE &htau,MYFTYPE &minf,MYFTYPE &mtau);
__device__ void Curates_na(MYFTYPE vm,MYFTYPE gbar_na,MYFTYPE tha_na,MYFTYPE qa_na,MYFTYPE Ra_na,MYFTYPE Rb_na,MYFTYPE thi1_na,MYFTYPE thi2_na,MYFTYPE qi_na,MYFTYPE thinf_na,MYFTYPE qinf_na,MYFTYPE Rg_na,MYFTYPE Rd_na,MYFTYPE &hinf,MYFTYPE &htau,MYFTYPE &minf,MYFTYPE &mtau);
// Nernst potential for an ion with valence `z` and inner/outer
// concentrations `ci`/`co` (uses the file-level ktf macro).
// Degenerate inputs map to sentinels: z == 0 -> 0, ci <= 0 -> +1e6,
// co <= 0 -> -1e6.
float Cunernst(float ci,float co, float z) {
	if (z == 0) {
		return 0.;
	}
	if (ci <= 0.) {
		return 1e6;
	}
	if (co <= 0.) {
		return -1e6;
	}
	return ktf/z*log(co/ci);
}
// Functions:
// exp-linear helper x/(e^x - 1); near zero the expression is 0/0, so a
// first-order series (1 - z/2) is used instead.  Three per-mechanism
// copies are emitted by the code generator (ca, km, kv).
__device__ MYFTYPE Cuefun_ca(MYFTYPE z){
	return (fabs(z) < 1e-4) ? (1 - z/2) : (z/(exp(z) - 1));
}
__device__ MYFTYPE Cuefun_km(MYFTYPE z){
	return (fabs(z) < 1e-4) ? (1 - z/2) : (z/(exp(z) - 1));
}
__device__ MYFTYPE Cuefun_kv(MYFTYPE z){
	return (fabs(z) < 1e-4) ? (1 - z/2) : (z/(exp(z) - 1));
}
// Linoid rate a*(v-th)/(1-exp(-(v-th)/q)); at v ~ th the expression is
// 0/0 and the limit a*q is returned instead.
__device__ MYFTYPE Cutrap0_na(MYFTYPE v,MYFTYPE th,MYFTYPE a,MYFTYPE q){
	MYFTYPE x = (v - th) / q;
	return (fabs(x) > 1e-6) ? (a * (v - th) / (1 - exp(-x))) : (a * q);
}
// Procedures:
// Rate entry point for the ca channel; NMODL "trates" normally consults a
// lookup table, here it simply delegates to Curates_ca.
__device__ void Cutrates_ca(MYFTYPE v ,MYFTYPE gbar_ca,MYFTYPE cao_ca,MYFTYPE &hinf,MYFTYPE &htau,MYFTYPE &minf,MYFTYPE &mtau) {
Curates_ca ( v ,gbar_ca,cao_ca,hinf,htau,minf,mtau);
}
// Steady states (minf/hinf) and time constants (mtau/htau) for the Ca
// channel at membrane potential vm; taus are divided by the
// compile-time temperature factor tadj_ca.
__device__ void Curates_ca(MYFTYPE vm ,MYFTYPE gbar_ca,MYFTYPE cao_ca,MYFTYPE &hinf,MYFTYPE &htau,MYFTYPE &minf,MYFTYPE &mtau) {
MYFTYPE a , b ;
/* removed tadj_ca recalculation */
// activation: forward rate via the exp-linear helper, backward plain exp
a = 0.209 * Cuefun_ca ( - ( 27.0 + vm ) / 3.8 ) ;
b = 0.94 * exp ( ( - 75.0 - vm ) / 17.0 ) ;
mtau = 1.0 / tadj_ca / ( a + b ) ;
minf = a / ( a + b ) ;
// inactivation rates
a = 0.000457 * exp ( ( - 13.0 - vm ) / 50.0 ) ;
b = 0.0065 / ( exp ( ( - vm - 15.0 ) / 28.0 ) + 1.0 ) ;
htau = 1.0 / tadj_ca / ( a + b ) ;
hinf = a / ( a + b ) ;
}
// KCa gating rates: forward rate depends on calcium concentration raised
// to caix_kca; backward rate is constant.
__device__ void Curates_kca(MYFTYPE cai,MYFTYPE gbar_kca,MYFTYPE caix_kca,MYFTYPE Ra_kca,MYFTYPE Rb_kca,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau) {
a = Ra_kca * powf( cai , caix_kca ) ;
b = Rb_kca ;
/* removed tadj_kca recalculation */
ntau = 1.0 / tadj_kca / ( a + b ) ;
ninf = a / ( a + b ) ;
}
// Rate entry point for km; delegates to Curates_km.
__device__ void Cutrates_km(MYFTYPE v ,MYFTYPE gbar_km,MYFTYPE tha_km,MYFTYPE qa_km,MYFTYPE Ra_km,MYFTYPE Rb_km,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau) {
Curates_km ( v ,gbar_km,tha_km,qa_km,Ra_km,Rb_km,a,b,ninf,ntau);
}
// Km gating rates from the exp-linear helper around threshold tha_km with
// slope qa_km.
__device__ void Curates_km(MYFTYPE v ,MYFTYPE gbar_km,MYFTYPE tha_km,MYFTYPE qa_km,MYFTYPE Ra_km,MYFTYPE Rb_km,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau) {
a = Ra_km * qa_km * Cuefun_km ( - ( v - tha_km ) / qa_km ) ;
b = Rb_km * qa_km * Cuefun_km ( ( v - tha_km ) / qa_km ) ;
/* removed tadj_km recalculation */
ntau = 1.0 / tadj_km / ( a + b ) ;
ninf = a / ( a + b ) ;
}
// Rate entry point for kv; delegates to Curates_kv.
__device__ void Cutrates_kv(MYFTYPE v ,MYFTYPE gbar_kv,MYFTYPE tha_kv,MYFTYPE qa_kv,MYFTYPE Ra_kv,MYFTYPE Rb_kv,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau) {
Curates_kv ( v ,gbar_kv,tha_kv,qa_kv,Ra_kv,Rb_kv,a,b,ninf,ntau);
}
// Kv gating rates; same functional form as Curates_km.
__device__ void Curates_kv(MYFTYPE v ,MYFTYPE gbar_kv,MYFTYPE tha_kv,MYFTYPE qa_kv,MYFTYPE Ra_kv,MYFTYPE Rb_kv,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau) {
a = Ra_kv * qa_kv * Cuefun_kv ( - ( v - tha_kv ) / qa_kv ) ;
b = Rb_kv * qa_kv * Cuefun_kv ( ( v - tha_kv ) / qa_kv ) ;
/* removed tadj_kv recalculation */
ntau = 1.0 / tadj_kv / ( a + b ) ;
ninf = a / ( a + b ) ;
}
// Rate entry point for na; delegates to Curates_na.
__device__ void Cutrates_na(MYFTYPE v,MYFTYPE gbar_na,MYFTYPE tha_na,MYFTYPE qa_na,MYFTYPE Ra_na,MYFTYPE Rb_na,MYFTYPE thi1_na,MYFTYPE thi2_na,MYFTYPE qi_na,MYFTYPE thinf_na,MYFTYPE qinf_na,MYFTYPE Rg_na,MYFTYPE Rd_na,MYFTYPE &hinf,MYFTYPE &htau,MYFTYPE &minf,MYFTYPE &mtau) {
Curates_na ( v ,gbar_na,tha_na,qa_na,Ra_na,Rb_na,thi1_na,thi2_na,qi_na,thinf_na,qinf_na,Rg_na,Rd_na,hinf,htau,minf,mtau);
}
// Na activation/inactivation rates built from the linoid helper
// Cutrap0_na; hinf uses a Boltzmann around thinf_na rather than a/(a+b).
__device__ void Curates_na(MYFTYPE vm,MYFTYPE gbar_na,MYFTYPE tha_na,MYFTYPE qa_na,MYFTYPE Ra_na,MYFTYPE Rb_na,MYFTYPE thi1_na,MYFTYPE thi2_na,MYFTYPE qi_na,MYFTYPE thinf_na,MYFTYPE qinf_na,MYFTYPE Rg_na,MYFTYPE Rd_na,MYFTYPE &hinf,MYFTYPE &htau,MYFTYPE &minf,MYFTYPE &mtau) {
MYFTYPE a , b ;
a = Cutrap0_na ( vm , tha_na , Ra_na , qa_na ) ;
b = Cutrap0_na ( - vm , - tha_na , Rb_na , qa_na ) ;
/* removed tadj_na recalculation */
mtau = 1.0 / tadj_na / ( a + b ) ;
minf = a / ( a + b ) ;
a = Cutrap0_na ( vm , thi1_na , Rd_na , qi_na ) ;
b = Cutrap0_na ( - vm , - thi2_na , Rg_na , qi_na ) ;
htau = 1.0 / tadj_na / ( a + b ) ;
hinf = 1.0 / ( 1.0 + exp ( ( vm - thinf_na ) / qinf_na ) ) ;
}
// Inits:
// Initialize Ca-channel state: eca from the Nernst relation (fixed
// DEF_cao), then m/h set to their steady states at (v + vshift_ca).
__device__ void CuInitModel_ca(MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gbar_ca,MYFTYPE cao_ca, MYFTYPE cai, MYFTYPE &ica,MYFTYPE &eca){
MYFTYPE hinf,htau,minf,mtau;
eca = ktf/2 *log(DEF_cao / cai);
// NOTE(review): the assignment below duplicates the one above (generator
// artifact); redundant but harmless.
eca = ktf/2 *log(DEF_cao / cai);
/* removed tadj_ca recalculation */
Cutrates_ca(v+vshift_ca,gbar_ca,cao_ca,hinf,htau,minf,mtau);
m = minf;
h = hinf;
};
// Initialize calcium accumulation: cai reset to the default, pool state
// `ca` to its resting value, then cai mirrored from it.
__device__ void CuInitModel_cad(MYFTYPE v,MYFTYPE &ca, MYFTYPE ica, MYFTYPE &cai,MYFTYPE &eca){
cai = DEF_cai;
eca = ktf/2 *log(DEF_cao / cai);
ca = cainf_cad;
cai = ca;
};
// Initialize KCa gate n to its calcium-dependent steady state.
__device__ void CuInitModel_kca(MYFTYPE v,MYFTYPE &n,MYFTYPE gbar_kca,MYFTYPE caix_kca,MYFTYPE Ra_kca,MYFTYPE Rb_kca, MYFTYPE cai,MYFTYPE &eca){
MYFTYPE a,b,ninf,ntau;
eca = ktf/2 *log(DEF_cao / cai);
Curates_kca(cai,gbar_kca,caix_kca,Ra_kca,Rb_kca,a,b,ninf,ntau);
n = ninf;
};
// Initialize Km gate n to its voltage-dependent steady state.
__device__ void CuInitModel_km(MYFTYPE v,MYFTYPE &n,MYFTYPE gbar_km,MYFTYPE tha_km,MYFTYPE qa_km,MYFTYPE Ra_km,MYFTYPE Rb_km){
MYFTYPE a,b,ninf,ntau;
/* removed tadj_km recalculation */
Cutrates_km(v,gbar_km,tha_km,qa_km,Ra_km,Rb_km,a,b,ninf,ntau);
n = ninf;
};
// Initialize Kv gate n to its voltage-dependent steady state.
__device__ void CuInitModel_kv(MYFTYPE v,MYFTYPE &n,MYFTYPE gbar_kv,MYFTYPE tha_kv,MYFTYPE qa_kv,MYFTYPE Ra_kv,MYFTYPE Rb_kv){
MYFTYPE a,b,ninf,ntau;
/* removed tadj_kv recalculation */
Cutrates_kv(v,gbar_kv,tha_kv,qa_kv,Ra_kv,Rb_kv,a,b,ninf,ntau);
n = ninf;
};
// Initialize Na gates m/h to their steady states at (v + vshift_na).
__device__ void CuInitModel_na(MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gbar_na,MYFTYPE tha_na,MYFTYPE qa_na,MYFTYPE Ra_na,MYFTYPE Rb_na,MYFTYPE thi1_na,MYFTYPE thi2_na,MYFTYPE qi_na,MYFTYPE thinf_na,MYFTYPE qinf_na,MYFTYPE Rg_na,MYFTYPE Rd_na){
MYFTYPE hinf,htau,minf,mtau;
/* removed tadj_na recalculation */
Cutrates_na(v+vshift_na,gbar_na,tha_na,qa_na,Ra_na,Rb_na,thi1_na,thi2_na,qi_na,thinf_na,qinf_na,Rg_na,Rd_na,hinf,htau,minf,mtau);
m = minf;
h = hinf;
};
// The passive mechanisms are stateless: nothing to initialize.
__device__ void CuInitModel_pas(MYFTYPE v,MYFTYPE g_pas,MYFTYPE e_pas){
};
__device__ void CuInitModel_pas2(MYFTYPE v,MYFTYPE g_pas2,MYFTYPE e_pas2){
};
__device__ void CuInitModel_pasx(MYFTYPE v,MYFTYPE g_pasx,MYFTYPE e_pasx){
};
// Derivs:
// One dt step of the Ca-channel gates: closed-form exponential relaxation
// of m and h toward minf/hinf with time constants mtau/htau, then eca
// refreshed from the Nernst relation.
__device__ void CuDerivModel_ca(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gbar_ca,MYFTYPE cao_ca, MYFTYPE cai, MYFTYPE &ica,MYFTYPE &eca){
MYFTYPE hinf,htau,minf,mtau;
Cutrates_ca ( v + vshift_ca,gbar_ca,cao_ca,hinf,htau,minf,mtau);
m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mtau)))*(- ( ( ( minf ) ) / mtau ) / ( ( ( ( - 1.0 ) ) ) / mtau ) - m) ;
h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / htau)))*(- ( ( ( hinf ) ) / htau ) / ( ( ( ( - 1.0 ) ) ) / htau ) - h) ;
eca = ktf/2 *log(DEF_cao / cai);
}
// One dt step of the calcium pool: influx from ica (clamped so it never
// depletes the pool), relaxation toward cainf_cad with taur_cad, cai and
// eca refreshed.
__device__ void CuDerivModel_cad(MYFTYPE dt, MYFTYPE v,MYFTYPE &ca, MYFTYPE ica, MYFTYPE &cai,MYFTYPE &eca){
MYFTYPE drive_channel;
drive_channel = - ( 10000.0 ) * ica / ( 2.0 * FARADAY * depth_cad ) ;
// only inward (depolarizing) calcium flux adds to the pool
if ( drive_channel <= 0. ) {
drive_channel = 0. ;
}
ca = ca + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / taur_cad)))*(- ( drive_channel + ( ( cainf_cad ) ) / taur_cad ) / ( ( ( ( - 1.0 ) ) ) / taur_cad ) - ca) ;
cai = ca ;
eca = ktf/2 *log(DEF_cao / cai);
}
// One dt step of the KCa gate n (calcium-dependent rates).
__device__ void CuDerivModel_kca(MYFTYPE dt, MYFTYPE v,MYFTYPE &n,MYFTYPE gbar_kca,MYFTYPE caix_kca,MYFTYPE Ra_kca,MYFTYPE Rb_kca, MYFTYPE cai,MYFTYPE &eca){
// NOTE(review): with the file-level "#define ek (-90.0f)" in force, the
// next line preprocesses to "MYFTYPE (-90.0f);" — a no-op cast
// expression, NOT a local variable declaration. Generator artifact.
MYFTYPE ek;
MYFTYPE a,b,ninf,ntau;
Curates_kca ( cai,gbar_kca,caix_kca,Ra_kca,Rb_kca,a,b,ninf,ntau);
n = n + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / ntau)))*(- ( ( ( ninf ) ) / ntau ) / ( ( ( ( - 1.0 ) ) ) / ntau ) - n) ;
eca = ktf/2 *log(DEF_cao / cai);
}
// One dt step of the Km gate n.
__device__ void CuDerivModel_km(MYFTYPE dt, MYFTYPE v,MYFTYPE &n,MYFTYPE gbar_km,MYFTYPE tha_km,MYFTYPE qa_km,MYFTYPE Ra_km,MYFTYPE Rb_km){
MYFTYPE a,b,ninf,ntau;
Cutrates_km ( v,gbar_km,tha_km,qa_km,Ra_km,Rb_km,a,b,ninf,ntau);
n = n + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / ntau)))*(- ( ( ( ninf ) ) / ntau ) / ( ( ( ( - 1.0 ) ) ) / ntau ) - n) ;
}
// One dt step of the Kv gate n.
__device__ void CuDerivModel_kv(MYFTYPE dt, MYFTYPE v,MYFTYPE &n,MYFTYPE gbar_kv,MYFTYPE tha_kv,MYFTYPE qa_kv,MYFTYPE Ra_kv,MYFTYPE Rb_kv){
MYFTYPE a,b,ninf,ntau;
Cutrates_kv ( v,gbar_kv,tha_kv,qa_kv,Ra_kv,Rb_kv,a,b,ninf,ntau);
n = n + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / ntau)))*(- ( ( ( ninf ) ) / ntau ) / ( ( ( ( - 1.0 ) ) ) / ntau ) - n) ;
}
// One dt step of the Na gates m/h at the shifted potential.
__device__ void CuDerivModel_na(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gbar_na,MYFTYPE tha_na,MYFTYPE qa_na,MYFTYPE Ra_na,MYFTYPE Rb_na,MYFTYPE thi1_na,MYFTYPE thi2_na,MYFTYPE qi_na,MYFTYPE thinf_na,MYFTYPE qinf_na,MYFTYPE Rg_na,MYFTYPE Rd_na){
MYFTYPE hinf,htau,minf,mtau;
Cutrates_na ( v + vshift_na,gbar_na,tha_na,qa_na,Ra_na,Rb_na,thi1_na,thi2_na,qi_na,thinf_na,qinf_na,Rg_na,Rd_na,hinf,htau,minf,mtau);
m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mtau)))*(- ( ( ( minf ) ) / mtau ) / ( ( ( ( - 1.0 ) ) ) / mtau ) - m) ;
h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / htau)))*(- ( ( ( hinf ) ) / htau ) / ( ( ( ( - 1.0 ) ) ) / htau ) - h) ;
}
// Breaks:
// Instantaneous Ca-channel current: gca = tadj*gbar*m^2*h.  Adds the
// current to the shared accumulator AND to the mechanism's own ica
// (consumed by CuDerivModel_cad).  Unused locals and the generator's
// stray "MYFTYPE ;" declarations were removed throughout this section;
// numerics are unchanged.
__device__ void CuBreakpointModel_ca(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gbar_ca,MYFTYPE cao_ca, MYFTYPE cai, MYFTYPE &ica,MYFTYPE &eca) {
    MYFTYPE gca = tadj_ca * gbar_ca * m * m * h ;
    MYFTYPE ica_ca = ( 1e-4 ) * gca * ( v - eca ) ;
    sumCurrents += ica_ca;
    ica += ica_ca;
    sumConductivity += gca;
};
// The calcium-accumulation mechanism carries no membrane current.
__device__ void CuBreakpointModel_cad(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &ca, MYFTYPE ica, MYFTYPE &cai,MYFTYPE &eca) {
};
// KCa current: gk = tadj*gbar*n, driven by the global ek reversal.
__device__ void CuBreakpointModel_kca(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &n,MYFTYPE gbar_kca,MYFTYPE caix_kca,MYFTYPE Ra_kca,MYFTYPE Rb_kca, MYFTYPE cai,MYFTYPE &eca) {
    MYFTYPE gk = tadj_kca * gbar_kca * n ;
    MYFTYPE ik = ( 1e-4 ) * gk * ( v - ek ) ;
    sumCurrents += ik;
    sumConductivity += gk;
};
// Km current.
__device__ void CuBreakpointModel_km(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &n,MYFTYPE gbar_km,MYFTYPE tha_km,MYFTYPE qa_km,MYFTYPE Ra_km,MYFTYPE Rb_km) {
    MYFTYPE gk = tadj_km * gbar_km * n ;
    MYFTYPE ik = ( 1e-4 ) * gk * ( v - ek ) ;
    sumCurrents += ik;
    sumConductivity += gk;
};
// Kv current.
__device__ void CuBreakpointModel_kv(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &n,MYFTYPE gbar_kv,MYFTYPE tha_kv,MYFTYPE qa_kv,MYFTYPE Ra_kv,MYFTYPE Rb_kv) {
    MYFTYPE gk = tadj_kv * gbar_kv * n ;
    MYFTYPE ik = ( 1e-4 ) * gk * ( v - ek ) ;
    sumCurrents += ik;
    sumConductivity += gk;
};
// Na current: gna = tadj*gbar*m^3*h.
__device__ void CuBreakpointModel_na(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gbar_na,MYFTYPE tha_na,MYFTYPE qa_na,MYFTYPE Ra_na,MYFTYPE Rb_na,MYFTYPE thi1_na,MYFTYPE thi2_na,MYFTYPE qi_na,MYFTYPE thinf_na,MYFTYPE qinf_na,MYFTYPE Rg_na,MYFTYPE Rd_na) {
    MYFTYPE gna = tadj_na * gbar_na * m * m * m * h ;
    MYFTYPE ina = ( 1e-4 ) * gna * ( v - ena ) ;
    sumCurrents += ina;
    sumConductivity += gna;
};
// Passive leak current (the generator's no-op "i = i;" was dropped).
__device__ void CuBreakpointModel_pas(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE g_pas,MYFTYPE e_pas) {
    MYFTYPE i = g_pas * ( v - e_pas ) ;
    sumCurrents += i;
    sumConductivity += g_pas;
};
// Second passive leak.
// NOTE(review): unlike _pas, this variant does NOT add g_pas2 to
// sumConductivity; behavior preserved from the generated code — confirm
// whether the omission is intentional.
__device__ void CuBreakpointModel_pas2(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE g_pas2,MYFTYPE e_pas2) {
    MYFTYPE i = g_pas2 * ( v - e_pas2 ) ;
    sumCurrents += i;
};
// Extra passive leak; same sumConductivity omission as _pas2.
__device__ void CuBreakpointModel_pasx(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE g_pasx,MYFTYPE e_pasx) {
    MYFTYPE i = g_pasx * ( v - e_pasx ) ;
    sumCurrents += i;
};
| 208d5b810209dc55575b3f37d3cf97c650e56b75.cu | // Automatically generated CU for E:\GitHub\NeuroGPU\Figures\FigureS4_mainen./runModel.hoc
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "AllModels.cuh"
// Universals:
#define PI (3.1415927f)
#define R (8.31441f)
#define FARADAY (96485.309f)
#define ktf (1000.*8.3134*(celsius + 273.15)/FARADAY)
#define DEF_vrest -65.
#define DEF_nai 10.
#define DEF_nao 140.
#define DEF_ena (115. + DEF_vrest)
#define DEF_ki 54.4
#define DEF_ko 2.5
#define DEF_ek (-12. + DEF_vrest)
#include <math.h>
#define DEF_cai 5.e-5
#define DEF_cao 2.
#define DEF_eca 12.5 *log(DEF_cao / DEF_cai)
// GGlobals:
#define celsius (37.0)
#define stoprun (0.0)
#define clamp_resist (0.001)
#define secondorder (0.0)
// NGlobals:
#define q10_ca (2.3)
#define temp_ca (23.0)
#define tadj_ca (3.2093639532679714)
#define vmin_ca (-120.0)
#define vmax_ca (100.0)
#define vshift_ca (0.0)
#define depth_cad (0.09334562733124982)
#define cainf_cad (0.0001)
#define taur_cad (200.0)
#define q10_kca (2.3)
#define temp_kca (23.0)
#define tadj_kca (3.2093639532679714)
#define vmin_kca (-120.0)
#define vmax_kca (100.0)
#define q10_km (2.3)
#define temp_km (23.0)
#define tadj_km (3.2093639532679714)
#define vmin_km (-120.0)
#define vmax_km (100.0)
#define q10_kv (2.3)
#define temp_kv (23.0)
#define tadj_kv (3.2093639532679714)
#define vmin_kv (-120.0)
#define vmax_kv (100.0)
#define q10_na (2.3)
#define temp_na (23.0)
#define tadj_na (3.2093639532679714)
#define vmin_na (-120.0)
#define vmax_na (100.0)
#define vshift_na (-5.0)
// Reversals:
#define ena (60.0f)
#define ek (-90.0f)
#define DEF_eca2 (140.0f)
// Declarations:
__device__ void Cutrates_ca(MYFTYPE v ,MYFTYPE gbar_ca,MYFTYPE cao_ca,MYFTYPE &hinf,MYFTYPE &htau,MYFTYPE &minf,MYFTYPE &mtau);
__device__ void Curates_ca(MYFTYPE vm ,MYFTYPE gbar_ca,MYFTYPE cao_ca,MYFTYPE &hinf,MYFTYPE &htau,MYFTYPE &minf,MYFTYPE &mtau);
__device__ void Curates_kca(MYFTYPE cai,MYFTYPE gbar_kca,MYFTYPE caix_kca,MYFTYPE Ra_kca,MYFTYPE Rb_kca,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau);
__device__ void Cutrates_km(MYFTYPE v ,MYFTYPE gbar_km,MYFTYPE tha_km,MYFTYPE qa_km,MYFTYPE Ra_km,MYFTYPE Rb_km,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau);
__device__ void Curates_km(MYFTYPE v ,MYFTYPE gbar_km,MYFTYPE tha_km,MYFTYPE qa_km,MYFTYPE Ra_km,MYFTYPE Rb_km,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau);
__device__ void Cutrates_kv(MYFTYPE v ,MYFTYPE gbar_kv,MYFTYPE tha_kv,MYFTYPE qa_kv,MYFTYPE Ra_kv,MYFTYPE Rb_kv,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau);
__device__ void Curates_kv(MYFTYPE v ,MYFTYPE gbar_kv,MYFTYPE tha_kv,MYFTYPE qa_kv,MYFTYPE Ra_kv,MYFTYPE Rb_kv,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau);
__device__ void Cutrates_na(MYFTYPE v,MYFTYPE gbar_na,MYFTYPE tha_na,MYFTYPE qa_na,MYFTYPE Ra_na,MYFTYPE Rb_na,MYFTYPE thi1_na,MYFTYPE thi2_na,MYFTYPE qi_na,MYFTYPE thinf_na,MYFTYPE qinf_na,MYFTYPE Rg_na,MYFTYPE Rd_na,MYFTYPE &hinf,MYFTYPE &htau,MYFTYPE &minf,MYFTYPE &mtau);
__device__ void Curates_na(MYFTYPE vm,MYFTYPE gbar_na,MYFTYPE tha_na,MYFTYPE qa_na,MYFTYPE Ra_na,MYFTYPE Rb_na,MYFTYPE thi1_na,MYFTYPE thi2_na,MYFTYPE qi_na,MYFTYPE thinf_na,MYFTYPE qinf_na,MYFTYPE Rg_na,MYFTYPE Rd_na,MYFTYPE &hinf,MYFTYPE &htau,MYFTYPE &minf,MYFTYPE &mtau);
// Nernst potential for an ion with valence `z` and inner/outer
// concentrations `ci`/`co` (uses the file-level ktf macro).
// Degenerate inputs map to sentinels: z == 0 -> 0, ci <= 0 -> +1e6,
// co <= 0 -> -1e6.
float Cunernst(float ci,float co, float z) {
	if (z == 0) {
		return 0.;
	}
	if (ci <= 0.) {
		return 1e6;
	}
	if (co <= 0.) {
		return -1e6;
	}
	return ktf/z*log(co/ci);
}
// Functions:
// exp-linear helper x/(e^x - 1); near zero the expression is 0/0, so a
// first-order series (1 - z/2) is used instead.  Three per-mechanism
// copies are emitted by the code generator (ca, km, kv).
__device__ MYFTYPE Cuefun_ca(MYFTYPE z){
	return (fabs(z) < 1e-4) ? (1 - z/2) : (z/(exp(z) - 1));
}
__device__ MYFTYPE Cuefun_km(MYFTYPE z){
	return (fabs(z) < 1e-4) ? (1 - z/2) : (z/(exp(z) - 1));
}
__device__ MYFTYPE Cuefun_kv(MYFTYPE z){
	return (fabs(z) < 1e-4) ? (1 - z/2) : (z/(exp(z) - 1));
}
// Linoid rate a*(v-th)/(1-exp(-(v-th)/q)); at v ~ th the expression is
// 0/0 and the limit a*q is returned instead.
__device__ MYFTYPE Cutrap0_na(MYFTYPE v,MYFTYPE th,MYFTYPE a,MYFTYPE q){
	MYFTYPE x = (v - th) / q;
	return (fabs(x) > 1e-6) ? (a * (v - th) / (1 - exp(-x))) : (a * q);
}
// Procedures:
// Rate entry point for the ca channel; NMODL "trates" normally consults a
// lookup table, here it simply delegates to Curates_ca.
__device__ void Cutrates_ca(MYFTYPE v ,MYFTYPE gbar_ca,MYFTYPE cao_ca,MYFTYPE &hinf,MYFTYPE &htau,MYFTYPE &minf,MYFTYPE &mtau) {
Curates_ca ( v ,gbar_ca,cao_ca,hinf,htau,minf,mtau);
}
// Steady states (minf/hinf) and time constants (mtau/htau) for the Ca
// channel at membrane potential vm; taus are divided by the
// compile-time temperature factor tadj_ca.
__device__ void Curates_ca(MYFTYPE vm ,MYFTYPE gbar_ca,MYFTYPE cao_ca,MYFTYPE &hinf,MYFTYPE &htau,MYFTYPE &minf,MYFTYPE &mtau) {
MYFTYPE a , b ;
/* removed tadj_ca recalculation */
// activation: forward rate via the exp-linear helper, backward plain exp
a = 0.209 * Cuefun_ca ( - ( 27.0 + vm ) / 3.8 ) ;
b = 0.94 * exp ( ( - 75.0 - vm ) / 17.0 ) ;
mtau = 1.0 / tadj_ca / ( a + b ) ;
minf = a / ( a + b ) ;
// inactivation rates
a = 0.000457 * exp ( ( - 13.0 - vm ) / 50.0 ) ;
b = 0.0065 / ( exp ( ( - vm - 15.0 ) / 28.0 ) + 1.0 ) ;
htau = 1.0 / tadj_ca / ( a + b ) ;
hinf = a / ( a + b ) ;
}
// KCa gating rates: forward rate depends on calcium concentration raised
// to caix_kca; backward rate is constant.
__device__ void Curates_kca(MYFTYPE cai,MYFTYPE gbar_kca,MYFTYPE caix_kca,MYFTYPE Ra_kca,MYFTYPE Rb_kca,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau) {
a = Ra_kca * powf( cai , caix_kca ) ;
b = Rb_kca ;
/* removed tadj_kca recalculation */
ntau = 1.0 / tadj_kca / ( a + b ) ;
ninf = a / ( a + b ) ;
}
// Rate entry point for km; delegates to Curates_km.
__device__ void Cutrates_km(MYFTYPE v ,MYFTYPE gbar_km,MYFTYPE tha_km,MYFTYPE qa_km,MYFTYPE Ra_km,MYFTYPE Rb_km,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau) {
Curates_km ( v ,gbar_km,tha_km,qa_km,Ra_km,Rb_km,a,b,ninf,ntau);
}
// Km gating rates from the exp-linear helper around threshold tha_km with
// slope qa_km.
__device__ void Curates_km(MYFTYPE v ,MYFTYPE gbar_km,MYFTYPE tha_km,MYFTYPE qa_km,MYFTYPE Ra_km,MYFTYPE Rb_km,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau) {
a = Ra_km * qa_km * Cuefun_km ( - ( v - tha_km ) / qa_km ) ;
b = Rb_km * qa_km * Cuefun_km ( ( v - tha_km ) / qa_km ) ;
/* removed tadj_km recalculation */
ntau = 1.0 / tadj_km / ( a + b ) ;
ninf = a / ( a + b ) ;
}
// Rate entry point for kv; delegates to Curates_kv.
__device__ void Cutrates_kv(MYFTYPE v ,MYFTYPE gbar_kv,MYFTYPE tha_kv,MYFTYPE qa_kv,MYFTYPE Ra_kv,MYFTYPE Rb_kv,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau) {
Curates_kv ( v ,gbar_kv,tha_kv,qa_kv,Ra_kv,Rb_kv,a,b,ninf,ntau);
}
// Kv gating rates; same functional form as Curates_km.
__device__ void Curates_kv(MYFTYPE v ,MYFTYPE gbar_kv,MYFTYPE tha_kv,MYFTYPE qa_kv,MYFTYPE Ra_kv,MYFTYPE Rb_kv,MYFTYPE &a,MYFTYPE &b,MYFTYPE &ninf,MYFTYPE &ntau) {
a = Ra_kv * qa_kv * Cuefun_kv ( - ( v - tha_kv ) / qa_kv ) ;
b = Rb_kv * qa_kv * Cuefun_kv ( ( v - tha_kv ) / qa_kv ) ;
/* removed tadj_kv recalculation */
ntau = 1.0 / tadj_kv / ( a + b ) ;
ninf = a / ( a + b ) ;
}
// Rate entry point for na; delegates to Curates_na.
__device__ void Cutrates_na(MYFTYPE v,MYFTYPE gbar_na,MYFTYPE tha_na,MYFTYPE qa_na,MYFTYPE Ra_na,MYFTYPE Rb_na,MYFTYPE thi1_na,MYFTYPE thi2_na,MYFTYPE qi_na,MYFTYPE thinf_na,MYFTYPE qinf_na,MYFTYPE Rg_na,MYFTYPE Rd_na,MYFTYPE &hinf,MYFTYPE &htau,MYFTYPE &minf,MYFTYPE &mtau) {
Curates_na ( v ,gbar_na,tha_na,qa_na,Ra_na,Rb_na,thi1_na,thi2_na,qi_na,thinf_na,qinf_na,Rg_na,Rd_na,hinf,htau,minf,mtau);
}
// Na activation/inactivation rates built from the linoid helper
// Cutrap0_na; hinf uses a Boltzmann around thinf_na rather than a/(a+b).
__device__ void Curates_na(MYFTYPE vm,MYFTYPE gbar_na,MYFTYPE tha_na,MYFTYPE qa_na,MYFTYPE Ra_na,MYFTYPE Rb_na,MYFTYPE thi1_na,MYFTYPE thi2_na,MYFTYPE qi_na,MYFTYPE thinf_na,MYFTYPE qinf_na,MYFTYPE Rg_na,MYFTYPE Rd_na,MYFTYPE &hinf,MYFTYPE &htau,MYFTYPE &minf,MYFTYPE &mtau) {
MYFTYPE a , b ;
a = Cutrap0_na ( vm , tha_na , Ra_na , qa_na ) ;
b = Cutrap0_na ( - vm , - tha_na , Rb_na , qa_na ) ;
/* removed tadj_na recalculation */
mtau = 1.0 / tadj_na / ( a + b ) ;
minf = a / ( a + b ) ;
a = Cutrap0_na ( vm , thi1_na , Rd_na , qi_na ) ;
b = Cutrap0_na ( - vm , - thi2_na , Rg_na , qi_na ) ;
htau = 1.0 / tadj_na / ( a + b ) ;
hinf = 1.0 / ( 1.0 + exp ( ( vm - thinf_na ) / qinf_na ) ) ;
}
// Inits:
// Initialize Ca-channel state: eca from the Nernst relation (fixed
// DEF_cao), then m/h set to their steady states at (v + vshift_ca).
__device__ void CuInitModel_ca(MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gbar_ca,MYFTYPE cao_ca, MYFTYPE cai, MYFTYPE &ica,MYFTYPE &eca){
MYFTYPE hinf,htau,minf,mtau;
eca = ktf/2 *log(DEF_cao / cai);
// NOTE(review): the assignment below duplicates the one above (generator
// artifact); redundant but harmless.
eca = ktf/2 *log(DEF_cao / cai);
/* removed tadj_ca recalculation */
Cutrates_ca(v+vshift_ca,gbar_ca,cao_ca,hinf,htau,minf,mtau);
m = minf;
h = hinf;
};
// Initialize calcium accumulation: cai reset to the default, pool state
// `ca` to its resting value, then cai mirrored from it.
__device__ void CuInitModel_cad(MYFTYPE v,MYFTYPE &ca, MYFTYPE ica, MYFTYPE &cai,MYFTYPE &eca){
cai = DEF_cai;
eca = ktf/2 *log(DEF_cao / cai);
ca = cainf_cad;
cai = ca;
};
// Initialize KCa gate n to its calcium-dependent steady state.
__device__ void CuInitModel_kca(MYFTYPE v,MYFTYPE &n,MYFTYPE gbar_kca,MYFTYPE caix_kca,MYFTYPE Ra_kca,MYFTYPE Rb_kca, MYFTYPE cai,MYFTYPE &eca){
MYFTYPE a,b,ninf,ntau;
eca = ktf/2 *log(DEF_cao / cai);
Curates_kca(cai,gbar_kca,caix_kca,Ra_kca,Rb_kca,a,b,ninf,ntau);
n = ninf;
};
// Initialize Km gate n to its voltage-dependent steady state.
__device__ void CuInitModel_km(MYFTYPE v,MYFTYPE &n,MYFTYPE gbar_km,MYFTYPE tha_km,MYFTYPE qa_km,MYFTYPE Ra_km,MYFTYPE Rb_km){
MYFTYPE a,b,ninf,ntau;
/* removed tadj_km recalculation */
Cutrates_km(v,gbar_km,tha_km,qa_km,Ra_km,Rb_km,a,b,ninf,ntau);
n = ninf;
};
// Initialize Kv gate n to its voltage-dependent steady state.
__device__ void CuInitModel_kv(MYFTYPE v,MYFTYPE &n,MYFTYPE gbar_kv,MYFTYPE tha_kv,MYFTYPE qa_kv,MYFTYPE Ra_kv,MYFTYPE Rb_kv){
MYFTYPE a,b,ninf,ntau;
/* removed tadj_kv recalculation */
Cutrates_kv(v,gbar_kv,tha_kv,qa_kv,Ra_kv,Rb_kv,a,b,ninf,ntau);
n = ninf;
};
// Initialize Na gates m/h to their steady states at (v + vshift_na).
__device__ void CuInitModel_na(MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gbar_na,MYFTYPE tha_na,MYFTYPE qa_na,MYFTYPE Ra_na,MYFTYPE Rb_na,MYFTYPE thi1_na,MYFTYPE thi2_na,MYFTYPE qi_na,MYFTYPE thinf_na,MYFTYPE qinf_na,MYFTYPE Rg_na,MYFTYPE Rd_na){
MYFTYPE hinf,htau,minf,mtau;
/* removed tadj_na recalculation */
Cutrates_na(v+vshift_na,gbar_na,tha_na,qa_na,Ra_na,Rb_na,thi1_na,thi2_na,qi_na,thinf_na,qinf_na,Rg_na,Rd_na,hinf,htau,minf,mtau);
m = minf;
h = hinf;
};
// The passive mechanisms are stateless: nothing to initialize.
__device__ void CuInitModel_pas(MYFTYPE v,MYFTYPE g_pas,MYFTYPE e_pas){
};
__device__ void CuInitModel_pas2(MYFTYPE v,MYFTYPE g_pas2,MYFTYPE e_pas2){
};
__device__ void CuInitModel_pasx(MYFTYPE v,MYFTYPE g_pasx,MYFTYPE e_pasx){
};
// Derivs:
// One dt step of the Ca-channel gates: closed-form exponential relaxation
// of m and h toward minf/hinf with time constants mtau/htau, then eca
// refreshed from the Nernst relation.
__device__ void CuDerivModel_ca(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gbar_ca,MYFTYPE cao_ca, MYFTYPE cai, MYFTYPE &ica,MYFTYPE &eca){
MYFTYPE hinf,htau,minf,mtau;
Cutrates_ca ( v + vshift_ca,gbar_ca,cao_ca,hinf,htau,minf,mtau);
m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mtau)))*(- ( ( ( minf ) ) / mtau ) / ( ( ( ( - 1.0 ) ) ) / mtau ) - m) ;
h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / htau)))*(- ( ( ( hinf ) ) / htau ) / ( ( ( ( - 1.0 ) ) ) / htau ) - h) ;
eca = ktf/2 *log(DEF_cao / cai);
}
// One dt step of the calcium pool: influx from ica (clamped so it never
// depletes the pool), relaxation toward cainf_cad with taur_cad, cai and
// eca refreshed.
__device__ void CuDerivModel_cad(MYFTYPE dt, MYFTYPE v,MYFTYPE &ca, MYFTYPE ica, MYFTYPE &cai,MYFTYPE &eca){
MYFTYPE drive_channel;
drive_channel = - ( 10000.0 ) * ica / ( 2.0 * FARADAY * depth_cad ) ;
// only inward (depolarizing) calcium flux adds to the pool
if ( drive_channel <= 0. ) {
drive_channel = 0. ;
}
ca = ca + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / taur_cad)))*(- ( drive_channel + ( ( cainf_cad ) ) / taur_cad ) / ( ( ( ( - 1.0 ) ) ) / taur_cad ) - ca) ;
cai = ca ;
eca = ktf/2 *log(DEF_cao / cai);
}
// One dt step of the KCa gate n (calcium-dependent rates).
__device__ void CuDerivModel_kca(MYFTYPE dt, MYFTYPE v,MYFTYPE &n,MYFTYPE gbar_kca,MYFTYPE caix_kca,MYFTYPE Ra_kca,MYFTYPE Rb_kca, MYFTYPE cai,MYFTYPE &eca){
// NOTE(review): with the file-level "#define ek (-90.0f)" in force, the
// next line preprocesses to "MYFTYPE (-90.0f);" — a no-op cast
// expression, NOT a local variable declaration. Generator artifact.
MYFTYPE ek;
MYFTYPE a,b,ninf,ntau;
Curates_kca ( cai,gbar_kca,caix_kca,Ra_kca,Rb_kca,a,b,ninf,ntau);
n = n + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / ntau)))*(- ( ( ( ninf ) ) / ntau ) / ( ( ( ( - 1.0 ) ) ) / ntau ) - n) ;
eca = ktf/2 *log(DEF_cao / cai);
}
// One dt step of the Km gate n.
__device__ void CuDerivModel_km(MYFTYPE dt, MYFTYPE v,MYFTYPE &n,MYFTYPE gbar_km,MYFTYPE tha_km,MYFTYPE qa_km,MYFTYPE Ra_km,MYFTYPE Rb_km){
MYFTYPE a,b,ninf,ntau;
Cutrates_km ( v,gbar_km,tha_km,qa_km,Ra_km,Rb_km,a,b,ninf,ntau);
n = n + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / ntau)))*(- ( ( ( ninf ) ) / ntau ) / ( ( ( ( - 1.0 ) ) ) / ntau ) - n) ;
}
// One dt step of the Kv gate n.
__device__ void CuDerivModel_kv(MYFTYPE dt, MYFTYPE v,MYFTYPE &n,MYFTYPE gbar_kv,MYFTYPE tha_kv,MYFTYPE qa_kv,MYFTYPE Ra_kv,MYFTYPE Rb_kv){
MYFTYPE a,b,ninf,ntau;
Cutrates_kv ( v,gbar_kv,tha_kv,qa_kv,Ra_kv,Rb_kv,a,b,ninf,ntau);
n = n + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / ntau)))*(- ( ( ( ninf ) ) / ntau ) / ( ( ( ( - 1.0 ) ) ) / ntau ) - n) ;
}
// One dt step of the Na gates m/h at the shifted potential.
__device__ void CuDerivModel_na(MYFTYPE dt, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gbar_na,MYFTYPE tha_na,MYFTYPE qa_na,MYFTYPE Ra_na,MYFTYPE Rb_na,MYFTYPE thi1_na,MYFTYPE thi2_na,MYFTYPE qi_na,MYFTYPE thinf_na,MYFTYPE qinf_na,MYFTYPE Rg_na,MYFTYPE Rd_na){
MYFTYPE hinf,htau,minf,mtau;
Cutrates_na ( v + vshift_na,gbar_na,tha_na,qa_na,Ra_na,Rb_na,thi1_na,thi2_na,qi_na,thinf_na,qinf_na,Rg_na,Rd_na,hinf,htau,minf,mtau);
m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mtau)))*(- ( ( ( minf ) ) / mtau ) / ( ( ( ( - 1.0 ) ) ) / mtau ) - m) ;
h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / htau)))*(- ( ( ( hinf ) ) / htau ) / ( ( ( ( - 1.0 ) ) ) / htau ) - h) ;
}
// Breaks:
// Breakpoint (current) evaluation of the high-voltage-activated Ca channel.
// Adds the channel current to sumCurrents and ica and its conductance to
// sumConductivity.  tadj_ca is a file-level temperature factor defined
// outside this chunk; eca is maintained by the derivative kernels.
// Fix: removed the generator artifact `MYFTYPE ;` (a declaration that
// declares nothing) and the unused locals hinf/mtau/minf/htau.
__device__ void CuBreakpointModel_ca(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gbar_ca,MYFTYPE cao_ca, MYFTYPE cai, MYFTYPE &ica,MYFTYPE &eca) {
MYFTYPE gca;     // channel conductance
MYFTYPE ica_ca;  // this mechanism's Ca current contribution
gca = tadj_ca * gbar_ca * m * m * h ;
ica_ca = ( 1e-4 ) * gca * ( v - eca ) ;
sumCurrents+= ica_ca;
ica += ica_ca;
sumConductivity+= gca;
};
// Breakpoint evaluation for the Ca-pool (cad) mechanism.  The cad mechanism
// produces no membrane current, so this is intentionally a no-op; the pool
// state itself is advanced by the corresponding CuDerivModel kernel.
// Fix: removed the generator artifacts `MYFTYPE gca;` (unused) and
// `MYFTYPE ;` (a declaration that declares nothing).
__device__ void CuBreakpointModel_cad(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &ca, MYFTYPE ica, MYFTYPE &cai,MYFTYPE &eca) {
};
// Breakpoint (current) evaluation of the Ca-activated K channel.
// ek and tadj_kca resolve to file-level symbols defined outside this chunk.
// Fix: removed the unused locals ntau/gca/ninf left behind by the generator.
__device__ void CuBreakpointModel_kca(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &n,MYFTYPE gbar_kca,MYFTYPE caix_kca,MYFTYPE Ra_kca,MYFTYPE Rb_kca, MYFTYPE cai,MYFTYPE &eca) {
MYFTYPE gk;  // channel conductance
MYFTYPE ik;  // K current contribution
gk = tadj_kca * gbar_kca * n ;
ik = ( 1e-4 ) * gk * ( v - ek ) ;
sumCurrents+= ik;
sumConductivity+= gk;
};
// Breakpoint (current) evaluation of the muscarinic K (km) channel.
// ek and tadj_km resolve to file-level symbols defined outside this chunk.
// Fix: removed the unused locals ntau/ninf left behind by the generator.
__device__ void CuBreakpointModel_km(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &n,MYFTYPE gbar_km,MYFTYPE tha_km,MYFTYPE qa_km,MYFTYPE Ra_km,MYFTYPE Rb_km) {
MYFTYPE gk;  // channel conductance
MYFTYPE ik;  // K current contribution
gk = tadj_km * gbar_km * n ;
ik = ( 1e-4 ) * gk * ( v - ek ) ;
sumCurrents+= ik;
sumConductivity+= gk;
};
// Breakpoint (current) evaluation of the delayed-rectifier K (kv) channel.
// ek and tadj_kv resolve to file-level symbols defined outside this chunk.
// Fix: removed the unused locals ntau/ninf left behind by the generator.
__device__ void CuBreakpointModel_kv(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &n,MYFTYPE gbar_kv,MYFTYPE tha_kv,MYFTYPE qa_kv,MYFTYPE Ra_kv,MYFTYPE Rb_kv) {
MYFTYPE gk;  // channel conductance
MYFTYPE ik;  // K current contribution
gk = tadj_kv * gbar_kv * n ;
ik = ( 1e-4 ) * gk * ( v - ek ) ;
sumCurrents+= ik;
sumConductivity+= gk;
};
// Breakpoint (current) evaluation of the Na channel (m^3 h kinetics).
// ena and tadj_na resolve to file-level symbols defined outside this chunk.
// Fix: removed the unused locals hinf/mtau/minf/htau left by the generator.
__device__ void CuBreakpointModel_na(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE &m,MYFTYPE &h,MYFTYPE gbar_na,MYFTYPE tha_na,MYFTYPE qa_na,MYFTYPE Ra_na,MYFTYPE Rb_na,MYFTYPE thi1_na,MYFTYPE thi2_na,MYFTYPE qi_na,MYFTYPE thinf_na,MYFTYPE qinf_na,MYFTYPE Rg_na,MYFTYPE Rd_na) {
MYFTYPE gna;  // channel conductance
MYFTYPE ina;  // Na current contribution
gna = tadj_na * gbar_na * m * m * m * h ;
ina = ( 1e-4 ) * gna * ( v - ena ) ;
sumCurrents+= ina;
sumConductivity+= gna;
};
// Breakpoint (current) evaluation of the passive leak mechanism:
// i = g_pas * (v - e_pas), accumulated into sumCurrents/sumConductivity.
// Fix: removed the generator artifacts `MYFTYPE;` (declares nothing) and
// the no-op `i = i;`.
__device__ void CuBreakpointModel_pas(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE g_pas,MYFTYPE e_pas) {
MYFTYPE i;
i = g_pas * ( v - e_pas ) ;
sumCurrents+= i;
sumConductivity+= g_pas;
};
// Breakpoint (current) evaluation of the second passive leak mechanism.
// NOTE(review): unlike CuBreakpointModel_pas, this variant does NOT add
// g_pas2 to sumConductivity — the generated original omits it; confirm
// whether that is intentional before changing it.
// Fix: removed the generator artifacts `MYFTYPE;` and the no-op `i = i;`.
__device__ void CuBreakpointModel_pas2(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE g_pas2,MYFTYPE e_pas2) {
MYFTYPE i;
i = g_pas2 * ( v - e_pas2 ) ;
sumCurrents+= i;
};
// Breakpoint (current) evaluation of the pasx passive leak mechanism.
// NOTE(review): like pas2 (and unlike pas), the generated original does not
// accumulate into sumConductivity; confirm before changing.
// Fix: removed the generator artifacts `MYFTYPE;` and the no-op `i = i;`.
__device__ void CuBreakpointModel_pasx(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE g_pasx,MYFTYPE e_pasx) {
MYFTYPE i;
i = g_pasx * ( v - e_pasx ) ;
sumCurrents+= i;
};
|
4535d35d93e883e2c725282a7124666a43557421.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/bitplane_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Extracts bit plane `b` of the fixed-point representation of `in`:
// each value is scaled by 2^fl (fl fractional bits), truncated toward zero
// to an unsigned integer, and bit b is written to `out` as 0.0/1.0.
template <typename Dtype>
__global__ void forward_i2b(const int n, const Dtype* in, Dtype* out, const int b, const int fl) {
CUDA_KERNEL_LOOP(index, n) {
const Dtype scaled = in[index] * powf(2, fl);
const unsigned fixed = __float2uint_rz(scaled);
out[index] = __uint2float_rz((fixed >> b) & 0x00000001);
}
}
/*template <typename Dtype>
__global__ void forward_b2i(const int n, const Dtype* in, Dtype* out, const Dtype scale) {
CUDA_KERNEL_LOOP(index, n) {
out[index] += in[index] * scale;
}
}*/
// Forward pass of the bitplane layer (HIP build).
// direction == true : unpack each input feature map into `bw` binary bit
//                     planes (bit b of trunc(x * 2^fl)) via forward_i2b.
// direction == false: pack `bw` bit planes back into one fixed-point map by
//                     accumulating plane b with weight 2^(b-fl) (axpy).
// Fix: the hipify converter had spliced hipLaunchKernelGGL into the NOLINT
// comment, producing syntactically invalid code; the launch is restored with
// HIP_KERNEL_NAME for the templated kernel.  Dead commented-out code removed.
template <typename Dtype>
void BitplaneLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int num = bottom[0]->num(); // batches
const int channels = bottom[0]->channels();
const int height = bottom[0]->height();
const int width = bottom[0]->width();
const int spatial = height*width;
const int fmap = channels*spatial;  // elements per sample on the bit side
const int count = num*fmap;
(void)count;
const bool dir = this->layer_param_.bitplane_param().direction();
const int bw = this->layer_param_.bitplane_param().bw_layer();  // bit width
const int fl = this->layer_param_.bitplane_param().fl_layer();  // fractional bits
const int fmapI = fmap/bw;          // elements per sample on the integer side
const int countI = count/bw;
// The bits->int path accumulates with axpy, so the output starts at zero.
if (dir != true) {
caffe_gpu_set(countI, Dtype(0), top_data);
}
for (int n = 0; n < num; ++n) {
for (int b = 0; b < bw; ++b) {
if (dir == true) { // int to bits
hipLaunchKernelGGL(HIP_KERNEL_NAME(forward_i2b<Dtype>),
dim3(CAFFE_GET_BLOCKS(fmap)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
fmap, bottom_data + n*fmap, top_data + fmap*(b + n*bw), b, fl);
} else { // bits to int
const Dtype scale = powf(2, b-fl); // weight of bit plane b
caffe_gpu_axpy(fmapI, scale, bottom_data + fmapI*(b + n*bw), top_data + n*fmapI);
}
}
}
CUDA_POST_KERNEL_CHECK;
}
// Gates the incoming gradient `in` by the forward bit value `sw`:
// positions whose forward bit was set (> 0) pass the gradient through,
// all other positions receive zero.
template <typename Dtype>
__global__ void backward_i2b(const int n, const Dtype* in, Dtype* out, const Dtype* sw) {
CUDA_KERNEL_LOOP(index, n) {
const bool bit_set = sw[index] > Dtype(0);
out[index] = in[index] * bit_set;
}
}
/*template <typename Dtype>
__global__ void backward_b2i(const int n, const Dtype* in, Dtype* out, const Dtype scale) {
CUDA_KERNEL_LOOP(index, n) {
out[index] += in[index] * scale;
}
}*/
// Backward pass of the bitplane layer (HIP build).
// direction != true: gradients of the packed map are gated per element by the
//                    forward bit value (backward_i2b).
// direction == true: gradients of all bit planes are folded back with the
//                    simplified constant scale 2/bw (axpy accumulation).
// Fix: the hipify converter had spliced hipLaunchKernelGGL into the NOLINT
// comment, producing syntactically invalid code; the launch is restored with
// HIP_KERNEL_NAME.  Unused local `fl` and dead commented-out code removed.
template <typename Dtype>
void BitplaneLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int num = bottom[0]->num(); // batches
const int channels = bottom[0]->channels();
const int height = bottom[0]->height();
const int width = bottom[0]->width();
const int spatial = height*width;
const int fmap = channels*spatial;
const int count = num*fmap;
const bool dir = this->layer_param_.bitplane_param().direction();
const int bw = this->layer_param_.bitplane_param().bw_layer();
const int fmapI = fmap/bw;
const Dtype scale = 2.0 * powf(bw, -1); // simplified gradient scaler (2/bw)
// The accumulation path needs a zeroed gradient buffer.
if (dir == true) {
caffe_gpu_set(count, Dtype(0), bottom_diff);
}
for (int n = 0; n < num; ++n) {
for (int b = 0; b < bw; ++b) {
if (dir != true) { // int to bits
hipLaunchKernelGGL(HIP_KERNEL_NAME(backward_i2b<Dtype>),
dim3(CAFFE_GET_BLOCKS(fmapI)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
fmapI, top_diff + n*fmapI, bottom_diff + fmapI*(b + n*bw), bottom_data + fmapI*(b + n*bw));
} else { // bits to int
caffe_gpu_axpy(fmap, scale, top_diff + fmap*(b + n*bw), bottom_diff + n*fmap);
}
}
}
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BitplaneLayer);
} // namespace caffe
| 4535d35d93e883e2c725282a7124666a43557421.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/bitplane_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Extracts bit plane `b` of the fixed-point representation of `in`:
// each value is scaled by 2^fl (fl fractional bits), truncated toward zero
// to an unsigned integer, and bit b is written to `out` as 0.0/1.0.
template <typename Dtype>
__global__ void forward_i2b(const int n, const Dtype* in, Dtype* out, const int b, const int fl) {
CUDA_KERNEL_LOOP(index, n) {
const Dtype scaled = in[index] * powf(2, fl);
const unsigned fixed = __float2uint_rz(scaled);
out[index] = __uint2float_rz((fixed >> b) & 0x00000001);
}
}
/*template <typename Dtype>
__global__ void forward_b2i(const int n, const Dtype* in, Dtype* out, const Dtype scale) {
CUDA_KERNEL_LOOP(index, n) {
out[index] += in[index] * scale;
}
}*/
// Forward pass of the bitplane layer (CUDA build).
// direction == true : unpack each input map into `bw` binary bit planes
//                     (bit b of trunc(x * 2^fl)) via forward_i2b.
// direction == false: pack `bw` bit planes back into one fixed-point map by
//                     accumulating plane b with weight 2^(b-fl) (axpy).
template <typename Dtype>
void BitplaneLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
//
const int num = bottom[0]->num(); // batches
const int channels = bottom[0]->channels();
const int height = bottom[0]->height();
const int width = bottom[0]->width();
const int spatial = height*width;
const int fmap = channels*spatial;  // elements per sample on the bit side
const int count = num*fmap;
//
const bool dir = this->layer_param_.bitplane_param().direction();
const int bw = this->layer_param_.bitplane_param().bw_layer();  // bit width
const int fl = this->layer_param_.bitplane_param().fl_layer();  // fractional bits
//
const int fmapI = fmap/bw;  // elements per sample on the integer side
const int countI = count/bw;
// set to zero: the bits->int path accumulates with axpy below
if (dir != true) {
caffe_gpu_set(countI, Dtype(0), top_data);
}
//
for (int n = 0; n < num; ++n) {
for (int b = 0; b < bw; ++b) {
if (dir == true) { // int to bits
forward_i2b<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(fmap), CAFFE_CUDA_NUM_THREADS>>>(
fmap, bottom_data + n*fmap, top_data + fmap*(b + n*bw), b, fl);
} else { // bits to int
const Dtype scale = powf(2, b-fl); // forward scaler
caffe_gpu_axpy(fmapI, scale, bottom_data + fmapI*(b + n*bw), top_data + n*fmapI);
/*forward_b2i<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(fmapI), CAFFE_CUDA_NUM_THREADS>>>(
fmapI, bottom_data + fmapI*(b + n*bw), top_data + n*fmapI, scale);*/
}
}
}
//
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
// Gates the incoming gradient `in` by the forward bit value `sw`: positions
// whose forward bit was set (> 0) pass the gradient, others receive zero.
template <typename Dtype>
__global__ void backward_i2b(const int n, const Dtype* in, Dtype* out, const Dtype* sw) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (sw[index] > Dtype(0));
}
}
/*template <typename Dtype>
__global__ void backward_b2i(const int n, const Dtype* in, Dtype* out, const Dtype scale) {
CUDA_KERNEL_LOOP(index, n) {
out[index] += in[index] * scale;
}
}*/
// Backward pass of the bitplane layer (CUDA build).
// direction != true: packed-map gradients are gated per element by the
//                    forward bit value (backward_i2b).
// direction == true: bit-plane gradients are folded back with the constant
//                    scale 2/bw (axpy accumulation into a zeroed buffer).
template <typename Dtype>
void BitplaneLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
//
const int num = bottom[0]->num(); // batches
const int channels = bottom[0]->channels();
const int height = bottom[0]->height();
const int width = bottom[0]->width();
const int spatial = height*width;
const int fmap = channels*spatial;
const int count = num*fmap;
//
const bool dir = this->layer_param_.bitplane_param().direction();
const int bw = this->layer_param_.bitplane_param().bw_layer();
const int fl = this->layer_param_.bitplane_param().fl_layer();  // NOTE(review): unused in backward
//
const int fmapI = fmap/bw;
//const int countI = count/bw;
const Dtype scale = 2.0 * powf(bw, -1); // simplified gradient scaler (2/bw)
//
if (dir == true) { // set to zero before axpy accumulation
caffe_gpu_set(count, Dtype(0), bottom_diff);
}
//
for (int n = 0; n < num; ++n) {
for (int b = 0; b < bw; ++b) {
if (dir != true) { // int to bits
backward_i2b<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(fmapI), CAFFE_CUDA_NUM_THREADS>>>(
fmapI, top_diff + n*fmapI, bottom_diff + fmapI*(b + n*bw), bottom_data + fmapI*(b + n*bw));
} else { // bits to int
caffe_gpu_axpy(fmap, scale, top_diff + fmap*(b + n*bw), bottom_diff + n*fmap);
/*backward_b2i<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(fmap), CAFFE_CUDA_NUM_THREADS>>>(
fmap, top_diff + fmap*(b + n*bw), bottom_diff + n*fmap, scale);*/
}
}
}
//
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BitplaneLayer);
} // namespace caffe
|
4df0c289bd2cde87056aa96041169dbe915de4f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/* This file is copied from https://github.com/jzbonter/mc-cnn */
extern "C" {
}
#define TB 128
#define DISP_MAX 256
// Zeroes y wherever the corresponding x equals 255 (white pixels).
// One thread per element; bounds-checked against size.
__global__ void remove_white(float *x, float *y, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
if (x[id] == 255) {
y[id] = 0;
}
}
} | 4df0c289bd2cde87056aa96041169dbe915de4f5.cu | #include "includes.h"
/* This file is copied from https://github.com/jzbonter/mc-cnn */
extern "C" {
}
#define TB 128
#define DISP_MAX 256
// Zeroes y wherever the corresponding x equals 255 (white pixels).
// One thread per element; bounds-checked against size.
__global__ void remove_white(float *x, float *y, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
if (x[id] == 255) {
y[id] = 0;
}
}
} |
2c036cd2798bf8f6c596c32592f6f96bc207d94b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice2D.h"
#include "Indice1D.h"
#include "cudaTools.h"
#include <stdio.h>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
static __device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
* output : void required !!
*/
// Kernel entry point: element-wise w = v1 + v2 over n elements,
// delegating all work to the device helper `secondaire`.
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n)
{
secondaire(ptrDevV1, ptrDevV2, ptrDevW, n); // delegation not necessary, just for fun
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
// Element-wise vector addition w = v1 + v2 using the interleaved access
// pattern: each thread handles indices tid, tid + nbThread, tid + 2*nbThread, ...
__device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n)
{
const int threadCount = Indice2D::nbThread();
const int tid = Indice2D::tid();
// Optional debug trace; Device::synchronize() is required after the kernel
// call for this device-side printf to be flushed.
if (tid == 0)
{
printf("Coucou from device tid = %d",tid);
}
// Interleaved (strided) iteration over the vectors.
for (int idx = tid; idx < n; idx += threadCount)
{
ptrDevW[idx] = ptrDevV1[idx] + ptrDevV2[idx];
}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 2c036cd2798bf8f6c596c32592f6f96bc207d94b.cu | #include "Indice2D.h"
#include "Indice1D.h"
#include "cudaTools.h"
#include <stdio.h>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
static __device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
* output : void required !!
*/
// Kernel entry point: element-wise w = v1 + v2 over n elements,
// delegating all work to the device helper `secondaire`.
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n)
{
secondaire(ptrDevV1, ptrDevV2, ptrDevW, n); // delegation not necessary, just for fun
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
// Element-wise vector addition w = v1 + v2 using the interleaved access
// pattern: each thread handles tid, tid + NB_THREAD, tid + 2*NB_THREAD, ...
__device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n)
{
const int NB_THREAD=Indice2D::nbThread();
const int TID=Indice2D::tid();
// Debug output, optional
if (TID==0)
{
printf("Coucou from device tid = %d",TID); //required Device::synchronize(); after the call of kernel
}
// Interleaved (strided) iteration over the vectors.
int s = TID;
while (s< n)
{
ptrDevW[s] = ptrDevV1[s] + ptrDevV2[s];
s+= NB_THREAD;
}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
76c2c2f82115c79a51ef5f34f813c777b863ea91.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// LSH-style signature kernel: for each hyperplane (one per thread), computes
// the dot product of its `*dimensions`-long coefficient row with vector v
// and stores the sign bit (sum >= 0) in sig.  hyperp_length is the number
// of hyperplanes; dimensions and hyperp_length are passed as device pointers.
__global__ void cudaComputeSignature(double* hyperplanes, double* v, int* dimensions, bool* sig, long* hyperp_length) {
long tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < *hyperp_length) {
int d_dimensions = *dimensions;
long pos = tid * d_dimensions;  // start of this hyperplane's row
double sum = 0.0;
for (int i = 0; i < d_dimensions; i++)
sum += hyperplanes[i+pos] * v[i];
sig[tid] = (sum>=0);
}
} | 76c2c2f82115c79a51ef5f34f813c777b863ea91.cu | #include "includes.h"
// LSH-style signature kernel: for each hyperplane (one per thread), computes
// the dot product of its `*dimensions`-long coefficient row with vector v
// and stores the sign bit (sum >= 0) in sig.  hyperp_length is the number
// of hyperplanes; dimensions and hyperp_length are passed as device pointers.
__global__ void cudaComputeSignature(double* hyperplanes, double* v, int* dimensions, bool* sig, long* hyperp_length) {
long tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < *hyperp_length) {
int d_dimensions = *dimensions;
long pos = tid * d_dimensions;  // start of this hyperplane's row
double sum = 0.0;
for (int i = 0; i < d_dimensions; i++)
sum += hyperplanes[i+pos] * v[i];
sig[tid] = (sum>=0);
}
} |
e664cc8bc9b8e38400c883f66b01b6cfaacaa72f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
// Includes
#include <stdio.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 240
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Aborts the process with a file/line diagnostic when a HIP runtime call
// returned anything other than hipSuccess.  Used via the checkCudaErrors macro.
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if (err == hipSuccess)
return;
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Checks the sticky HIP error state (e.g. after a kernel launch) and aborts
// with a diagnostic if an error is pending.  Used via the getLastCudaError macro.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
// Power-measurement stress kernel: saturates the logical/bitwise units with a
// long chain of dependent AND/OR/XOR operations.  The numeric result is
// meaningless; it is stored to C only so the compiler cannot remove the loop.
__global__ void PowerKernal1(const unsigned* A, const unsigned* B, unsigned* C, int N, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
unsigned r1=0;
unsigned r2=0;
unsigned r3=0;
unsigned in1=A[i];
unsigned in2=B[i];
// Excessive logical-unit traffic.
for(unsigned k=0; k<iterations * ( blockDim.x+200 );k++) {
r1=in1 & in2;
r2 |= (in1 | in2);
r3=in1^r2;
r2|=r1;
r2=r3 & r2;
r1=r2 ^ r3;
}
__syncthreads();
C[i]=r1;
__syncthreads();
}
// Power-measurement stress kernel: saturates the integer add/sub units with a
// long chain of dependent additions/subtractions.  The result is meaningless;
// it is stored to C only to defeat dead-code elimination.
__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int N, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=0;
unsigned Value2=0;
unsigned Value3=0;
unsigned Value=0;
unsigned I1=A[i];
unsigned I2=B[i];
// Excessive Addition access
for(unsigned k=0; k<iterations * ( blockDim.x+200 );k++) {
Value2= I1+I2;
Value3=I1-I2;
Value1-=Value2;
Value3+=Value1;
Value2-=Value3;
Value1+=Value3;
}
__syncthreads();
Value=Value1;
C[i]=Value;
__syncthreads();
}
// Power-measurement stress kernel: saturates the integer multipliers with a
// long chain of dependent multiplications.  The result is meaningless; it is
// stored to C only to defeat dead-code elimination.
// NOTE(review): Value2 (and Value on paths below) is read before it is ever
// assigned (`Value1*=Value2;` in the first iteration) — undefined value.
// Harmless for a power benchmark, but initialize it if the output matters.
__global__ void PowerKernal3(const unsigned* A, const unsigned* B, unsigned* C, int N, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1;
unsigned Value2;
unsigned Value3;
unsigned Value;
unsigned I1=A[i];
unsigned I2=B[i];
// Excessive Multiplication
for(unsigned k=0; k<iterations*( blockDim.x+200 );k++) {
Value1=I1*I2;
Value1*=Value2;
Value3=Value1*I2;
Value2*=I1*Value3;
Value1*=Value2;
Value3*=Value1;
}
__syncthreads();
Value=Value3;
C[i]=Value;
__syncthreads();
}
// Power-measurement stress kernel: saturates the integer divide/modulo path
// with a long chain of dependent div/mod operations (all divisors are +1
// biased to avoid division by zero).  Result stored to C only to defeat DCE.
__global__ void PowerKernal4(const unsigned* A, const unsigned* B, unsigned* C, int N, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=0;
unsigned Value2=0;
unsigned Value3=1000;
unsigned Value=0;
unsigned I1=A[i];
unsigned I2=B[i];
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned k=0; k<iterations*(blockDim.x+20);k++) {
Value1=I1/(I2+1);
Value2=Value1/(I2+1);
Value3/= (I1/(I2+1) +1);
Value1/=(Value2+1);
Value3%=(Value2+1);
Value2/=(Value3+1);
Value1%=(Value+1);
Value3/=(Value1+1);
}
__syncthreads();
Value=Value3;
C[i]=Value;
__syncthreads();
}
// Power-measurement baseline kernel: spins on a chain of 32 unconditional
// PTX branches (B0..B31 -> LOOP) so the branch unit is exercised with no
// arithmetic payload.  C[i] = A[i] at the end only defeats dead-code removal.
// NOTE(review): the inline asm below is NVIDIA PTX; in this HIP translation
// it can only build for NVIDIA targets — confirm it is never compiled for AMD.
__global__ void PowerKernalEmpty(const unsigned* A, const unsigned* B, unsigned* C, int N, int iterations)
// Host code
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=0;
unsigned Value2=0;
unsigned Value3=0;
unsigned Value=0;
unsigned I1=A[i];
unsigned I2=B[i];
unsigned m;  // NOTE(review): unused
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<iterations*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
__asm volatile ("{\n\t"
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:\n\t"
"}"
);
}
C[i]=I1;
__syncthreads();
}
// Driver: allocates host/device buffers, then runs each PowerKernal variant
// three times at shrinking grid sizes, interleaved with the empty baseline
// kernel, so power draw can be sampled externally per phase.
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else {
iterations = atoi(argv[1]);
}
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory
// NOTE(review): CleanupResources() does not exit, so on malloc failure
// execution continues with a null pointer — confirm intended behavior.
h_A = (unsigned*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (unsigned*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (unsigned*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
hipDeviceSynchronize();
hipLaunchKernelGGL(( PowerKernalEmpty), dim3(dimGrid2),dim3(dimBlock2), 0, 0, d_A, d_B, d_C, N, iterations);
hipDeviceSynchronize();
//printf("execution time = %f\n", cutGetTimerValue(my_timer));
// Phase 1: logical-unit kernel at grid.y = NUM_OF_BLOCKS/3, /9, /27.
dimGrid.y = NUM_OF_BLOCKS;
for (int i=0; i<3; i++) {
dimGrid.y /= 3;
hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N, iterations);
hipDeviceSynchronize();
//printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL(( PowerKernalEmpty), dim3(dimGrid2),dim3(dimBlock2), 0, 0, d_A, d_B, d_C, N, iterations);
hipDeviceSynchronize();
//printf("execution time = %f\n", cutGetTimerValue(my_timer));
}
// Phase 2: add/sub kernel.
dimGrid.y = NUM_OF_BLOCKS;
for (int i=0; i<3; i++) {
dimGrid.y /= 3;
hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N, iterations);
hipDeviceSynchronize();
//printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL(( PowerKernalEmpty), dim3(dimGrid2),dim3(dimBlock2), 0, 0, d_A, d_B, d_C, N, iterations);
hipDeviceSynchronize();
//printf("execution time = %f\n", cutGetTimerValue(my_timer));
}
// Phase 3: multiplier kernel.
dimGrid.y = NUM_OF_BLOCKS;
for (int i=0; i<3; i++) {
dimGrid.y /= 3;
hipLaunchKernelGGL(( PowerKernal3), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N, iterations);
hipDeviceSynchronize();
//printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL(( PowerKernalEmpty), dim3(dimGrid2),dim3(dimBlock2), 0, 0, d_A, d_B, d_C, N, iterations);
hipDeviceSynchronize();
//printf("execution time = %f\n", cutGetTimerValue(my_timer));
}
// Phase 4: div/mod kernel.  NOTE(review): unlike the other phases this loop
// does not shrink dimGrid.y — confirm whether that is intentional.
dimGrid.y = NUM_OF_BLOCKS;
for (int i=0; i<3; i++) {
hipLaunchKernelGGL(( PowerKernal4), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N, iterations);
hipDeviceSynchronize();
//printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL(( PowerKernalEmpty), dim3(dimGrid2),dim3(dimBlock2), 0, 0, d_A, d_B, d_C, N, iterations);
hipDeviceSynchronize();
}
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
//printf("execution time = %f\n", cutGetTimerValue(my_timer));
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
// Releases all device and host buffers that main() allocated.  Safe to call
// with partially-initialized globals (each pointer is null-checked).
// Note: this only frees memory; it does not terminate the process.
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Fills data[0..n) with pseudo-random unsigned values in [0, RAND_MAX].
// Fixes two defects in the original: (1) srand() was re-seeded with the
// current second inside the loop, so every element received the same value;
// (2) `rand() / RAND_MAX` is integer division and is almost always 0.
void RandomInit(unsigned* data, int n)
{
srand((unsigned)time(0));  // seed once, not per element
for (int i = 0; i < n; ++i){
data[i] = (unsigned)rand();
}
}
| e664cc8bc9b8e38400c883f66b01b6cfaacaa72f.cu | #include <stdio.h>
#include <stdlib.h>
// Includes
#include <stdio.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 240
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Aborts the process with a file/line diagnostic when a CUDA runtime call
// returned anything other than cudaSuccess.  Used via the checkCudaErrors macro.
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Checks the sticky CUDA error state (e.g. after a kernel launch) and aborts
// with a diagnostic if an error is pending.  Used via the getLastCudaError macro.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
// Power-measurement stress kernel: saturates the logical/bitwise units with a
// long chain of dependent AND/OR/XOR operations.  The result is meaningless;
// it is stored to C only to defeat dead-code elimination.
__global__ void PowerKernal1(const unsigned* A, const unsigned* B, unsigned* C, int N, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=0;
unsigned Value2=0;
unsigned Value3=0;
unsigned Value=0;
unsigned I1=A[i];
unsigned I2=B[i];
//Excessive Logical Unit access
for(unsigned k=0; k<iterations * ( blockDim.x+200 );k++) {
Value1=I1 & I2;
Value2 |= (I1 | I2);
Value3=I1^Value2;
Value2|=Value1;
Value2=Value3 & Value2;
Value1=Value2 ^ Value3;
}
__syncthreads();
Value=Value1;
C[i]=Value;
__syncthreads();
}
// Power-measurement stress kernel: saturates the integer add/sub units with a
// long chain of dependent additions/subtractions.  The result is meaningless;
// it is stored to C only to defeat dead-code elimination.
__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int N, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=0;
unsigned Value2=0;
unsigned Value3=0;
unsigned Value=0;
unsigned I1=A[i];
unsigned I2=B[i];
// Excessive Addition access
for(unsigned k=0; k<iterations * ( blockDim.x+200 );k++) {
Value2= I1+I2;
Value3=I1-I2;
Value1-=Value2;
Value3+=Value1;
Value2-=Value3;
Value1+=Value3;
}
__syncthreads();
Value=Value1;
C[i]=Value;
__syncthreads();
}
// Power-measurement stress kernel: saturates the integer multipliers with a
// long chain of dependent multiplications.  The result is meaningless; it is
// stored to C only to defeat dead-code elimination.
// NOTE(review): Value2 is read before it is ever assigned (`Value1*=Value2;`
// in the first iteration) — undefined value.  Harmless for a power benchmark,
// but initialize it if the output matters.
__global__ void PowerKernal3(const unsigned* A, const unsigned* B, unsigned* C, int N, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1;
unsigned Value2;
unsigned Value3;
unsigned Value;
unsigned I1=A[i];
unsigned I2=B[i];
// Excessive Multiplication
for(unsigned k=0; k<iterations*( blockDim.x+200 );k++) {
Value1=I1*I2;
Value1*=Value2;
Value3=Value1*I2;
Value2*=I1*Value3;
Value1*=Value2;
Value3*=Value1;
}
__syncthreads();
Value=Value3;
C[i]=Value;
__syncthreads();
}
// Power-measurement stress kernel: saturates the integer divide/modulo path
// with a long chain of dependent div/mod operations (all divisors are +1
// biased to avoid division by zero).  Result stored to C only to defeat DCE.
__global__ void PowerKernal4(const unsigned* A, const unsigned* B, unsigned* C, int N, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=0;
unsigned Value2=0;
unsigned Value3=1000;
unsigned Value=0;
unsigned I1=A[i];
unsigned I2=B[i];
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned k=0; k<iterations*(blockDim.x+20);k++) {
Value1=I1/(I2+1);
Value2=Value1/(I2+1);
Value3/= (I1/(I2+1) +1);
Value1/=(Value2+1);
Value3%=(Value2+1);
Value2/=(Value3+1);
Value1%=(Value+1);
Value3/=(Value1+1);
}
__syncthreads();
Value=Value3;
C[i]=Value;
__syncthreads();
}
// Power-measurement baseline kernel: spins on a chain of 32 unconditional
// PTX branches (B0..B31 -> LOOP) so the branch unit is exercised with no
// arithmetic payload.  C[i] = A[i] at the end only defeats dead-code removal.
__global__ void PowerKernalEmpty(const unsigned* A, const unsigned* B, unsigned* C, int N, int iterations)
// Host code
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=0;
unsigned Value2=0;
unsigned Value3=0;
unsigned Value=0;
unsigned I1=A[i];
unsigned I2=B[i];
unsigned m;  // NOTE(review): unused
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<iterations*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
__asm volatile ("{\n\t"
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:\n\t"
"}"
);
}
C[i]=I1;
__syncthreads();
}
// Driver for the divide/modulo power microbenchmarks.
// Usage: <prog> #iterations
// Allocates host/device buffers, runs PowerKernal1..4 interleaved with the
// branch-only PowerKernalEmpty kernel, then copies the result back.
// Fixes vs. original: deprecated cudaThreadSynchronize() replaced by
// cudaDeviceSynchronize(), and allocation failures now exit instead of
// continuing into null-pointer dereferences.
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else {
iterations = atoi(argv[1]);
}
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory; abort on failure
// (the original fell through and dereferenced the null pointer later).
h_A = (unsigned*)malloc(size);
if (h_A == 0) { CleanupResources(); exit(1); }
h_B = (unsigned*)malloc(size);
if (h_B == 0) { CleanupResources(); exit(1); }
h_C = (unsigned*)malloc(size);
if (h_C == 0) { CleanupResources(); exit(1); }
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
cudaDeviceSynchronize();
// Warm-up / baseline phase on a single thread.
PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A, d_B, d_C, N, iterations);
cudaDeviceSynchronize();
// Each timed kernel is run 3 times with a shrinking grid (y /= 3 each
// pass), alternated with the empty kernel as an idle baseline.
dimGrid.y = NUM_OF_BLOCKS;
for (int i=0; i<3; i++) {
dimGrid.y /= 3;
PowerKernal1<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N, iterations);
cudaDeviceSynchronize();
PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A, d_B, d_C, N, iterations);
cudaDeviceSynchronize();
}
dimGrid.y = NUM_OF_BLOCKS;
for (int i=0; i<3; i++) {
dimGrid.y /= 3;
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N, iterations);
cudaDeviceSynchronize();
PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A, d_B, d_C, N, iterations);
cudaDeviceSynchronize();
}
dimGrid.y = NUM_OF_BLOCKS;
for (int i=0; i<3; i++) {
dimGrid.y /= 3;
PowerKernal3<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N, iterations);
cudaDeviceSynchronize();
PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A, d_B, d_C, N, iterations);
cudaDeviceSynchronize();
}
// NOTE(review): unlike the loops above, this one never shrinks dimGrid.y,
// so PowerKernal4 runs 3 times at the full grid — confirm this is intended.
dimGrid.y = NUM_OF_BLOCKS;
for (int i=0; i<3; i++) {
PowerKernal4<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N, iterations);
cudaDeviceSynchronize();
PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A, d_B, d_C, N, iterations);
cudaDeviceSynchronize();
}
getLastCudaError("kernel launch failure");
cudaDeviceSynchronize();
// Copy result from device memory to host memory; h_C contains the result.
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
// Release every device and host buffer allocated by main().
// Null (or still zero-initialized global) pointers are skipped, so this is
// safe to call no matter how far the allocations got.
void CleanupResources(void)
{
// Device-side buffers first.
if (d_A) cudaFree(d_A);
if (d_B) cudaFree(d_B);
if (d_C) cudaFree(d_C);
// Then the host-side mirrors.
if (h_A) free(h_A);
if (h_B) free(h_B);
if (h_C) free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(unsigned* data, int n)
{
for (int i = 0; i < n; ++i){
srand((unsigned)time(0));
data[i] = rand() / RAND_MAX;
}
}
|
8fc2eb151d6ee1c07c8bbecf5645b9f8dd2f5755.hip | // !!! This is a file automatically generated by hipify!!!
/**
	\file cuSiddonWithSurfaces.cu
	\brief Siddon implementation using surface memory for the image; surfaces are writable from device code, which is why this function performs the backprojection.
	\todo
	\bug
	\warning
	\author Martin Belzunce (martin.a.belzunce@gmail.com)
	\date 2014.07.11
	\version 1.1.0
*/
#ifndef _CUSIDDONWITHSURF_H_
#define _CUSIDDONWITHSURF_H_
#include <CuSiddon.h>
#include <float.h>
// Constant-memory variables used by Siddon. The caller must upload the data prior to reconstruction.
__device__ __constant__ float dummy2; // Esto lo tuve que agregar porque el hipMemcpyToSymbol me tiraba error con la primera variable declarada ac, sea cual sea.
extern __device__ __constant__ float d_AxialFov_mm;
extern __device__ __constant__ float d_RadioFov_mm;
extern __device__ __constant__ SizeImage d_imageSize;
extern __device__ __constant__ int d_numPixelsPerSlice;
extern __device__ __constant__ int d_numBinsSino2d;
extern __device__ __constant__ float d_ringSize_mm;
surface<void, cudaSurfaceType3D> surfImage;
// This function calculates Siddon weights for a LOR. It takes as parameters the LOR direction vector in
// a float4*, the first point of the LOR in a float4, a float* where a possible input must be loaded,
// a float* where the result will be stored, and an int that says in which mode we are working.
// The available modes are: SENSIBILITY_IMAGE -> It doesn't need any input; the output is an image.
// PROJECTION -> The input is an image, and the output is a michelogram.
// BACKPROJECTION -> The input is a michelogram and the output is an image.
// The size of the volume must first be loaded into the global constant variable named d_imageSize,
// and the size of the michelogram into cuda_michelogram_size.
// Walks one LOR (line of response) with the Siddon algorithm and
// back-projects the sinogram bin at indiceMichelogram into the 3D image
// stored in the writable surface surfImage.
// Parameters:
//   LOR               direction vector of the LOR (P1 - P0).
//   P0                first endpoint of the LOR.
//   image             unused here; the image is accessed through surfImage.
//   sinogram          input michelogram whose bin value is distributed
//                     along the ray, weighted by each pixel's chord length.
//   indiceMichelogram flat index of this LOR's sinogram bin.
// Preconditions: d_imageSize, d_RadioFov_mm and d_AxialFov_mm must be in
// constant memory and surfImage bound before launch.
// NOTE(review): concurrent LORs performing the read-modify-write on the same
// voxel are not synchronized — presumably acceptable for this backprojector;
// confirm against the calling kernel.
__device__ void cuSiddonWithSurfaces (float4* LOR, float4* P0, float* image, float* sinogram, int indiceMichelogram)
{
// Parameters "alpha" of the parametric line of the LOR.
float alpha_x_1, alpha_x_2, alpha_y_1, alpha_y_2; // Alphas where the line crosses the FOV boundary planes.
float alpha_x_min, alpha_y_min, alpha_x_max, alpha_y_max; // Same values per coordinate, split into smaller/larger.
//float alpha_z_min, alpha_z_max;
float alpha_min, alpha_max; // Final min/max alpha, i.e. entry into and exit from the FOV.
// Alpha values used while walking the LOR:
float alpha_x = FLT_MAX, alpha_y = FLT_MAX, alpha_z = FLT_MAX; // Alpha of the next x/y/z plane crossing; always follow the nearest.
float alpha_x_u, alpha_y_u, alpha_z_u; // Alpha increment when advancing one pixel in x, y or z.
float alpha_c; // Current alpha while traversing the LOR.
// Indices of the traversed pixels:
int i_min = 0, j_min = 0, k_min = 0; // (i,j,k) of the pixel where the LOR enters the FOV.
int i_max = 0, j_max = 0, k_max = 0; // (i,j,k) of the pixel where the LOR leaves the FOV.
int i, j, k; // Indices used to walk the pixels along the LOR.
// Pixel increments: +-1 depending on the direction of the LOR.
int i_incr = 0, j_incr = 0, k_incr = 0;
// Number of intersected pixels:
int numIntersectedPixels;
// FOV entry/exit points moved to the pixel border, since in this version the
// entry pixel is treated as a whole pixel rather than starting from the
// exact circle intersection point:
float x_1_mm, x_2_mm, y_1_mm, y_2_mm, z_1_mm, z_2_mm;
// Full LOR length (between P0 and P1) and length inside the FOV:
float rayLength_mm; //, rayLengthInFov_mm;
// For a cylindrical FOV:
// (A disabled alternative computed the LOR entry/exit by intersecting the
// line with a circle of radius d_RadioFov_mm, solving
// (X0+alpha*Vx)^2 + (Y0+alpha*Vy)^2 = R^2 for the two alphas; the
// square-FOV code below is used instead.)
// For a square FOV:
// Intersect the LOR with the lines x=-rFov_mm, x=rFov_mm, y=-rFov_mm and
// y=rFov_mm to get the entry/exit points.  z is not checked because the LOR
// should not enter through the cylinder caps, unless the sinogram and the
// input image are inconsistent.
float minValueX_mm = -d_RadioFov_mm;
float minValueY_mm = -d_RadioFov_mm;
float maxValueX_mm = d_RadioFov_mm;
float maxValueY_mm = d_RadioFov_mm;
// Calculates alpha values for the inferior planes (entry planes) of the FOV
if(LOR->x == 0) // Parallel to x axis
{
alpha_y_1 = (minValueY_mm - P0->y) / LOR->y;
alpha_y_2 = (maxValueY_mm - P0->y) / LOR->y;
if(alpha_y_1 < alpha_y_2)
{
alpha_min = alpha_y_1;
alpha_max = alpha_y_2;
}
else
{
alpha_min = alpha_y_2;
alpha_max = alpha_y_1;
}
}
else if(LOR->y == 0) // Parallel to y axis.
{
alpha_x_1 = (minValueX_mm - P0->x) / LOR->x;
alpha_x_2 = (maxValueX_mm - P0->x) / LOR->x;
if(alpha_x_1 < alpha_x_2)
{
alpha_min = alpha_x_1;
alpha_max = alpha_x_2;
}
else
{
alpha_min = alpha_x_2;
alpha_max = alpha_x_1;
}
}
else
{
alpha_x_1 = (minValueX_mm - P0->x) / LOR->x;
alpha_y_1 = (minValueY_mm - P0->y) / LOR->y;
// Calculates alpha values for superior planes (going-out planes) of the FOV
alpha_x_2 = (maxValueX_mm - P0->x) / LOR->x; // ValuesX has one more element than pixels in X, so InputVolume->SizeX can index the last element
alpha_y_2 = (maxValueY_mm - P0->y) / LOR->y;
//alpha min
alpha_x_min = min(alpha_x_1, alpha_x_2);
alpha_y_min = min(alpha_y_1, alpha_y_2);
//alpha_y_min = max((float)0, alpha_y_min);
alpha_min = max(alpha_x_min, alpha_y_min); //
//alpha max
alpha_x_max = max(alpha_x_1, alpha_x_2);
alpha_y_max = max(alpha_y_1, alpha_y_2);
alpha_max = min(alpha_x_max, alpha_y_max);
}
// if the radius of the scanner is less than the diagonal (alpha less than 0), the entry point should be P0
if ((alpha_min<0)||(alpha_min>1)) // (alpha_min>1) added because for LORs parallel to an axis both alphas can be positive or negative.
alpha_min = 0;
// if the radius of the scanner is less than the diagonal (alpha less than 0), the entry point should be P0
if ((alpha_max>1)||(alpha_max<0))
alpha_max = 1;
// End of the square-FOV case.
// Image coordinates of the entry and exit points:
x_1_mm = P0->x + LOR->x * alpha_min;
y_1_mm = P0->y + LOR->y * alpha_min;
z_1_mm = P0->z + LOR->z * alpha_min;
x_2_mm = P0->x + LOR->x * alpha_max;
y_2_mm = P0->y + LOR->y * alpha_max;
z_2_mm = P0->z + LOR->z * alpha_max;
//rayLengthInFov_mm = sqrt((x_2_mm-x_1_mm) * (x_2_mm-x_1_mm) + (y_2_mm-y_1_mm) * (y_2_mm-y_1_mm) + (z_2_mm-z_1_mm) * (z_2_mm-z_1_mm));
// Total LOR length: the distance between P0 and P1, which are normally the
// points of the LOR on the detector.
rayLength_mm = sqrt(((P0->x + LOR->x) - P0->x) * ((P0->x + LOR->x) - P0->x)
+ ((P0->y + LOR->y) - P0->y) * ((P0->y + LOR->y) - P0->y)
+ ((P0->z + LOR->z) - P0->z) * ((P0->z + LOR->z) - P0->z));
float offsetZ_mm = d_imageSize.sizePixelZ_mm/2;//(SCANNER_ZFOV - cudaZFOV)/2;
#ifdef __DEBUG__
if((z_1_mm < offsetZ_mm)||(z_1_mm > (d_AxialFov_mm-offsetZ_mm)) || (z_2_mm < offsetZ_mm)||(z_2_mm > (d_AxialFov_mm-offsetZ_mm)))
{
// The LOR enters through the caps of the FOV cylinder:
printf("Warning: Lor que entra por las tapas del cilindro del FoV.\n");
}
#endif
// With alpha_min and alpha_max we have the FOV entry/exit points, from which
// the indices of the entry and exit pixels are obtained.
// The entry pixel is needed so it can later be treated as a whole pixel,
// even if the FOV is entered at an intermediate point:
i_min = floorf((x_1_mm + d_RadioFov_mm)/d_imageSize.sizePixelX_mm); // In X increase of System Coordinate = Increase Pixels.
j_min = floorf((y_1_mm + d_RadioFov_mm)/d_imageSize.sizePixelY_mm);
k_min = floorf((z_1_mm - offsetZ_mm)/d_imageSize.sizePixelZ_mm);
i_max = floorf((x_2_mm + d_RadioFov_mm)/d_imageSize.sizePixelX_mm); // In X increase of System Coordinate = Increase Pixels.
j_max = floorf((y_2_mm + d_RadioFov_mm)/d_imageSize.sizePixelY_mm); //
k_max = floorf((z_2_mm - offsetZ_mm)/d_imageSize.sizePixelZ_mm);
// The clamp/correction was removed, because rounding errors can sometimes
// land on pixel -1 or sizePixel.
#ifdef __DEBUG__
// Check that the i and j indices fell inside the image; otherwise we are outside the FOV.
if(((i_min<0)||(i_max<0))||((j_min<0)||(j_max<0))||((k_min<0)||(k_max<0))||((i_min>=d_imageSize.nPixelsX)||(i_max>=d_imageSize.nPixelsX))||
((j_min>=d_imageSize.nPixelsY)||(j_max>=d_imageSize.nPixelsY))||((k_min>=d_imageSize.nPixelsZ)||(k_max>=d_imageSize.nPixelsZ)))
{
// Rounding errors can land exactly on the boundary:
printf("Indices fuera de imagen. Pixel min: (%d,%d,%d) (%f,%f,%f)mm. Pixel max: (%d,%d,%d) (%f,%f,%f)mm.\n", i_min, j_min, k_min, x_1_mm, y_1_mm, z_1_mm, i_max, j_max, k_max, x_2_mm, y_2_mm, z_2_mm);
return;
}
#endif
// Number of intersected pixels:
numIntersectedPixels = abs(i_max - i_min) + abs(j_max - j_min) + abs(k_max - k_min) + 2; // +0 in each dimension (to get the number of intersections), -1 to get pixels -> 3x1-1 = +2
// Pixel increments
// Starting from (i_min,j_min) the LOR is walked pixel by pixel; at each step
// we decide whether the next crossing is an x, y or z plane by computing the
// alpha of advancing one pixel in each direction — the smallest of
// alpha_x/alpha_y/alpha_z tells which index to advance.
i_incr = 0, j_incr = 0, k_incr = 0; //The increments are zero (perpendicular line) if Vx = 0 for i, and so on
if(LOR->x > 0)
{
i_incr = 1;
alpha_x = ( -d_RadioFov_mm + (i_min + i_incr) * d_imageSize.sizePixelX_mm - P0->x ) / LOR->x;
}
else if(LOR->x < 0)
{
i_incr = -1;
alpha_x = ( -d_RadioFov_mm + i_min * d_imageSize.sizePixelX_mm - P0->x ) / LOR->x;
}
/*else
alpha_x = FLT_MAX;*/ // Not needed because alpha_x is initialized with this value.
if(LOR->y > 0)
{
j_incr = 1; // Remember that in Y and Z an increase in the system coordinate means a decrease in the pixel index
alpha_y = ( -d_RadioFov_mm + (j_min + j_incr) * d_imageSize.sizePixelY_mm - P0->y ) / LOR->y;
}
else if(LOR->y < 0)
{
j_incr = -1;
alpha_y = ( -d_RadioFov_mm + j_min * d_imageSize.sizePixelY_mm - P0->y) / LOR->y;
}
/*if (alpha_y <0)
alpha_y = FLT_MAX;
*/
if(LOR->z > 0)
{
k_incr = 1; // Remember that in Y and Z an increase in the system coordinate means a decrease in the pixel index
alpha_z = ( offsetZ_mm + (k_min + k_incr) * d_imageSize.sizePixelZ_mm - P0->z) / LOR->z;
}
else if(LOR->z < 0)
{
k_incr = -1;
alpha_z = ( offsetZ_mm + k_min * d_imageSize.sizePixelZ_mm - P0->z ) / LOR->z;
}
/*if (alpha_z <0)
alpha_z = FLT_MAX;
*/
// Alpha increments when advancing one pixel in x, y or z.
alpha_x_u = fabsf(d_imageSize.sizePixelX_mm / (LOR->x)); //alpha_x_u = PixelSizeX / TotalRayLength - remember that Vx must be the difference in X between the two points of the LOR
alpha_y_u = fabsf(d_imageSize.sizePixelY_mm / (LOR->y));
alpha_z_u = fabsf(d_imageSize.sizePixelZ_mm / (LOR->z));
// alpha_c tracks the current alpha while walking the pixels.
alpha_c = alpha_min;
// Initialize i,j,k to their entry values.
i = i_min;
j = j_min;
k = k_min;
// Walk the LOR and accumulate each segment's contribution into the image.
float siddonWeight = 0, result = 0, aux = 0;
aux = sinogram[indiceMichelogram]; // Use a register to accelerate the access to the value of the sinogram bin.
for(int m = 0; m < numIntersectedPixels; m++)
{
if((alpha_x <= alpha_y) && (alpha_x <= alpha_z))
{
// Crossing an x plane: advance i.
siddonWeight = (alpha_x - alpha_c) * rayLength_mm;
i += i_incr;
alpha_c = alpha_x;
alpha_x += alpha_x_u;
}
else if((alpha_y <= alpha_x) && (alpha_y <= alpha_z))
{
siddonWeight = (alpha_y - alpha_c) * rayLength_mm;
// Crossing a y plane: advance j.
j += j_incr;
alpha_c = alpha_y;
alpha_y += alpha_y_u;
}
else
{
// Crossing a z plane: advance k.
siddonWeight = (alpha_z - alpha_c) * rayLength_mm;
k += k_incr;
alpha_c = alpha_z;
alpha_z += alpha_z_u;
}
//aux = tex3D(texImage,i+0.5f,j+0.5f,k+0.5f);
// Read-modify-write on the output surface; clamped boundary mode keeps
// slightly out-of-range indices inside the surface.
surf3Dread(&result, surfImage,i*sizeof(float),j,k, hipBoundaryModeClamp);
result = result + siddonWeight * aux;
surf3Dwrite(result, surfImage,i*sizeof(float),j,k, hipBoundaryModeClamp);
//atomicAdd(Result+indicePixel, siddonWeight * d_imageSize.sizePixelX_mm);
}
}
#endif
| 8fc2eb151d6ee1c07c8bbecf5645b9f8dd2f5755.cu | /**
\file cuSiddonWithSurfaces.cu
\brief Implementación de siddon con memoria de surfaces para la imagen ya que permite escribirla, por eso esta función hace la retroproyección.
\todo
\bug
\warning
\author Martín Belzunce (martin.a.belzunce@gmail.com)
\date 2014.07.11
\version 1.1.0
*/
#ifndef _CUSIDDONWITHSURF_H_
#define _CUSIDDONWITHSURF_H_
#include <CuSiddon.h>
#include <float.h>
// Variables de Memoria constante utilizadas en Siddon. Se debe encargar de cargar los datos de forma rpevia a la reconstrucción.
__device__ __constant__ float dummy2; // Esto lo tuve que agregar porque el cudaMemcpyToSymbol me tiraba error con la primera variable declarada acá, sea cual sea.
extern __device__ __constant__ float d_AxialFov_mm;
extern __device__ __constant__ float d_RadioFov_mm;
extern __device__ __constant__ SizeImage d_imageSize;
extern __device__ __constant__ int d_numPixelsPerSlice;
extern __device__ __constant__ int d_numBinsSino2d;
extern __device__ __constant__ float d_ringSize_mm;
surface<void, cudaSurfaceType3D> surfImage;
// This function calculates Siddon Wieghts for a lor. It gets as parameters, the LOR direction vector in
// a float4*, the first point of the lor in a float4, a float* where a posible input must be loaded,
// a float* where the result will be stored, and a int that says in which mode are we working.
// The modes availables are: SENSIBILITY_IMAGE -> It doesn't need any input, the output is a Image
// PROJECTIO -> The input is a Image, and the output is a Michelogram
// BACKPROJECTION -> The input is a Michelogram and the output is a Image
// The size of the volume must be loaded first in the global and constant variable named d_imageSize
// and the size of the michelogram in cuda_michelogram_size
__device__ void cuSiddonWithSurfaces (float4* LOR, float4* P0, float* image, float* sinogram, int indiceMichelogram)
{
// Variables relacionadas con el parámetro alpha de la recta de la lor.
float alpha_x_1, alpha_x_2, alpha_y_1, alpha_y_2; // Valores de alpha para la intersección de la recta con el círculo del fov.
float alpha_x_min, alpha_y_min, alpha_x_max, alpha_y_max; // Valores de alpha de ambos puntos por coordenada, pero ahora separados por menor y mayor.
//float alpha_z_min, alpha_z_max;
float alpha_min, alpha_max; // Valores de alpha mínimo y máximo finales, o sea de entrada y salida al fov de la lor.
// Valores de alpha para recorrer la lor:
float alpha_x = FLT_MAX, alpha_y = FLT_MAX, alpha_z = FLT_MAX; // Valores de alpha si avanzo en x o en y, siempre debo ir siguiendo al más cercano.
float alpha_x_u, alpha_y_u, alpha_z_u; // Valor de incremento de alpha según avance un píxel en x o en y.
float alpha_c; // Valor de alhpa actual, cuando se recorre la lor.
// Variables relacionadas con los índices de píxeles recorridos:
int i_min = 0, j_min = 0, k_min = 0; // Índices (i,j,k) del píxel de entrada al fov.
int i_max = 0, j_max = 0, k_max = 0; // Índices (i,j,k) del píxel de salida al fov.
int i, j, k; // Índices con que recorro los píxeles de la lor.
// Incrementos en píxeles. Puede ser +-1 según la dirección de la lor.
int i_incr = 0, j_incr = 0, k_incr = 0;
// Cantidad de píxeles intersectados:
int numIntersectedPixels;
// Punto de entrada y salida al fov trasladado al borde del píxel, ya que en esta versión el píxel
// de entrada se considera entero, y no desde el punto exacto de itnersección con el círculo:
float x_1_mm, x_2_mm, y_1_mm, y_2_mm, z_1_mm, z_2_mm;
// Largo de la lor teniendo en cuenta P0 y P1, y largo de la lor dentro del fov:
float rayLength_mm; //, rayLengthInFov_mm;
// For Fov cilindrico:
// // Cálculo de intersección de la lor con un fov cilíndrico.
// // Las lors siempre interesectan las caras curvas del cilindro y no las tapas. Ya
// // que el fov de los scanner está limitado por eso.
// // Lo calculo como la intersección entre la recta y una circunferencia de radio cudaRFOV. La ecuación a resolver es:
// // (X0+alpha*Vx).^2+(Y0+alpha*Vy).^2=cudaRFOV.^2
// // alpha = (-2*(Vx+Vy)+sqrt(4*Vx^2*(1-c)+4*Vy^2*(1-c) + 8(Vx+Vy)))/(2*(Vx^2+Vy^2))
// //float c = P0->x*P0->x + P0->y*P0->y - cudaRFOV*cudaRFOV;
// float segundoTermino = sqrt(4.0f*(LOR->x*LOR->x*(d_RadioFov_mm*d_RadioFov_mm-P0->y*P0->y)
// +LOR->y*LOR->y*(d_RadioFov_mm*d_RadioFov_mm-P0->x*P0->x)) + 8.0f*LOR->x*P0->x*LOR->y*P0->y);
//
// // Obtengo los valores de alpha donde se intersecciona la recta con la circunferencia.
// // Como la debería cruzar en dos puntos hay dos soluciones.
// alpha_xy_1 = (-2*(LOR->x*P0->x+LOR->y*P0->y) + segundoTermino)/(2*(LOR->x*LOR->x+LOR->y*LOR->y));
// alpha_xy_2 = (-2*(LOR->x*P0->x+LOR->y*P0->y) - segundoTermino)/(2*(LOR->x*LOR->x+LOR->y*LOR->y));
//
// // Valores de alpha de entrada y de salida. El de entrada es el menor, porque la lor
// // se recorre desde P0 a P1.
// alpha_min = min(alpha_xy_1, alpha_xy_2);
// alpha_max = max(alpha_xy_1, alpha_xy_2);
// Para FOV cuadrado:
// Obtengo la intersección de la lor con las rectas x=-rFov_mm x=rFov_mm y=-rFov_mm y =rFov_mm
// Para dichos valores verifico que la otra coordenada este dentro de los valores, y obtengo
// los puntos de entrada y salida de la lor. No me fijo z, porque no debería ingresar por las
// tapas del cilindro, al menos que haya algún error entre el sinograma y la imagen de entrada.
float minValueX_mm = -d_RadioFov_mm;
float minValueY_mm = -d_RadioFov_mm;
float maxValueX_mm = d_RadioFov_mm;
float maxValueY_mm = d_RadioFov_mm;
// Calculates alpha values for the inferior planes (entry planes) of the FOV
if(LOR->x == 0) // Parallel to x axis
{
alpha_y_1 = (minValueY_mm - P0->y) / LOR->y;
alpha_y_2 = (maxValueY_mm - P0->y) / LOR->y;
if(alpha_y_1 < alpha_y_2)
{
alpha_min = alpha_y_1;
alpha_max = alpha_y_2;
}
else
{
alpha_min = alpha_y_2;
alpha_max = alpha_y_1;
}
}
else if(LOR->y == 0) // Parallel to y axis.
{
alpha_x_1 = (minValueX_mm - P0->x) / LOR->x;
alpha_x_2 = (maxValueX_mm - P0->x) / LOR->x;
if(alpha_x_1 < alpha_x_2)
{
alpha_min = alpha_x_1;
alpha_max = alpha_x_2;
}
else
{
alpha_min = alpha_x_2;
alpha_max = alpha_x_1;
}
}
else
{
alpha_x_1 = (minValueX_mm - P0->x) / LOR->x;
alpha_y_1 = (minValueY_mm - P0->y) / LOR->y;
// Calculates alpha values for superior planes ( going out planes) of the fov
alpha_x_2 = (maxValueX_mm - P0->x) / LOR->x; // ValuesX has one more element than pixels in X, thats we can use InputVolume->SizeX as index for the las element
alpha_y_2 = (maxValueY_mm - P0->y) / LOR->y;
//alpha min
alpha_x_min = min(alpha_x_1, alpha_x_2);
alpha_y_min = min(alpha_y_1, alpha_y_2);
//alpha_y_min = max((float)0, alpha_y_min);
alpha_min = max(alpha_x_min, alpha_y_min); //
//alpha max
alpha_x_max = max(alpha_x_1, alpha_x_2);
alpha_y_max = max(alpha_y_1, alpha_y_2);
alpha_max = min(alpha_x_max, alpha_y_max);
}
// if the radius of the scanner is less than the diagonal (alpha less than 0), the entry point should be P0
if ((alpha_min<0)||(alpha_min>1)) // I added (alpha_min>1), because for aprallel lors to an axis, both alphas can be positiver or negative.
alpha_min = 0;
// if the radius of the scanner is less than the diagonal (alpha less than 0), the entry point should be P0
if ((alpha_max>1)||(alpha_max<0))
alpha_max = 1;
// Fin para Fov Cuadrado.
// Coordenadas dentro de la imagen de los dos puntos de entrada:
x_1_mm = P0->x + LOR->x * alpha_min;
y_1_mm = P0->y + LOR->y * alpha_min;
z_1_mm = P0->z + LOR->z * alpha_min;
x_2_mm = P0->x + LOR->x * alpha_max;
y_2_mm = P0->y + LOR->y * alpha_max;
z_2_mm = P0->z + LOR->z * alpha_max;
//rayLengthInFov_mm = sqrt((x_2_mm-x_1_mm) * (x_2_mm-x_1_mm) + (y_2_mm-y_1_mm) * (y_2_mm-y_1_mm) + (z_2_mm-z_1_mm) * (z_2_mm-z_1_mm));
// Distancia total de la LOR. Es la distancia entre los puntos P0 y P1, habitualmente, esos son
// los puntos de la lor sobre el detector.
rayLength_mm = sqrt(((P0->x + LOR->x) - P0->x) * ((P0->x + LOR->x) - P0->x)
+ ((P0->y + LOR->y) - P0->y) * ((P0->y + LOR->y) - P0->y)
+ ((P0->z + LOR->z) - P0->z) * ((P0->z + LOR->z) - P0->z));
float offsetZ_mm = d_imageSize.sizePixelZ_mm/2;//(SCANNER_ZFOV - cudaZFOV)/2;
#ifdef __DEBUG__
if((z_1_mm < offsetZ_mm)||(z_1_mm > (d_AxialFov_mm-offsetZ_mm)) || (z_2_mm < offsetZ_mm)||(z_2_mm > (d_AxialFov_mm-offsetZ_mm)))
{
// La lor entra por las tapas del clindro del fov:
printf("Warning: Lor que entra por las tapas del cilindro del FoV.\n");
}
#endif
// Con el alhpa_min y el alpha_max tengo los puntos de entrada y salida al fov. De los cuales obtengo
// los índices de los píxeles de entrada y salida del fov.
// En este caso me interesa el píxel de entrada, para luego considerarlo entero,
// por más que la entrada al fov sea en un punto intermedio:
i_min = floorf((x_1_mm + d_RadioFov_mm)/d_imageSize.sizePixelX_mm); // In X increase of System Coordinate = Increase Pixels.
j_min = floorf((y_1_mm + d_RadioFov_mm)/d_imageSize.sizePixelY_mm);
k_min = floorf((z_1_mm - offsetZ_mm)/d_imageSize.sizePixelZ_mm);
i_max = floorf((x_2_mm + d_RadioFov_mm)/d_imageSize.sizePixelX_mm); // In X increase of System Coordinate = Increase Pixels.
j_max = floorf((y_2_mm + d_RadioFov_mm)/d_imageSize.sizePixelY_mm); //
k_max = floorf((z_2_mm - offsetZ_mm)/d_imageSize.sizePixelZ_mm);
// Esta verificación y corrección la saco, porque a veces por error de redondeo puede quedar en el píxel -1 o en sizePixel
#ifdef __DEBUG__
// Verifico que los índices de i y j dieron dentro de la imagen, sino es que que estoy fuera del fov.
if(((i_min<0)||(i_max<0))||((j_min<0)||(j_max<0))||((k_min<0)||(k_max<0))||((i_min>=d_imageSize.nPixelsX)||(i_max>=d_imageSize.nPixelsX))||
((j_min>=d_imageSize.nPixelsY)||(j_max>=d_imageSize.nPixelsY))||((k_min>=d_imageSize.nPixelsZ)||(k_max>=d_imageSize.nPixelsZ)))
{
// Por error de redondeo puede caer al límite:
printf("Indices fuera de imagen. Pixel min: (%d,%d,%d) (%f,%f,%f)mm. Pixel max: (%d,%d,%d) (%f,%f,%f)mm.\n", i_min, j_min, k_min, x_1_mm, y_1_mm, z_1_mm, i_max, j_max, k_max, x_2_mm, y_2_mm, z_2_mm);
return;
}
#endif
// Cantidad de píxeles intersectados:
numIntersectedPixels = abs(i_max - i_min) + abs(j_max - j_min) + abs(k_max - k_min) + 2; // +0 in each dimension(for getting the amount of itnersections) -1 toget pixels> 3x1-1 = +2
// Pixels increments
// A partir del (i_min,j_min) voy recorriendo la lor, para determinar el índice del próximo píxel, o sea
// saber si avanzo en i o en j, debo ver si el cambio se da en x o en y. Para esto en cada avance se calcula
// el valor de alpha si avanzar un píxel en i (o x) y el valor de alpha en j (o y). De estos dos valores: alpha_x
// y alpha_y, el que sea menor indicará en que sentido tengo que avanzar con el píxel.
i_incr = 0, j_incr = 0, k_incr = 0; //The increments are zero (perpendicular liine) if Vx = 0 for i, and so on
if(LOR->x > 0)
{
i_incr = 1;
alpha_x = ( -d_RadioFov_mm + (i_min + i_incr) * d_imageSize.sizePixelX_mm - P0->x ) / LOR->x;
}
else if(LOR->x < 0)
{
i_incr = -1;
alpha_x = ( -d_RadioFov_mm + i_min * d_imageSize.sizePixelX_mm - P0->x ) / LOR->x;
}
/*else
alpha_x = FLT_MAX;*/ // I can avoid this because I initialize with this value.
if(LOR->y > 0)
{
j_incr = 1; // Remeber than in Y and Z the increase in the SystemCoordinate means a decreas in the pixel index
alpha_y = ( -d_RadioFov_mm + (j_min + j_incr) * d_imageSize.sizePixelY_mm - P0->y ) / LOR->y;
}
else if(LOR->y < 0)
{
j_incr = -1;
alpha_y = ( -d_RadioFov_mm + j_min * d_imageSize.sizePixelY_mm - P0->y) / LOR->y;
}
/*if (alpha_y <0)
alpha_y = FLT_MAX;
*/
if(LOR->z > 0)
{
k_incr = 1; // Remeber than in Y and Z the increase in the SystemCoordinate means a decreas in the pixel index
alpha_z = ( offsetZ_mm + (k_min + k_incr) * d_imageSize.sizePixelZ_mm - P0->z) / LOR->z;
}
else if(LOR->z < 0)
{
k_incr = -1;
alpha_z = ( offsetZ_mm + k_min * d_imageSize.sizePixelZ_mm - P0->z ) / LOR->z;
}
/*if (alpha_z <0)
alpha_z = FLT_MAX;
*/
// Incremento en los valores de alpha, según se avanza un píxel en x o en y.
alpha_x_u = fabsf(d_imageSize.sizePixelX_mm / (LOR->x)); //alpha_x_u = DistanciaPixelX / TotalDelRayo - Remember that Vx must be loaded in order to be the diference in X between the two points of the lor
alpha_y_u = fabsf(d_imageSize.sizePixelY_mm / (LOR->y));
alpha_z_u = fabsf(d_imageSize.sizePixelZ_mm / (LOR->z));
// En alpha_c voy guardando el valor de alpha con el que voy recorriendo los píxeles.
alpha_c = alpha_min;
// Inicialización de i,j a sus valores de entrada.
i = i_min;
j = j_min;
k = k_min;
// Recorro la lor y guardo los segmentos en la lista de salida.
float siddonWeight = 0, result = 0, aux = 0;
aux = sinogram[indiceMichelogram]; // Use a register to accelerate the access to the value of the sinogram bin.
for(int m = 0; m < numIntersectedPixels; m++)
{
if((alpha_x <= alpha_y) && (alpha_x <= alpha_z))
{
// Cruce por el plano x: avanzo en i.
siddonWeight = (alpha_x - alpha_c) * rayLength_mm;
i += i_incr;
alpha_c = alpha_x;
alpha_x += alpha_x_u;
}
else if((alpha_y <= alpha_x) && (alpha_y <= alpha_z))
{
siddonWeight = (alpha_y - alpha_c) * rayLength_mm;
// Cruce por el plano y: avanzo en j.
j += j_incr;
alpha_c = alpha_y;
alpha_y += alpha_y_u;
}
else
{
// Cruce por el plano y: avanzo en j.
siddonWeight = (alpha_z - alpha_c) * rayLength_mm;
k += k_incr;
alpha_c = alpha_z;
alpha_z += alpha_z_u;
}
//aux = tex3D(texImage,i+0.5f,j+0.5f,k+0.5f);
// Write to output surface
surf3Dread(&result, surfImage,i*sizeof(float),j,k, cudaBoundaryModeClamp);
result = result + siddonWeight * aux;
surf3Dwrite(result, surfImage,i*sizeof(float),j,k, cudaBoundaryModeClamp);
//atomicAdd(Result+indicePixel, siddonWeight * d_imageSize.sizePixelX_mm);
}
}
#endif
|
ad6c924adbf1f71a54faae5827f02e3a3ea3f499.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/opencv.hpp>
#include <thrust/window_2d.h>
using namespace cv;
__constant__ float constantTransformMatrix [6];
// Per-window functor: applies the 2x3 affine transform stored in the
// constant-memory array constantTransformMatrix (uploaded by the host with
// cudaMemcpyToSymbol) to the window's top-left coordinate and copies the
// source pixel to the transformed position in the output block.
class AffineTransformFunctor
{
public:
// Device-side pointer to the destination image block.
thrust::block_2d<uchar> *outBlock;
// Captures the device pointer of the output block so operator() can
// dereference it from device code.
AffineTransformFunctor(thrust::block_2d<uchar> * outBlock)
{
this->outBlock = outBlock->device_pointer;
}
__device__ void operator() (const thrust::window_2d<uchar> &inputWindow) const
{
int i_out, y_out; // renamed? no — keep originals below
}
};
// Demo: applies an affine warp to a grayscale image on the GPU via the
// thrust window_2d extension.
// Usage: <prog> [dim]  — dim is the square size the input is resized to
// (default 512).
// Fixes vs. original: fails early with a message when car.jpg cannot be
// read (instead of crashing inside resize), and frees the two malloc'd
// staging buffers that previously leaked.
int main(int argc, char const *argv[]) {
hipDeviceProp_t dev_prop;
hipGetDeviceProperties(&dev_prop,0);
Mat small = imread("car.jpg",CV_LOAD_IMAGE_GRAYSCALE);
if (small.empty())
{
printf("error: could not read input image car.jpg\n");
return 1;
}
Mat image;
int dim = 512;
if(argc ==2)
{
dim = atoi(argv[1]);
}
resize(small,image,Size(dim,dim));
// Device-side input and output image blocks.
thrust::block_2d<uchar> uchar_image_block (image.cols,image.rows,0.0f);
thrust::block_2d<uchar> outBlock (image.cols,image.rows,0.0f);
// Host staging buffer for upload/download.
uchar * img = (uchar * )malloc(sizeof(uchar)*(image.cols*image.rows));
for(int i = 0; i<image.cols*image.rows;i++)
{
img[i]=(uchar)image.ptr()[i];
}
uchar_image_block.upload(img);
// Build the affine transform from three point correspondences.
Point2f srcTri[3];
Point2f dstTri[3];
Mat warp_mat( 2, 3, CV_32FC1 );
srcTri[0] = Point2f( 0,0 );
srcTri[1] = Point2f( image.cols - 1, 0 );
srcTri[2] = Point2f( 0, image.rows - 1 );
dstTri[0] = Point2f( image.cols*0.0, image.rows*0.5 );
dstTri[1] = Point2f( image.cols*0.8, image.rows*0.2 );
dstTri[2] = Point2f( image.cols*0.2, image.rows*0.7 );
warp_mat = getAffineTransform( srcTri, dstTri );
// getAffineTransform returns CV_64F; the device constant array is float.
warp_mat.convertTo(warp_mat,CV_32FC1);
hipMemcpyToSymbol(constantTransformMatrix,warp_mat.ptr(),sizeof(float)*warp_mat.rows*warp_mat.cols);
// One 1x1 window per pixel; the functor writes each pixel to its warped
// location in outBlock.
thrust::window_vector<uchar> inputVector(&uchar_image_block,1,1,1,1);
AffineTransformFunctor atf(&outBlock);
thrust::for_each(inputVector.begin(),inputVector.end(),atf);
unsigned char * outputFloatImageData = (unsigned char *)malloc(sizeof(unsigned char)*(uchar_image_block.end()-uchar_image_block.begin()));
outBlock.download(&img);
for(int i = 0; i<image.cols*image.rows;i++)
{
outputFloatImageData[i]=(unsigned char)img[i];
}
// 'output' wraps outputFloatImageData without owning it, so the buffer is
// released manually once no longer needed.
Mat output (Size(image.cols,image.rows),CV_8UC1,outputFloatImageData);
#ifdef OWRITE
cv::imwrite("ainput.png",image);
cv::imwrite("aoutput.png",output);
#endif
#ifdef SHOW
cv::imshow("ainput.png",image);
cv::imshow("aoutput.png",output);
cv::waitKey(0);
#endif
free(img);
free(outputFloatImageData);
return 0;
}
| ad6c924adbf1f71a54faae5827f02e3a3ea3f499.cu | #include <opencv2/opencv.hpp>
#include <thrust/window_2d.h>
using namespace cv;
__constant__ float constantTransformMatrix [6];
// Functor applied once per 1x1 window of the source image: maps each source
// pixel through the 2x3 affine matrix held in __constant__ memory and
// scatters it into the output block (forward warping: unmapped output pixels
// keep their initial value, collisions are last-writer-wins).
class AffineTransformFunctor
{
public:
// Raw device pointer to the destination image block.
thrust::block_2d<uchar> *outBlock;
AffineTransformFunctor(thrust::block_2d<uchar> * outBlock)
{
this->outBlock = outBlock->device_pointer;
}
__device__ void operator() (const thrust::window_2d<uchar> &inputWindow) const
{
int x_out, y_out;
// [x_out, y_out]^T = M * [x, y, 1]^T, M = constantTransformMatrix (row-major 2x3).
x_out = (int)(constantTransformMatrix[0]*inputWindow.start_x+constantTransformMatrix[1]*inputWindow.start_y+constantTransformMatrix[2]*1);
y_out = (int)(constantTransformMatrix[0+3]*inputWindow.start_x+constantTransformMatrix[1+3]*inputWindow.start_y+constantTransformMatrix[2+3]*1);
// NOTE(review): no bounds check — if the transform maps a source pixel
// outside the output extent this indexes out of range; confirm block_2d
// clamps internally or add an explicit guard here.
(*outBlock)[y_out][x_out]=inputWindow[0][0];
}
};
// Demo: GPU affine warp of "car.jpg" (grayscale) using thrust 2D block/window
// primitives. Optional argv[1] sets the square working resolution (default
// 512). Compile with -DOWRITE to save and/or -DSHOW to display the images.
int main(int argc, char const *argv[]) {
cudaDeviceProp dev_prop;
cudaGetDeviceProperties(&dev_prop,0); // NOTE(review): dev_prop is queried but never used
Mat small = imread("car.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Mat image;
int dim = 512;
if(argc ==2)
{
dim = atoi(argv[1]);
}
resize(small,image,Size(dim,dim));
thrust::block_2d<uchar> uchar_image_block (image.cols,image.rows,0.0f);
thrust::block_2d<uchar> outBlock (image.cols,image.rows,0.0f);
// Host staging buffer for uploading/downloading the image pixels.
uchar * img = (uchar * )malloc(sizeof(uchar)*(image.cols*image.rows));
for(int i = 0; i<image.cols*image.rows;i++)
{
img[i]=(uchar)image.ptr()[i];
}
uchar_image_block.upload(img);
Point2f srcTri[3];
Point2f dstTri[3];
Mat warp_mat( 2, 3, CV_32FC1 );
/// Set your 3 points to calculate the Affine Transform
srcTri[0] = Point2f( 0,0 );
srcTri[1] = Point2f( image.cols - 1, 0 );
srcTri[2] = Point2f( 0, image.rows - 1 );
dstTri[0] = Point2f( image.cols*0.0, image.rows*0.5 );
dstTri[1] = Point2f( image.cols*0.8, image.rows*0.2 );
dstTri[2] = Point2f( image.cols*0.2, image.rows*0.7 );
/// Get the Affine Transform
warp_mat = getAffineTransform( srcTri, dstTri );
// getAffineTransform returns CV_64F; the device constant matrix is float.
warp_mat.convertTo(warp_mat,CV_32FC1);
cudaMemcpyToSymbol(constantTransformMatrix,warp_mat.ptr(),sizeof(float)*warp_mat.rows*warp_mat.cols);
// 1x1 windows with stride 1: the functor visits every source pixel once.
thrust::window_vector<uchar> inputVector(&uchar_image_block,1,1,1,1);
AffineTransformFunctor atf(&outBlock);
thrust::for_each(inputVector.begin(),inputVector.end(),atf);
unsigned char * outputFloatImageData = (unsigned char *)malloc(sizeof(unsigned char)*(uchar_image_block.end()-uchar_image_block.begin()));
outBlock.download(&img);
for(int i = 0; i<image.cols*image.rows;i++)
{
outputFloatImageData[i]=(unsigned char)img[i];
}
// 'output' wraps outputFloatImageData without copying (no ownership taken).
Mat output (Size(image.cols,image.rows),CV_8UC1,outputFloatImageData);
#ifdef OWRITE
cv::imwrite("ainput.png",image);
cv::imwrite("aoutput.png",output);
#endif
#ifdef SHOW
cv::imshow("ainput.png",image);
cv::imshow("aoutput.png",output);
cv::waitKey(0);
#endif
// NOTE(review): img and outputFloatImageData are never freed (leak until
// process exit).
return 0;
}
|
735b2c1ad77eaa62b7b9631f3a3eddeeca13a12a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2015, September 7 - October 6
// ###
// ###
// ### Thomas Moellenhoff, Robert Maier, Caner Hazirbas
// ###
// ###
// ###
// ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED
// ###
// ###
#include "aux.h"
#include <iostream>
#include <cmath>
using namespace std;
// uncomment to use the camera
//#define CAMERA
// Forward-difference gradient of a layered (rrr...ggg...bbb) image.
// One thread per element of the w*h*nc array; the right/bottom image borders
// get a zero gradient (Neumann boundary).
__global__ void gradient (float *d_imgIn, float *d_gradH, float *d_gradV, int w, int h, int nc)
{
    const size_t idx = threadIdx.x + threadIdx.y * blockDim.x
                     + blockIdx.x * blockDim.x * blockDim.y;
    if (idx >= (size_t)w * h * nc)
        return;

    const float center = d_imgIn[idx];

    // Horizontal forward difference; zero on the last column of each row.
    const bool lastCol = (idx % w == (size_t)(w - 1));
    d_gradH[idx] = lastCol ? 0 : d_imgIn[idx + 1] - center;

    // Vertical forward difference; zero on the last row of each channel slice.
    const bool lastRow = (idx % ((size_t)w * h) >= (size_t)w * (h - 1));
    d_gradV[idx] = lastRow ? 0 : d_imgIn[idx + w] - center;
}
// Divergence of the gradient field = Laplacian. Uses backward differences so
// that it is the adjoint of the forward-difference gradient above; the
// left/top image borders contribute zero.
__global__ void divergence (float *d_imgHGrad, float *d_imgVGrad, float *d_imgLapl, int w, int h, int nc)
{
    const size_t idx = threadIdx.x + threadIdx.y * blockDim.x
                     + blockIdx.x * blockDim.x * blockDim.y;
    if (idx >= (size_t)w * h * nc)
        return;

    // Backward difference of the horizontal gradient; zero on the first column.
    const bool firstCol = (idx % w == 0);
    const float ddx = firstCol ? 0 : d_imgHGrad[idx] - d_imgHGrad[idx - 1];

    // Backward difference of the vertical gradient; zero on the first row of
    // the current channel slice.
    const bool firstRow = (idx % ((size_t)w * h) < (size_t)w);
    const float ddy = firstRow ? 0 : d_imgVGrad[idx] - d_imgVGrad[idx - w];

    d_imgLapl[idx] = ddx + ddy;
}
// Per-pixel Euclidean norm across the nc channels of a layered image:
// d_imgAbsLapl[p] = sqrt(sum_c d_imgLapl[p + c*w*h]^2).
// One thread per pixel (w*h threads cover all channels).
__global__ void l2_norm (float *d_imgLapl, float *d_imgAbsLapl, int w, int h, int nc)
{
    size_t ind = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
    if (ind < (size_t)w * h)
    {
        // Fix: accumulate in a register instead of read-modify-writing the
        // global output once per channel.
        float sumSq = 0.0f;
        for (int c = 0; c < nc; c++)
        {
            const float v = d_imgLapl[ind + (size_t)c * w * h];
            sumSq += v * v;
        }
        // Fix: sqrtf avoids the silent float -> double -> float round trip
        // that sqrt() forces on a float argument.
        d_imgAbsLapl[ind] = sqrtf(sumSq);
    }
}
// Entry point: parses CLI flags, loads an image (or grabs camera frames with
// -DCAMERA), computes the per-channel Laplacian on the GPU (forward-difference
// gradient followed by its backward-difference divergence), reduces it to a
// per-pixel L2 norm across channels, then shows/saves input and result.
// Flags: -i <image> (required without CAMERA), -repeats <n>, -gray.
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
hipDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> -g <gamma>[-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << endl;
// Set the output image format
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
// Matrix for absolute of Laplacial
cv::Mat mOut(h, w, CV_32FC1);
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// allocate raw input image array
float *imgIn = new float[(size_t)w * h * nc];
// allocate raw output arrays for all the intermediate values
// (single channel: the L2 norm collapses the nc channels into one map)
float *imgOut = new float[(size_t)w * h];
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
convert_mat_to_layered (imgIn, mIn);
// MAIN COMPUTATION
// Init image array on the device
float *d_imgIn = NULL;
float *d_imgHGrad = NULL;
float *d_imgVGrad = NULL;
float *d_imgLapl = NULL;
float *d_imgAbsLapl = NULL;
hipMalloc(&d_imgIn, w * h * nc * sizeof(float)); CUDA_CHECK;
hipMalloc(&d_imgHGrad, w * h * nc * sizeof(float)); CUDA_CHECK;
hipMalloc(&d_imgVGrad, w * h * nc * sizeof(float)); CUDA_CHECK;
hipMalloc(&d_imgLapl, w * h * nc * sizeof(float)); CUDA_CHECK;
hipMalloc(&d_imgAbsLapl, w * h * sizeof(float)); CUDA_CHECK;
// move from host to device memory
hipMemcpy(d_imgIn, imgIn, w * h * nc * sizeof(float), hipMemcpyHostToDevice); CUDA_CHECK;
dim3 block;
dim3 grid;
Timer timer; timer.start();
for (int rep = 0; rep < repeats; rep++)
{
// initialize block and grid size
// (ceil division so the 1D grid covers every element; kernels bounds-check the tail)
block = dim3(64, 1, 1);
grid = dim3((w * h * nc + block.x * block.y * block.z - 1) / (block.x * block.y * block.z), 1, 1);
hipLaunchKernelGGL(( gradient) , dim3(grid), dim3(block), 0, 0, d_imgIn, d_imgHGrad, d_imgVGrad, w, h, nc);
hipLaunchKernelGGL(( divergence) , dim3(grid), dim3(block), 0, 0, d_imgHGrad, d_imgVGrad, d_imgLapl, w, h, nc);
// adjust the grid size because now we only need 1 kernel per pixel for ALL the channels
grid = dim3((w * h + block.x * block.y * block.z - 1) / (block.x * block.y * block.z), 1, 1);
hipLaunchKernelGGL(( l2_norm) , dim3(grid), dim3(block), 0, 0, d_imgLapl, d_imgAbsLapl, w, h, nc);
}
timer.end(); float t = timer.get(); // elapsed time in seconds
cout << "average kernel time: " << t*1000/repeats << " ms" << endl;
// copy result back to host memory
hipMemcpy(imgOut, d_imgAbsLapl, w * h * sizeof(float), hipMemcpyDeviceToHost); CUDA_CHECK;
// free the device memory
hipFree(d_imgIn); CUDA_CHECK;
hipFree(d_imgHGrad); CUDA_CHECK;
hipFree(d_imgVGrad); CUDA_CHECK;
hipFree(d_imgLapl); CUDA_CHECK;
hipFree(d_imgAbsLapl); CUDA_CHECK;
// // show input image
showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
// // show output image: first convert to interleaved opencv format from the layered raw array
convert_layered_to_mat(mOut, imgOut);
showImage("Output", mOut, 100+w+40, 100);
// ### Display your own output images here as needed
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png", mIn * 255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_output.png", mOut * 255.f);
// free allocated arrays
delete[] imgIn;
delete[] imgOut;
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
| 735b2c1ad77eaa62b7b9631f3a3eddeeca13a12a.cu | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2015, September 7 - October 6
// ###
// ###
// ### Thomas Moellenhoff, Robert Maier, Caner Hazirbas
// ###
// ###
// ###
// ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED
// ###
// ###
#include "aux.h"
#include <iostream>
#include <cmath>
using namespace std;
// uncomment to use the camera
//#define CAMERA
// Forward-difference gradient of a layered (rrr...ggg...bbb) image.
// One thread per element of the w*h*nc array; the right/bottom image borders
// get a zero gradient (Neumann boundary).
__global__ void gradient (float *d_imgIn, float *d_gradH, float *d_gradV, int w, int h, int nc)
{
size_t ind = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
if (ind < w * h * nc)
{
// Gradient in horizontal direction
// (ind % w == w-1 <=> last column of a row, in every channel slice)
bool isBoundary = (ind % w == w - 1);
d_gradH[ind] = (isBoundary ? 0 : (d_imgIn[ind + 1] - d_imgIn[ind]));
// Gradient in vertical direction
// (position within the current w*h channel slice lies in the last row)
isBoundary = (ind % (w * h) >= (w * (h - 1)));
d_gradV[ind] = (isBoundary ? 0 : (d_imgIn[ind + w] - d_imgIn[ind]));
}
}
// Divergence of the gradient field = Laplacian. Uses backward differences so
// that it is the adjoint of the forward-difference gradient above; the
// left/top image borders contribute zero.
__global__ void divergence (float *d_imgHGrad, float *d_imgVGrad, float *d_imgLapl, int w, int h, int nc)
{
size_t ind = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
if (ind < w * h * nc)
{
// Backward difference gradient in horizontal direction
// (zero contribution on the first column of each row)
bool isBoundary = (ind % w == 0);
float horGrad = (isBoundary ? 0 : (d_imgHGrad[ind] - d_imgHGrad[ind - 1]));
// Backward difference gradient in vertical direction
// (zero contribution on the first row of the current channel slice)
isBoundary = (ind % (w * h) < w);
float verGrad = (isBoundary ? 0 : (d_imgVGrad[ind ] - d_imgVGrad[ind - w]));
d_imgLapl[ind] = horGrad + verGrad;
}
}
// Per-pixel Euclidean norm across the nc channels of a layered image:
// d_imgAbsLapl[p] = sqrt(sum_c d_imgLapl[p + c*w*h]^2).
// One thread per pixel (w*h threads cover all channels).
__global__ void l2_norm (float *d_imgLapl, float *d_imgAbsLapl, int w, int h, int nc)
{
    size_t ind = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
    if (ind < (size_t)w * h)
    {
        // Fix: accumulate in a register instead of read-modify-writing the
        // global output once per channel.
        float sumSq = 0.0f;
        for (int c = 0; c < nc; c++)
        {
            const float v = d_imgLapl[ind + (size_t)c * w * h];
            sumSq += v * v;
        }
        // Fix: sqrtf avoids the silent float -> double -> float round trip
        // that sqrt() forces on a float argument.
        d_imgAbsLapl[ind] = sqrtf(sumSq);
    }
}
// Entry point: parses CLI flags, loads an image (or grabs camera frames with
// -DCAMERA), computes the per-channel Laplacian on the GPU (forward-difference
// gradient followed by its backward-difference divergence), reduces it to a
// per-pixel L2 norm across channels, then shows/saves input and result.
// Flags: -i <image> (required without CAMERA), -repeats <n>, -gray.
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
cudaDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> -g <gamma>[-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << endl;
// Set the output image format
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
// Matrix for absolute of Laplacial
cv::Mat mOut(h, w, CV_32FC1);
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// allocate raw input image array
float *imgIn = new float[(size_t)w * h * nc];
// allocate raw output arrays for all the intermediate values
// (single channel: the L2 norm collapses the nc channels into one map)
float *imgOut = new float[(size_t)w * h];
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
convert_mat_to_layered (imgIn, mIn);
// MAIN COMPUTATION
// Init image array on the device
float *d_imgIn = NULL;
float *d_imgHGrad = NULL;
float *d_imgVGrad = NULL;
float *d_imgLapl = NULL;
float *d_imgAbsLapl = NULL;
cudaMalloc(&d_imgIn, w * h * nc * sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_imgHGrad, w * h * nc * sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_imgVGrad, w * h * nc * sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_imgLapl, w * h * nc * sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_imgAbsLapl, w * h * sizeof(float)); CUDA_CHECK;
// move from host to device memory
cudaMemcpy(d_imgIn, imgIn, w * h * nc * sizeof(float), cudaMemcpyHostToDevice); CUDA_CHECK;
dim3 block;
dim3 grid;
Timer timer; timer.start();
for (int rep = 0; rep < repeats; rep++)
{
// initialize block and grid size
// (ceil division so the 1D grid covers every element; kernels bounds-check the tail)
block = dim3(64, 1, 1);
grid = dim3((w * h * nc + block.x * block.y * block.z - 1) / (block.x * block.y * block.z), 1, 1);
gradient <<<grid, block>>> (d_imgIn, d_imgHGrad, d_imgVGrad, w, h, nc);
divergence <<<grid, block>>> (d_imgHGrad, d_imgVGrad, d_imgLapl, w, h, nc);
// adjust the grid size because now we only need 1 kernel per pixel for ALL the channels
grid = dim3((w * h + block.x * block.y * block.z - 1) / (block.x * block.y * block.z), 1, 1);
l2_norm <<<grid, block>>> (d_imgLapl, d_imgAbsLapl, w, h, nc);
}
timer.end(); float t = timer.get(); // elapsed time in seconds
cout << "average kernel time: " << t*1000/repeats << " ms" << endl;
// copy result back to host memory
cudaMemcpy(imgOut, d_imgAbsLapl, w * h * sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK;
// free the device memory
cudaFree(d_imgIn); CUDA_CHECK;
cudaFree(d_imgHGrad); CUDA_CHECK;
cudaFree(d_imgVGrad); CUDA_CHECK;
cudaFree(d_imgLapl); CUDA_CHECK;
cudaFree(d_imgAbsLapl); CUDA_CHECK;
// // show input image
showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
// // show output image: first convert to interleaved opencv format from the layered raw array
convert_layered_to_mat(mOut, imgOut);
showImage("Output", mOut, 100+w+40, 100);
// ### Display your own output images here as needed
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png", mIn * 255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_output.png", mOut * 255.f);
// free allocated arrays
delete[] imgIn;
delete[] imgOut;
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
|
3f6928fe2b3b49b3a9d1f0f3f0541335aec44eff.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include "linalg/strided_reduction.cuh"
#include "random/rng.cuh"
#include "reduce_hip.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
// Parameters for one strided-reduction test case.
template <typename T>
struct stridedReductionInputs {
T tolerance; // max allowed per-element deviation from the reference
int rows, cols; // matrix extent; reduction runs over rows, one output per column
unsigned long long int seed; // RNG seed so the random input is reproducible
};
// Launches the primitive under test: a strided (column-wise) reduction that
// squares each element before summing, i.e. a per-column sum of squares.
template <typename T>
void stridedReductionLaunch(T *dots, const T *data, int cols, int rows,
hipStream_t stream) {
stridedReduction(dots, data, cols, rows, (T)0, stream, false,
[] __device__(T in, int i) { return in * in; });
}
// gtest fixture: fills a random rows x cols matrix, computes the per-column
// sum of squares both with a GEMV-based reference (unaryAndGemv) and with the
// strided-reduction primitive, so the TEST_P body can compare the two.
template <typename T>
class stridedReductionTest
: public ::testing::TestWithParam<stridedReductionInputs<T>> {
protected:
void SetUp() override {
CUDA_CHECK(hipStreamCreate(&stream));
params = ::testing::TestWithParam<stridedReductionInputs<T>>::GetParam();
Random::Rng r(params.seed);
int rows = params.rows, cols = params.cols;
int len = rows * cols;
allocate(data, len);
allocate(dots_exp, cols); //expected dot products (from test)
allocate(dots_act, cols); //actual dot products (from prim)
r.uniform(data, len, T(-1.0), T(1.0),
stream); //initialize matrix to random
unaryAndGemv(dots_exp, data, cols, rows, stream);
stridedReductionLaunch(dots_act, data, cols, rows, stream);
}
// Releases the device buffers and the stream created in SetUp.
void TearDown() override {
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(dots_exp));
CUDA_CHECK(hipFree(dots_act));
CUDA_CHECK(hipStreamDestroy(stream));
}
protected:
stridedReductionInputs<T> params; // current test-case parameters
T *data, *dots_exp, *dots_act; // device buffers: input, reference, result
hipStream_t stream; // stream all work is enqueued on
};
// Test configurations: 1024 rows, power-of-two column counts, fixed seed.
const std::vector<stridedReductionInputs<float>> inputsf = {
{0.00001f, 1024, 32, 1234ULL},
{0.00001f, 1024, 64, 1234ULL},
{0.00001f, 1024, 128, 1234ULL},
{0.00001f, 1024, 256, 1234ULL}};
const std::vector<stridedReductionInputs<double>> inputsd = {
{0.000000001, 1024, 32, 1234ULL},
{0.000000001, 1024, 64, 1234ULL},
{0.000000001, 1024, 128, 1234ULL},
{0.000000001, 1024, 256, 1234ULL}};
// Compare the primitive's output against the GEMV reference within the
// per-type tolerance, for float and double instantiations.
typedef stridedReductionTest<float> stridedReductionTestF;
TEST_P(stridedReductionTestF, Result) {
ASSERT_TRUE(devArrMatch(dots_exp, dots_act, params.cols,
CompareApprox<float>(params.tolerance)));
}
typedef stridedReductionTest<double> stridedReductionTestD;
TEST_P(stridedReductionTestD, Result) {
ASSERT_TRUE(devArrMatch(dots_exp, dots_act, params.cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(stridedReductionTests, stridedReductionTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(stridedReductionTests, stridedReductionTestD,
::testing::ValuesIn(inputsd));
} // end namespace LinAlg
} // end namespace MLCommon
| 3f6928fe2b3b49b3a9d1f0f3f0541335aec44eff.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include "linalg/strided_reduction.cuh"
#include "random/rng.cuh"
#include "reduce.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
// Parameters for one strided-reduction test case.
template <typename T>
struct stridedReductionInputs {
T tolerance; // max allowed per-element deviation from the reference
int rows, cols; // matrix extent; reduction runs over rows, one output per column
unsigned long long int seed; // RNG seed so the random input is reproducible
};
// Launches the primitive under test: a strided (column-wise) reduction that
// squares each element before summing, i.e. a per-column sum of squares.
template <typename T>
void stridedReductionLaunch(T *dots, const T *data, int cols, int rows,
cudaStream_t stream) {
stridedReduction(dots, data, cols, rows, (T)0, stream, false,
[] __device__(T in, int i) { return in * in; });
}
// gtest fixture: fills a random rows x cols matrix, computes the per-column
// sum of squares both with a GEMV-based reference (unaryAndGemv) and with the
// strided-reduction primitive, so the TEST_P body can compare the two.
template <typename T>
class stridedReductionTest
: public ::testing::TestWithParam<stridedReductionInputs<T>> {
protected:
void SetUp() override {
CUDA_CHECK(cudaStreamCreate(&stream));
params = ::testing::TestWithParam<stridedReductionInputs<T>>::GetParam();
Random::Rng r(params.seed);
int rows = params.rows, cols = params.cols;
int len = rows * cols;
allocate(data, len);
allocate(dots_exp, cols); //expected dot products (from test)
allocate(dots_act, cols); //actual dot products (from prim)
r.uniform(data, len, T(-1.0), T(1.0),
stream); //initialize matrix to random
unaryAndGemv(dots_exp, data, cols, rows, stream);
stridedReductionLaunch(dots_act, data, cols, rows, stream);
}
// Releases the device buffers and the stream created in SetUp.
void TearDown() override {
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(dots_exp));
CUDA_CHECK(cudaFree(dots_act));
CUDA_CHECK(cudaStreamDestroy(stream));
}
protected:
stridedReductionInputs<T> params; // current test-case parameters
T *data, *dots_exp, *dots_act; // device buffers: input, reference, result
cudaStream_t stream; // stream all work is enqueued on
};
// Test configurations: 1024 rows, power-of-two column counts, fixed seed.
const std::vector<stridedReductionInputs<float>> inputsf = {
{0.00001f, 1024, 32, 1234ULL},
{0.00001f, 1024, 64, 1234ULL},
{0.00001f, 1024, 128, 1234ULL},
{0.00001f, 1024, 256, 1234ULL}};
const std::vector<stridedReductionInputs<double>> inputsd = {
{0.000000001, 1024, 32, 1234ULL},
{0.000000001, 1024, 64, 1234ULL},
{0.000000001, 1024, 128, 1234ULL},
{0.000000001, 1024, 256, 1234ULL}};
// Compare the primitive's output against the GEMV reference within the
// per-type tolerance, for float and double instantiations.
typedef stridedReductionTest<float> stridedReductionTestF;
TEST_P(stridedReductionTestF, Result) {
ASSERT_TRUE(devArrMatch(dots_exp, dots_act, params.cols,
CompareApprox<float>(params.tolerance)));
}
typedef stridedReductionTest<double> stridedReductionTestD;
TEST_P(stridedReductionTestD, Result) {
ASSERT_TRUE(devArrMatch(dots_exp, dots_act, params.cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(stridedReductionTests, stridedReductionTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(stridedReductionTests, stridedReductionTestD,
::testing::ValuesIn(inputsd));
} // end namespace LinAlg
} // end namespace MLCommon
|
cb9ab48f7f12807b92d1c61ff6d4fae20491842c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <cusolverDn.h>
#include <fstream>
#include <iostream>
using namespace std;
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
// Minimal host-side matrix wrapper used by main below.
typedef struct {
int idx; // NOTE(review): never read by main — looks like a size tag; confirm before removing
double *el; // heap-allocated elements; main indexes them column-major (el[row + col*lda])
} Matrix;
// Reads a small matrix from the file named in argv[1] (one size header value
// followed by lda*n doubles, column major), computes its full SVD
// A = U * S * VT on the GPU with hipSOLVER, and writes S / U / VT to
// S_diag.txt / U_rows.txt / VT_cols.txt.
int main(int argc, char*argv[])
{
if (argc<2) {
cout << "Input file not specified. Please, specify it as a first argument." << endl;
cout << "example: " << argv[0] << " train_data_matrix_T.txt" << endl;
return -1;
}
ifstream file(argv[1]);
ofstream S_diag("S_diag.txt");
ofstream U_rows("U_rows.txt");
ofstream VT_cols("VT_cols.txt");
if (!file)
{
cout << "Error opening file" << endl;
return -1;
}
int idx;
file >> idx;
if (argc>2) cout << "N=" << idx << endl;
// --- gesvd only supports Nrows >= Ncols
// --- column major memory ordering
const int m = 3;
const int n = 2;
const int lda = m;
// --- CUDA solver initialization
hipsolverDnHandle_t solver_handle;
hipblasHandle_t cublasH = NULL;
Matrix A, U, VT; //host matrices
A.el = new double[lda*n]; // input matrix A (m x n, column major)
// Fix: with jobu='A' gesvd returns the full m x m matrix U; the previous
// lda*n allocation was too small and the device->host copy below
// overflowed the heap buffer.
U.el = new double[lda*m];
VT.el = new double[lda*n]; // n x n VT stored with leading dimension lda
//reading from file into matrices
for (long i=0; i<(lda*n); i++){
file >> A.el[i];
double io = A.el[i];
printf("%1.9f, ",io);
}
printf("\n");
double S[n]; // singular values
double *d_rwork = NULL;
// --- cuSOLVE input/output parameters/arrays
int work_size = 0;
int info_gpu = 0;
int *devInfo; hipMalloc ((void**)&devInfo, sizeof(int));
// create cusolverDn/cublas handle
// Fix: the solver/blas/runtime status enums are 0 on SUCCESS, so the former
// assert(call) pattern fired exactly when a call succeeded (and an NDEBUG
// build dropped the calls entirely). Run each call unconditionally and
// assert on the captured status instead.
int status = hipsolverDnCreate(&solver_handle);
assert(status == 0); // HIPSOLVER_STATUS_SUCCESS == 0
status = hipblasCreate(&cublasH);
assert(status == 0); // HIPBLAS_STATUS_SUCCESS == 0
// --- Setting the device matrix and moving the host matrix to the device
double *d_A; hipMalloc ((void**)&d_A , sizeof(double)*lda*n);
hipMemcpy(d_A, A.el, sizeof(double)*lda*n, hipMemcpyHostToDevice);
// --- device side SVD workspace and matrices
double *d_S; hipMalloc ((void**)&d_S , sizeof(double)*n);
double *d_U; hipMalloc ((void**)&d_U , sizeof(double)*lda*m);
double *d_VT; hipMalloc ((void**)&d_VT , sizeof(double)*lda*n);
// --- CUDA SVD initialization
status = hipsolverDnDgesvd_bufferSize(solver_handle,m,n,&work_size);
assert(status == 0);
double *d_work;
status = hipMalloc((void**)&d_work , sizeof(double)*work_size);
assert(status == hipSuccess);
// --- CUDA SVD execution ('A','A': compute full U and VT)
status = hipsolverDnDgesvd (solver_handle,'A','A',m,n,d_A,lda,d_S,d_U,lda,d_VT,lda,d_work,work_size,d_rwork,devInfo);
assert(status == 0);
status = hipDeviceSynchronize();
assert(status == hipSuccess);
// --- Moving the results from device to host
status = hipMemcpy(U.el , d_U , sizeof(double)*lda*m, hipMemcpyDeviceToHost);
assert(status == hipSuccess);
status = hipMemcpy(VT.el, d_VT, sizeof(double)*lda*n, hipMemcpyDeviceToHost);
assert(status == hipSuccess);
status = hipMemcpy(S , d_S , sizeof(double)*n, hipMemcpyDeviceToHost);
assert(status == hipSuccess);
status = hipMemcpy(&info_gpu, devInfo, sizeof(int), hipMemcpyDeviceToHost);
assert(status == hipSuccess);
assert(0 == info_gpu); // 0 == gesvd converged
printf("Singular values\n");
for (int k=0; k<n; k++){
S_diag << S[k] << " ";
}
printf("=====\n");
printf("\nLeft singular vectors - For y = A * x, the columns of U span the space of y\n");
for (int k=0; k<m; k++){
for (int j=0; j<m; j++)
U_rows << U.el[k+j*lda] << " ";
U_rows << endl;
}
printf("=====\n");
printf("\nRight singular vectors - For y = A * x, the columns of V span the space of x\n");
for (int k=0; k<n; k++){
for (int j=0; j<n; j++)
VT_cols << VT.el[k+j*lda] << " ";
VT_cols << endl;
}
// Fix: release device and host resources (previously leaked; the unused
// d_W scratch allocation was removed entirely).
hipFree(devInfo);
hipFree(d_A);
hipFree(d_S);
hipFree(d_U);
hipFree(d_VT);
hipFree(d_work);
delete[] A.el;
delete[] U.el;
delete[] VT.el;
if (cublasH) hipblasDestroy(cublasH);
if (solver_handle) hipsolverDnDestroy(solver_handle);
hipDeviceReset();
return 0;
}
| cb9ab48f7f12807b92d1c61ff6d4fae20491842c.cu | #include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cusolverDn.h>
#include <fstream>
#include <iostream>
using namespace std;
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
// Minimal host-side matrix wrapper used by main below.
typedef struct {
int idx; // NOTE(review): never read by main — looks like a size tag; confirm before removing
double *el; // heap-allocated elements; main indexes them column-major (el[row + col*lda])
} Matrix;
int main(int argc, char*argv[])
{
if (argc<2) {
cout << "Input file not specified. Please, specify it as a first argument." << endl;
cout << "example: " << argv[0] << " train_data_matrix_T.txt" << endl;
return -1;
}
ifstream file(argv[1]);
ofstream S_diag("S_diag.txt");
ofstream U_rows("U_rows.txt");
ofstream VT_cols("VT_cols.txt");
if (!file)
{
cout << "Error opening file" << endl;
return -1;
}
int idx;
file >> idx;
if (argc>2) cout << "N=" << idx << endl;
// --- gesvd only supports Nrows >= Ncols
// --- column major memory ordering
const int m = 3;
const int n = 2;
const int lda = m;
// --- CUDA solver initialization
cusolverDnHandle_t solver_handle;
cublasHandle_t cublasH = NULL;
Matrix A, U, VT; //host matrices
A.el = new double[lda*n]; //....
U.el = new double[lda*n]; //...
VT.el = new double[lda*n]; //..
//reading from file into matrices
for (long i=0; i<(lda*n); i++){
file >> A.el[i];
double io = A.el[i];
printf("%1.9f, ",io);
}
printf("\n");
double S[n]; // singular value
double *d_rwork = NULL;
// --- cuSOLVE input/output parameters/arrays
int work_size = 0;
int info_gpu = 0;
int *devInfo; cudaMalloc ((void**)&devInfo, sizeof(int));
// create cusolverDn/cublas handle
assert(cusolverDnCreate(&solver_handle));
assert(cublasCreate(&cublasH));
// --- Setting the device matrix and moving the host matrix to the device
double *d_A; cudaMalloc ((void**)&d_A , sizeof(double)*lda*n);
cudaMemcpy(d_A, A.el, sizeof(double)*lda*n, cudaMemcpyHostToDevice);
// --- device side SVD workspace and matrices
double *d_S; cudaMalloc ((void**)&d_S , sizeof(double)*n);
double *d_U; cudaMalloc ((void**)&d_U , sizeof(double)*lda*m);
double *d_VT; cudaMalloc ((void**)&d_VT , sizeof(double)*lda*n);
double *d_W; cudaMalloc ((void**)&d_W , sizeof(double)*lda*n);
// --- CUDA SVD initialization
assert(cusolverDnDgesvd_bufferSize(solver_handle,m,n,&work_size));
double *d_work; assert(cudaMalloc((void**)&d_work , sizeof(double)*work_size));
// --- CUDA SVD execution
assert(cusolverDnDgesvd (solver_handle,'A','A',m,n,d_A,lda,d_S,d_U,lda,d_VT,lda,d_work,work_size,d_rwork,devInfo));
assert(cudaDeviceSynchronize());
// --- Moving the results from device to host
assert(cudaMemcpy(U.el , d_U , sizeof(double)*lda*m, cudaMemcpyDeviceToHost));
assert(cudaMemcpy(VT.el, d_VT, sizeof(double)*lda*n, cudaMemcpyDeviceToHost));
assert(cudaMemcpy(S , d_S , sizeof(double)*n, cudaMemcpyDeviceToHost));
assert(cudaMemcpy(&info_gpu, devInfo, sizeof(int), cudaMemcpyDeviceToHost));
assert(0 == info_gpu);
printf("Singular values\n");
for (int k=0; k<n; k++){
S_diag << S[k] << " ";
}
printf("=====\n");
printf("\nLeft singular vectors - For y = A * x, the columns of U span the space of y\n");
for (int k=0; k<m; k++){
for (int j=0; j<m; j++)
U_rows << U.el[k+j*lda] << " ";
U_rows << endl;
}
printf("=====\n");
printf("\nRight singular vectors - For y = A * x, the columns of V span the space of x\n");
for (int k=0; k<n; k++){
for (int j=0; j<n; j++)
VT_cols << VT.el[k+j*lda] << " ";
VT_cols << endl;
}
if (solver_handle) cusolverDnDestroy(solver_handle);
cudaDeviceReset();
return 0;
}
|
187f3327938ea3b8fbb157bcf742d0e9de5f0e36.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "column_filter.h"
namespace filter
{
template void linearColumn<float4, float4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
| 187f3327938ea3b8fbb157bcf742d0e9de5f0e36.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "column_filter.h"
namespace filter
{
template void linearColumn<float4, float4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
65173d8d5c8157454b6f5622a1376540d0d982e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/symmetric_dropout_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SymmetricDropoutForward(const int n, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (mask[index % (n/2)] > threshold) * scale;
}
}
template <typename Dtype>
void SymmetricDropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (this->phase_ == TRAIN) {
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count/2, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SymmetricDropoutForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, bottom_data, top_data);
}
}
template <typename Dtype>
__global__ void SymmetricDropoutBackward(const int n, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * scale * (mask[index % (n/2)] > threshold);
}
}
template <typename Dtype>
void SymmetricDropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->phase_ == TRAIN) {
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SymmetricDropoutBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SymmetricDropoutLayer);
} // namespace caffe
| 65173d8d5c8157454b6f5622a1376540d0d982e3.cu | #include <vector>
#include "caffe/layers/symmetric_dropout_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SymmetricDropoutForward(const int n, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (mask[index % (n/2)] > threshold) * scale;
}
}
template <typename Dtype>
void SymmetricDropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (this->phase_ == TRAIN) {
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count/2, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
SymmetricDropoutForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, bottom_data, top_data);
}
}
template <typename Dtype>
__global__ void SymmetricDropoutBackward(const int n, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * scale * (mask[index % (n/2)] > threshold);
}
}
template <typename Dtype>
void SymmetricDropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->phase_ == TRAIN) {
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
SymmetricDropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SymmetricDropoutLayer);
} // namespace caffe
|
7da8800513460bd5f695c80a3b39afc41f17720f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cassert>
#include <cstdio>
#include "choose.h"
//#define N 4
#define MAXWRITE 4
__global__ void compact(int *out, int*in) {
__shared__ unsigned num[N];
__shared__ unsigned idx[N];
unsigned t = threadIdx.x;
// (i) number of times to repeat element
num[t] = CHOOSE(in[t], MAXWRITE);
// (ii) compute indexes for scatter
// using an exclusive prefix sum
__syncthreads();
if (t < N/2) {
idx[2*t] = num[2*t];
idx[2*t+1] = num[2*t+1];
}
// (a) upsweep
int offset = 1;
for (unsigned d = N/2; d > 0; d /= 2) {
__syncthreads();
if (t < d) {
int ai = offset * (2 * t + 1) - 1;
int bi = offset * (2 * t + 2) - 1;
idx[bi] += idx[ai];
}
offset *= 2;
}
// (b) downsweep
if (t == 0) idx[N-1] = 0;
for (unsigned d = 1; d < N; d *= 2) {
offset /= 2;
__syncthreads();
if (t < d) {
int ai = offset * (2 * t + 1) - 1;
int bi = offset * (2 * t + 2) - 1;
int temp = idx[ai];
idx[ai] = idx[bi];
idx[bi] += temp;
}
}
__syncthreads();
// end of exclusive prefix sum of flag into idx
// (iii) repeat element num times
for (unsigned i = 0; i < num[t]; ++i) {
out[idx[t]+i] = in[t];
}
}
int main(int argc, char **argv) {
// test data
size_t ArraySize = N * sizeof(int);
size_t OutArraySize = (MAXWRITE-1) * N * sizeof(int);
int *in = (int *)malloc(ArraySize);
int *out = (int *)malloc(OutArraySize);
klee_make_symbolic(in, ArraySize, "in");
// create some memory objects on the device
int *d_in;
int *d_out;
hipMalloc((void **)&d_in, ArraySize);
hipMalloc((void **)&d_out, ArraySize);
// memcpy into these objects
hipMemcpy(d_in, in, ArraySize, hipMemcpyHostToDevice);
// run the kernel
hipLaunchKernelGGL(( compact), dim3(1),dim3(N), 0, 0, d_out, d_in);
// memcpy back the result
hipMemcpy(out, d_out, OutArraySize, hipMemcpyDeviceToHost);
#ifndef _SYM
// check results
unsigned idx = 0;
for (unsigned i=0; i<N; ++i) {
unsigned num = CHOOSE(in[i], MAXWRITE);
for (unsigned j=0; j<num; ++j) {
assert(out[idx+j] == in[i]);
}
idx += num;
}
printf("TEST PASSED\n");
#endif
// cleanup
free(in);
free(out);
hipFree(d_in);
hipFree(d_out);
return 0;
}
| 7da8800513460bd5f695c80a3b39afc41f17720f.cu | #include <cassert>
#include <cstdio>
#include "choose.h"
//#define N 4
#define MAXWRITE 4
__global__ void compact(int *out, int*in) {
__shared__ unsigned num[N];
__shared__ unsigned idx[N];
unsigned t = threadIdx.x;
// (i) number of times to repeat element
num[t] = CHOOSE(in[t], MAXWRITE);
// (ii) compute indexes for scatter
// using an exclusive prefix sum
__syncthreads();
if (t < N/2) {
idx[2*t] = num[2*t];
idx[2*t+1] = num[2*t+1];
}
// (a) upsweep
int offset = 1;
for (unsigned d = N/2; d > 0; d /= 2) {
__syncthreads();
if (t < d) {
int ai = offset * (2 * t + 1) - 1;
int bi = offset * (2 * t + 2) - 1;
idx[bi] += idx[ai];
}
offset *= 2;
}
// (b) downsweep
if (t == 0) idx[N-1] = 0;
for (unsigned d = 1; d < N; d *= 2) {
offset /= 2;
__syncthreads();
if (t < d) {
int ai = offset * (2 * t + 1) - 1;
int bi = offset * (2 * t + 2) - 1;
int temp = idx[ai];
idx[ai] = idx[bi];
idx[bi] += temp;
}
}
__syncthreads();
// end of exclusive prefix sum of flag into idx
// (iii) repeat element num times
for (unsigned i = 0; i < num[t]; ++i) {
out[idx[t]+i] = in[t];
}
}
int main(int argc, char **argv) {
// test data
size_t ArraySize = N * sizeof(int);
size_t OutArraySize = (MAXWRITE-1) * N * sizeof(int);
int *in = (int *)malloc(ArraySize);
int *out = (int *)malloc(OutArraySize);
klee_make_symbolic(in, ArraySize, "in");
// create some memory objects on the device
int *d_in;
int *d_out;
cudaMalloc((void **)&d_in, ArraySize);
cudaMalloc((void **)&d_out, ArraySize);
// memcpy into these objects
cudaMemcpy(d_in, in, ArraySize, cudaMemcpyHostToDevice);
// run the kernel
compact<<<1,N>>>(d_out, d_in);
// memcpy back the result
cudaMemcpy(out, d_out, OutArraySize, cudaMemcpyDeviceToHost);
#ifndef _SYM
// check results
unsigned idx = 0;
for (unsigned i=0; i<N; ++i) {
unsigned num = CHOOSE(in[i], MAXWRITE);
for (unsigned j=0; j<num; ++j) {
assert(out[idx+j] == in[i]);
}
idx += num;
}
printf("TEST PASSED\n");
#endif
// cleanup
free(in);
free(out);
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
aa50e8c822e987c7a81c15e9c2610d08b52ae874.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
namespace oneflow {
namespace {
template<typename T, typename K>
__global__ void OneHotEncodeGpu(int64_t elem_cnt, const int64_t depth, const T on_value,
const T off_value, const K* indices, T* out) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const int64_t row = i / depth;
const int64_t col = i - row * depth;
const int64_t idx = indices[row];
assert(idx >= 0 && idx < depth);
out[i] = (idx == col) ? on_value : off_value;
}
}
} // namespace
template<typename T, typename K>
class GpuOneHotKernel final : public user_op::OpKernel {
public:
GpuOneHotKernel() = default;
~GpuOneHotKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* indices = ctx->Tensor4ArgNameAndIndex("indices", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const int64_t num_indices = indices->shape().elem_cnt();
const int64_t depth = ctx->Attr<int64_t>("depth");
const DataType dtype = ctx->Attr<DataType>("dtype");
const T on_value = IsFloatingDataType(dtype)
? static_cast<T>(ctx->Attr<double>("floating_on_value"))
: static_cast<T>(ctx->Attr<int64_t>("integer_on_value"));
const T off_value = IsFloatingDataType(dtype)
? static_cast<T>(ctx->Attr<double>("floating_off_value"))
: static_cast<T>(ctx->Attr<int64_t>("integer_off_value"));
RUN_CUDA_KERNEL((OneHotEncodeGpu<T, K>), ctx->device_ctx(), num_indices * depth,
num_indices * depth, depth, on_value, off_value, indices->dptr<K>(),
out->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_GPU_ONE_HOT_KERNEL(dtype, itype) \
REGISTER_USER_KERNEL("one_hot").SetCreateFn<GpuOneHotKernel<dtype, itype>>().SetIsMatchedHob( \
(user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("indices", 0) == GetDataType<itype>::value) \
& (user_op::HobDataType("out", 0) == GetDataType<dtype>::value));
REGISTER_GPU_ONE_HOT_KERNEL(int32_t, int32_t)
REGISTER_GPU_ONE_HOT_KERNEL(int32_t, int64_t)
REGISTER_GPU_ONE_HOT_KERNEL(int64_t, int32_t)
REGISTER_GPU_ONE_HOT_KERNEL(int64_t, int64_t)
REGISTER_GPU_ONE_HOT_KERNEL(float, int32_t)
REGISTER_GPU_ONE_HOT_KERNEL(float, int64_t)
REGISTER_GPU_ONE_HOT_KERNEL(double, int32_t)
REGISTER_GPU_ONE_HOT_KERNEL(double, int64_t)
} // namespace oneflow
| aa50e8c822e987c7a81c15e9c2610d08b52ae874.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
namespace oneflow {
namespace {
template<typename T, typename K>
__global__ void OneHotEncodeGpu(int64_t elem_cnt, const int64_t depth, const T on_value,
const T off_value, const K* indices, T* out) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const int64_t row = i / depth;
const int64_t col = i - row * depth;
const int64_t idx = indices[row];
assert(idx >= 0 && idx < depth);
out[i] = (idx == col) ? on_value : off_value;
}
}
} // namespace
template<typename T, typename K>
class GpuOneHotKernel final : public user_op::OpKernel {
public:
GpuOneHotKernel() = default;
~GpuOneHotKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* indices = ctx->Tensor4ArgNameAndIndex("indices", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const int64_t num_indices = indices->shape().elem_cnt();
const int64_t depth = ctx->Attr<int64_t>("depth");
const DataType dtype = ctx->Attr<DataType>("dtype");
const T on_value = IsFloatingDataType(dtype)
? static_cast<T>(ctx->Attr<double>("floating_on_value"))
: static_cast<T>(ctx->Attr<int64_t>("integer_on_value"));
const T off_value = IsFloatingDataType(dtype)
? static_cast<T>(ctx->Attr<double>("floating_off_value"))
: static_cast<T>(ctx->Attr<int64_t>("integer_off_value"));
RUN_CUDA_KERNEL((OneHotEncodeGpu<T, K>), ctx->device_ctx(), num_indices * depth,
num_indices * depth, depth, on_value, off_value, indices->dptr<K>(),
out->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_GPU_ONE_HOT_KERNEL(dtype, itype) \
REGISTER_USER_KERNEL("one_hot").SetCreateFn<GpuOneHotKernel<dtype, itype>>().SetIsMatchedHob( \
(user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("indices", 0) == GetDataType<itype>::value) \
& (user_op::HobDataType("out", 0) == GetDataType<dtype>::value));
REGISTER_GPU_ONE_HOT_KERNEL(int32_t, int32_t)
REGISTER_GPU_ONE_HOT_KERNEL(int32_t, int64_t)
REGISTER_GPU_ONE_HOT_KERNEL(int64_t, int32_t)
REGISTER_GPU_ONE_HOT_KERNEL(int64_t, int64_t)
REGISTER_GPU_ONE_HOT_KERNEL(float, int32_t)
REGISTER_GPU_ONE_HOT_KERNEL(float, int64_t)
REGISTER_GPU_ONE_HOT_KERNEL(double, int32_t)
REGISTER_GPU_ONE_HOT_KERNEL(double, int64_t)
} // namespace oneflow
|
181c8866cd5a89d91799715c14b39695c1375227.hip | // !!! This is a file automatically generated by hipify!!!
#include "../gpu_inc/SGM.cuh"
GPU_SGM::GPU_SGM()
{
hipSetDevice(0);
checkCudaErrors(hipStreamCreate(&stream1));
checkCudaErrors(hipStreamCreate(&stream2));
checkCudaErrors(hipStreamCreate(&stream3));
checkCudaErrors(hipStreamCreate(&stream4));
checkCudaErrors(hipStreamCreate(&stream5));
checkCudaErrors(hipStreamCreate(&stream6));
checkCudaErrors(hipStreamCreate(&stream7));
checkCudaErrors(hipStreamCreate(&stream8));
checkCudaErrors(hipMalloc((void**)&d_img_l, IMG_H* IMG_W * sizeof(uchar)));
checkCudaErrors(hipMalloc((void**)&d_img_r, IMG_H * IMG_W * sizeof(uchar)));
checkCudaErrors(hipMalloc((void**)&d_disp, IMG_H * IMG_W * sizeof(uchar)));
checkCudaErrors(hipMalloc((void**)&d_filtered_disp, IMG_H * IMG_W * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_cost_table_l, IMG_H * IMG_W * sizeof(uint64_t)));
checkCudaErrors(hipMalloc((void**)&d_cost_table_r, IMG_H * IMG_W * sizeof(uint64_t)));
checkCudaErrors(hipMalloc((void**)&d_cost, IMG_H * IMG_W * MAX_DISP * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_L1, IMG_H * IMG_W * MAX_DISP * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_L2, IMG_H * IMG_W * MAX_DISP * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_min_L1, IMG_H * IMG_W * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_min_L2, IMG_H * IMG_W * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_L3, IMG_H * IMG_W * MAX_DISP * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_L4, IMG_H * IMG_W * MAX_DISP * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_min_L3, IMG_H * IMG_W * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_min_L4, IMG_H * IMG_W * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_L5, IMG_H * IMG_W * MAX_DISP * sizeof(short)));
checkCudaErrors(hipMalloc((void**)&d_L6, IMG_H * IMG_W * MAX_DISP * sizeof(short)));
checkCudaErrors(hipMalloc((void**)&d_min_L5, IMG_H * IMG_W * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_min_L6, IMG_H * IMG_W * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_L7, IMG_H * IMG_W * MAX_DISP * sizeof(short)));
checkCudaErrors(hipMalloc((void**)&d_L8, IMG_H * IMG_W * MAX_DISP * sizeof(short)));
checkCudaErrors(hipMalloc((void**)&d_min_L7, IMG_H * IMG_W * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_min_L8, IMG_H * IMG_W * sizeof(float)));
P1 = 10;
P2 = 100;
checkCudaErrors(hipMalloc((void**)&d_label, IMG_H * IMG_W * sizeof(int)));
checkCudaErrors(hipMalloc((void**)&d_area, IMG_H * IMG_W * sizeof(int)));
disp.create(IMG_H, IMG_W, CV_8UC1);
filtered_disp.create(IMG_H, IMG_W, CV_32FC1);
colored_disp.create(IMG_H, IMG_W, CV_8UC3);
disp_cnt = 0;
}
GPU_SGM::~GPU_SGM()
{
checkCudaErrors(hipFree(d_img_l));
checkCudaErrors(hipFree(d_img_r));
checkCudaErrors(hipFree(d_disp));
checkCudaErrors(hipFree(d_filtered_disp));
checkCudaErrors(hipFree(d_cost_table_l));
checkCudaErrors(hipFree(d_cost_table_r));
checkCudaErrors(hipFree(d_cost));
checkCudaErrors(hipFree(d_L1));
checkCudaErrors(hipFree(d_L2));
checkCudaErrors(hipFree(d_min_L1));
checkCudaErrors(hipFree(d_min_L2));
checkCudaErrors(hipFree(d_L3));
checkCudaErrors(hipFree(d_L4));
checkCudaErrors(hipFree(d_min_L3));
checkCudaErrors(hipFree(d_min_L4));
checkCudaErrors(hipFree(d_L5));
checkCudaErrors(hipFree(d_L6));
checkCudaErrors(hipFree(d_min_L5));
checkCudaErrors(hipFree(d_min_L6));
checkCudaErrors(hipFree(d_L7));
checkCudaErrors(hipFree(d_L8));
checkCudaErrors(hipFree(d_min_L7));
checkCudaErrors(hipFree(d_min_L8));
checkCudaErrors(hipFree(d_label));
checkCudaErrors(hipFree(d_area));
checkCudaErrors(hipStreamDestroy(stream1));
checkCudaErrors(hipStreamDestroy(stream2));
checkCudaErrors(hipStreamDestroy(stream3));
checkCudaErrors(hipStreamDestroy(stream4));
checkCudaErrors(hipStreamDestroy(stream5));
checkCudaErrors(hipStreamDestroy(stream6));
checkCudaErrors(hipStreamDestroy(stream7));
checkCudaErrors(hipStreamDestroy(stream8));
}
void GPU_SGM::process(Mat &img_l, Mat &img_r)
{
this->img_l = img_l;
this->img_r = img_r;
hipSetDevice(0);
hipMemcpyAsync(d_img_l, img_l.data, IMG_H* IMG_W * sizeof(uchar), hipMemcpyHostToDevice, stream1);
hipMemcpyAsync(d_img_r, img_r.data, IMG_H* IMG_W * sizeof(uchar), hipMemcpyHostToDevice, stream2);
hipStreamSynchronize(stream1);
hipStreamSynchronize(stream2);
double be = get_cur_ms();
dim3 grid, block;
grid.x = (IMG_W - 1) / 32 + 1;
grid.y = (IMG_H - 1) / 32 + 1;
block.x = 32;
block.y = 32;
cu_build_cost_table << <grid, block, 0, stream1 >> > (d_img_l, d_img_r, d_cost_table_l, d_cost_table_r, IMG_W, IMG_H, CU_WIN_W, CU_WIN_H);
cu_build_dsi_from_table << <grid, block, 0, stream1 >> > (d_cost_table_l, d_cost_table_r, d_cost, IMG_W, IMG_H, MAX_DISP);
hipDeviceSynchronize();
printf("build cost takes %lf ms\n", get_cur_ms() - be);
be = get_cur_ms();
grid.x = (IMG_W - 1) / 32 + 1;
grid.y = (MAX_DISP - 1) / 32 + 1;
cu_cost_horizontal_filter << <grid, block, 0, stream1 >> > (d_cost, IMG_W, IMG_H, MAX_DISP, CU_COST_WIN_W);
cu_cost_vertical_filter << <grid, block, 0, stream1 >> > (d_cost, IMG_W, IMG_H, MAX_DISP, CU_COST_WIN_H);
//cu_cost_horizontal_filter_new << <grid, block, 0, stream1 >> > (d_cost, d_L1, IMG_W, IMG_H, MAX_DISP, CU_COST_WIN_W);
//cu_cost_vertical_filter_new << <grid, block, 0, stream2 >> > (d_cost, d_L2, IMG_W, IMG_H, MAX_DISP, CU_COST_WIN_H);
//hipStreamSynchronize(stream1);
//hipStreamSynchronize(stream2);
//cu_cost_filter << <grid, block, 0, stream1 >> > (d_cost, d_L1, d_L2, IMG_W, IMG_H, MAX_DISP);
hipDeviceSynchronize();
printf("cost filter takes %lf ms\n", get_cur_ms() - be);
be = get_cur_ms();
dim3 dp_grid, dp_block;
dp_grid.x = IMG_W;
dp_grid.y = 1;
dp_block.x = MAX_DISP; // for dp syncronize
dp_block.y = 1;
cu_dp_L1 << <dp_grid, dp_block, 0, stream1 >> > (d_cost, d_L1, d_min_L1, IMG_W, IMG_H, MAX_DISP, P1, P2);
cu_dp_L2 << <dp_grid, dp_block, 0, stream2 >> > (d_cost, d_L2, d_min_L2, IMG_W, IMG_H, MAX_DISP, P1, P2);
cu_dp_L3 << <dp_grid, dp_block, 0, stream3 >> > (d_cost, d_L3, d_min_L3, IMG_W, IMG_H, MAX_DISP, P1, P2);
cu_dp_L4 << <dp_grid, dp_block, 0, stream4 >> > (d_cost, d_L4, d_min_L4, IMG_W, IMG_H, MAX_DISP, P1, P2);
if (CU_USE_8_PATH)
{
//for (int i = 0; i < IMG_H; i++)
//{
// cu_dp_L5 << <dp_grid, dp_block, 0, stream5 >> > (d_cost, d_L5, d_min_L5, i, IMG_W, IMG_H, MAX_DISP, P1, P2);
// cu_dp_L6 << <dp_grid, dp_block, 0, stream6 >> > (d_cost, d_L6, d_min_L6, i, IMG_W, IMG_H, MAX_DISP, P1, P2);
// cu_dp_L7 << <dp_grid, dp_block, 0, stream7 >> > (d_cost, d_L7, d_min_L7, IMG_H - 1 - i, IMG_W, IMG_H, MAX_DISP, P1, P2);
// cu_dp_L8 << <dp_grid, dp_block, 0, stream8 >> > (d_cost, d_L8, d_min_L8, IMG_H - 1 - i, IMG_W, IMG_H, MAX_DISP, P1, P2);
//}
// use truncated dp to approximate the original method
cu_dp_L5_truncated << <dp_grid, dp_block, 0, stream5 >> > (d_cost, d_L5, d_min_L5, IMG_W, IMG_H, MAX_DISP, P1, P2);
cu_dp_L6_truncated << <dp_grid, dp_block, 0, stream6 >> > (d_cost, d_L6, d_min_L6, IMG_W, IMG_H, MAX_DISP, P1, P2);
cu_dp_L7_truncated << <dp_grid, dp_block, 0, stream7 >> > (d_cost, d_L7, d_min_L7, IMG_W, IMG_H, MAX_DISP, P1, P2);
cu_dp_L8_truncated << <dp_grid, dp_block, 0, stream8 >> > (d_cost, d_L8, d_min_L8, IMG_W, IMG_H, MAX_DISP, P1, P2);
}
hipDeviceSynchronize();
printf("dp takes %lf ms\n", get_cur_ms() - be);
be = get_cur_ms();
grid.x = 512;
grid.y = 512;
aggregation << <grid, block, 0, stream1 >> > (d_cost, d_L1, d_L2, d_L3, d_L4, d_L5, d_L6, d_L7, d_L8, IMG_W, IMG_H, MAX_DISP);
grid.x = (IMG_W - 1) / 32 + 1;
grid.y = (IMG_H - 1) / 32 + 1;
wta << <grid, block, 0, stream1 >> >(d_cost, d_disp, IMG_W, IMG_H, MAX_DISP, CU_UNIQUE_RATIO, INVALID_DISP);
hipDeviceSynchronize();
printf("wta takes %lf ms\n", get_cur_ms() - be);
be = get_cur_ms();
cu_subpixel << <grid, block, 0, stream1 >> > (d_cost, d_disp, d_filtered_disp, IMG_W, IMG_H, MAX_DISP, INVALID_DISP);
cu_median_filter << <grid, block, 0, stream1 >> > (d_filtered_disp, IMG_W, IMG_H, MAX_DISP, CU_MEDIAN_FILTER_W, CU_MEDIAN_FILTER_H);
cu_speckle_filter_init << <grid, block, 0, stream2 >> > (d_label, d_area, IMG_W, IMG_H);
hipStreamSynchronize(stream1);
hipStreamSynchronize(stream2);
cu_speckle_filter_union_find << <grid, block, 0, stream1 >> > (d_filtered_disp, d_label, d_area, IMG_W, IMG_H, CU_SPECKLE_DIS);
cu_speckle_filter_sum_up << <grid, block, 0, stream1 >> > (d_label, d_area, IMG_W, IMG_H);
cu_speckle_filter_end << <grid, block, 0, stream1 >> > (d_filtered_disp, d_label, d_area, IMG_W, IMG_H, INVALID_DISP, CU_SPECKLE_SIZE);
hipDeviceSynchronize();
printf("cuda post_filter takes %lf ms\n", get_cur_ms() - be);
hipMemcpyAsync(filtered_disp.data, d_filtered_disp, IMG_H * IMG_W * sizeof(float), hipMemcpyDeviceToHost, stream1);
//hipMemcpyAsync(disp.data, d_disp, IMG_H * IMG_W * sizeof(uchar), hipMemcpyDeviceToHost, stream2);
}
void GPU_SGM::show_disp(Mat &debug_view)
{
// left border invalid
for (int i = 0; i < filtered_disp.rows; i++)
{
float *ptr = filtered_disp.ptr<float>(i);
for (int j = 0; j < MAX_DISP / SCALE; j++)
{
ptr[j] = INVALID_DISP;
}
}
// convert to RGB for better observation
colormap();
Mat tmp;
debug_view = debug_view.zeros(IMG_H * 2, IMG_W, CV_8UC3);
tmp = debug_view(Rect(0, 0, IMG_W, IMG_H));
cvtColor(img_l, img_l, CV_GRAY2BGR);
img_l.copyTo(tmp);
tmp = debug_view(Rect(0, IMG_H - 1, IMG_W, IMG_H));
colored_disp.copyTo(tmp);
}
// Converts filtered_disp (float disparities) into colored_disp, a BGR
// false-color visualization. Disparities above MAX_DISP - 1 (invalid) become
// black; valid values are rescaled to [0, 256) and mapped through a
// five-segment blue -> cyan -> green -> orange -> red ramp.
// NOTE(review): 256 / (MAX_DISP) is integer division — it becomes 0 when
// MAX_DISP > 256; confirm MAX_DISP divides 256.
void GPU_SGM::colormap()
{
	float disp_value = 0;
	for (int i = 0; i < filtered_disp.rows; i++)
	{
		for (int j = 0; j < filtered_disp.cols; j++)
		{
			disp_value = filtered_disp.at<float>(i, j);
			//disp_value = disp.at<uchar>(i, j);
			if (disp_value > MAX_DISP - 1)
			{
				// Invalid disparity -> black.
				colored_disp.at<Vec3b>(i, j)[0] = 0;
				colored_disp.at<Vec3b>(i, j)[1] = 0;
				colored_disp.at<Vec3b>(i, j)[2] = 0;
			}
			else
			{
				// Rescale to [0, 256) so the ramp below is independent of MAX_DISP.
				disp_value *= (256 / (MAX_DISP));
				if (disp_value <= 51)
				{
					// Segment 1: blue -> cyan (green channel rises).
					colored_disp.at<Vec3b>(i, j)[0] = 255;
					colored_disp.at<Vec3b>(i, j)[1] = disp_value * 5;
					colored_disp.at<Vec3b>(i, j)[2] = 0;
				}
				else if (disp_value <= 102)
				{
					// Segment 2: cyan -> green (blue channel falls).
					disp_value -= 51;
					colored_disp.at<Vec3b>(i, j)[0] = 255 - disp_value * 5;
					colored_disp.at<Vec3b>(i, j)[1] = 255;
					colored_disp.at<Vec3b>(i, j)[2] = 0;
				}
				else if (disp_value <= 153)
				{
					// Segment 3: green -> yellow (red channel rises).
					disp_value -= 102;
					colored_disp.at<Vec3b>(i, j)[0] = 0;
					colored_disp.at<Vec3b>(i, j)[1] = 255;
					colored_disp.at<Vec3b>(i, j)[2] = disp_value * 5;
				}
				else if (disp_value <= 204)
				{
					// Segment 4: yellow -> orange (green falls 255 -> 127).
					disp_value -= 153;
					colored_disp.at<Vec3b>(i, j)[0] = 0;
					colored_disp.at<Vec3b>(i, j)[1] = 255 - uchar(128.0*disp_value / 51.0 + 0.5);
					colored_disp.at<Vec3b>(i, j)[2] = 255;
				}
				else
				{
					// Segment 5: orange -> red (green falls 127 -> 0).
					disp_value -= 204;
					colored_disp.at<Vec3b>(i, j)[0] = 0;
					colored_disp.at<Vec3b>(i, j)[1] = 127 - uchar(127.0*disp_value / 51.0 + 0.5);
					colored_disp.at<Vec3b>(i, j)[2] = 255;
				}
			}
		}
	}
}
| 181c8866cd5a89d91799715c14b39695c1375227.cu | #include "../gpu_inc/SGM.cuh"
// Allocates every device-side working buffer on GPU 0, creates the eight CUDA
// streams used to overlap the SGM aggregation paths, and sets the SGM
// smoothness penalties P1/P2.
GPU_SGM::GPU_SGM()
{
	cudaSetDevice(0);
	// One stream per aggregation path so the 4/8 DP directions can overlap.
	checkCudaErrors(cudaStreamCreate(&stream1));
	checkCudaErrors(cudaStreamCreate(&stream2));
	checkCudaErrors(cudaStreamCreate(&stream3));
	checkCudaErrors(cudaStreamCreate(&stream4));
	checkCudaErrors(cudaStreamCreate(&stream5));
	checkCudaErrors(cudaStreamCreate(&stream6));
	checkCudaErrors(cudaStreamCreate(&stream7));
	checkCudaErrors(cudaStreamCreate(&stream8));
	// Input pair and output disparity buffers.
	checkCudaErrors(cudaMalloc((void**)&d_img_l, IMG_H* IMG_W * sizeof(uchar)));
	checkCudaErrors(cudaMalloc((void**)&d_img_r, IMG_H * IMG_W * sizeof(uchar)));
	checkCudaErrors(cudaMalloc((void**)&d_disp, IMG_H * IMG_W * sizeof(uchar)));
	checkCudaErrors(cudaMalloc((void**)&d_filtered_disp, IMG_H * IMG_W * sizeof(float)));
	// Per-pixel 64-bit cost descriptors and the full H*W*MAX_DISP cost volume.
	checkCudaErrors(cudaMalloc((void**)&d_cost_table_l, IMG_H * IMG_W * sizeof(uint64_t)));
	checkCudaErrors(cudaMalloc((void**)&d_cost_table_r, IMG_H * IMG_W * sizeof(uint64_t)));
	checkCudaErrors(cudaMalloc((void**)&d_cost, IMG_H * IMG_W * MAX_DISP * sizeof(float)));
	// Path costs L1..L4 (float) and their per-pixel minima.
	checkCudaErrors(cudaMalloc((void**)&d_L1, IMG_H * IMG_W * MAX_DISP * sizeof(float)));
	checkCudaErrors(cudaMalloc((void**)&d_L2, IMG_H * IMG_W * MAX_DISP * sizeof(float)));
	checkCudaErrors(cudaMalloc((void**)&d_min_L1, IMG_H * IMG_W * sizeof(float)));
	checkCudaErrors(cudaMalloc((void**)&d_min_L2, IMG_H * IMG_W * sizeof(float)));
	checkCudaErrors(cudaMalloc((void**)&d_L3, IMG_H * IMG_W * MAX_DISP * sizeof(float)));
	checkCudaErrors(cudaMalloc((void**)&d_L4, IMG_H * IMG_W * MAX_DISP * sizeof(float)));
	checkCudaErrors(cudaMalloc((void**)&d_min_L3, IMG_H * IMG_W * sizeof(float)));
	checkCudaErrors(cudaMalloc((void**)&d_min_L4, IMG_H * IMG_W * sizeof(float)));
	// NOTE(review): L5..L8 are allocated as short while L1..L4 are float —
	// confirm the diagonal-path kernels really use a short element type.
	checkCudaErrors(cudaMalloc((void**)&d_L5, IMG_H * IMG_W * MAX_DISP * sizeof(short)));
	checkCudaErrors(cudaMalloc((void**)&d_L6, IMG_H * IMG_W * MAX_DISP * sizeof(short)));
	checkCudaErrors(cudaMalloc((void**)&d_min_L5, IMG_H * IMG_W * sizeof(float)));
	checkCudaErrors(cudaMalloc((void**)&d_min_L6, IMG_H * IMG_W * sizeof(float)));
	checkCudaErrors(cudaMalloc((void**)&d_L7, IMG_H * IMG_W * MAX_DISP * sizeof(short)));
	checkCudaErrors(cudaMalloc((void**)&d_L8, IMG_H * IMG_W * MAX_DISP * sizeof(short)));
	checkCudaErrors(cudaMalloc((void**)&d_min_L7, IMG_H * IMG_W * sizeof(float)));
	checkCudaErrors(cudaMalloc((void**)&d_min_L8, IMG_H * IMG_W * sizeof(float)));
	// SGM smoothness penalties (small / large disparity change).
	P1 = 10;
	P2 = 100;
	// Connected-component labels and region areas for the speckle filter.
	checkCudaErrors(cudaMalloc((void**)&d_label, IMG_H * IMG_W * sizeof(int)));
	checkCudaErrors(cudaMalloc((void**)&d_area, IMG_H * IMG_W * sizeof(int)));
	// Host-side result images.
	disp.create(IMG_H, IMG_W, CV_8UC1);
	filtered_disp.create(IMG_H, IMG_W, CV_32FC1);
	colored_disp.create(IMG_H, IMG_W, CV_8UC3);
	disp_cnt = 0;
}
// Releases every device buffer and CUDA stream allocated in the constructor
// (same order as allocation).
GPU_SGM::~GPU_SGM()
{
	checkCudaErrors(cudaFree(d_img_l));
	checkCudaErrors(cudaFree(d_img_r));
	checkCudaErrors(cudaFree(d_disp));
	checkCudaErrors(cudaFree(d_filtered_disp));
	checkCudaErrors(cudaFree(d_cost_table_l));
	checkCudaErrors(cudaFree(d_cost_table_r));
	checkCudaErrors(cudaFree(d_cost));
	checkCudaErrors(cudaFree(d_L1));
	checkCudaErrors(cudaFree(d_L2));
	checkCudaErrors(cudaFree(d_min_L1));
	checkCudaErrors(cudaFree(d_min_L2));
	checkCudaErrors(cudaFree(d_L3));
	checkCudaErrors(cudaFree(d_L4));
	checkCudaErrors(cudaFree(d_min_L3));
	checkCudaErrors(cudaFree(d_min_L4));
	checkCudaErrors(cudaFree(d_L5));
	checkCudaErrors(cudaFree(d_L6));
	checkCudaErrors(cudaFree(d_min_L5));
	checkCudaErrors(cudaFree(d_min_L6));
	checkCudaErrors(cudaFree(d_L7));
	checkCudaErrors(cudaFree(d_L8));
	checkCudaErrors(cudaFree(d_min_L7));
	checkCudaErrors(cudaFree(d_min_L8));
	checkCudaErrors(cudaFree(d_label));
	checkCudaErrors(cudaFree(d_area));
	checkCudaErrors(cudaStreamDestroy(stream1));
	checkCudaErrors(cudaStreamDestroy(stream2));
	checkCudaErrors(cudaStreamDestroy(stream3));
	checkCudaErrors(cudaStreamDestroy(stream4));
	checkCudaErrors(cudaStreamDestroy(stream5));
	checkCudaErrors(cudaStreamDestroy(stream6));
	checkCudaErrors(cudaStreamDestroy(stream7));
	checkCudaErrors(cudaStreamDestroy(stream8));
}
// Runs the full SGM pipeline on one rectified image pair:
// upload -> matching-cost build -> cost filtering -> per-path DP aggregation
// -> winner-take-all -> sub-pixel/median/speckle post-filtering -> async
// download of the filtered float disparity into filtered_disp.
// Timing of each stage is printed to stdout.
// NOTE(review): plain CUDA calls here (memcpy/sync) are not wrapped in
// checkCudaErrors, unlike the constructor — errors would go unnoticed.
void GPU_SGM::process(Mat &img_l, Mat &img_r)
{
	this->img_l = img_l;
	this->img_r = img_r;
	cudaSetDevice(0);
	// Upload both images concurrently on two streams, then wait for both.
	cudaMemcpyAsync(d_img_l, img_l.data, IMG_H* IMG_W * sizeof(uchar), cudaMemcpyHostToDevice, stream1);
	cudaMemcpyAsync(d_img_r, img_r.data, IMG_H* IMG_W * sizeof(uchar), cudaMemcpyHostToDevice, stream2);
	cudaStreamSynchronize(stream1);
	cudaStreamSynchronize(stream2);
	double be = get_cur_ms();
	dim3 grid, block;
	// One thread per pixel in 32x32 blocks.
	grid.x = (IMG_W - 1) / 32 + 1;
	grid.y = (IMG_H - 1) / 32 + 1;
	block.x = 32;
	block.y = 32;
	// Build the per-pixel cost tables and expand them into the disparity
	// space image (cost volume).
	cu_build_cost_table << <grid, block, 0, stream1 >> > (d_img_l, d_img_r, d_cost_table_l, d_cost_table_r, IMG_W, IMG_H, CU_WIN_W, CU_WIN_H);
	cu_build_dsi_from_table << <grid, block, 0, stream1 >> > (d_cost_table_l, d_cost_table_r, d_cost, IMG_W, IMG_H, MAX_DISP);
	cudaDeviceSynchronize();
	printf("build cost takes %lf ms\n", get_cur_ms() - be);
	be = get_cur_ms();
	// Box-filter the cost volume; grid now spans (width x disparity).
	grid.x = (IMG_W - 1) / 32 + 1;
	grid.y = (MAX_DISP - 1) / 32 + 1;
	cu_cost_horizontal_filter << <grid, block, 0, stream1 >> > (d_cost, IMG_W, IMG_H, MAX_DISP, CU_COST_WIN_W);
	cu_cost_vertical_filter << <grid, block, 0, stream1 >> > (d_cost, IMG_W, IMG_H, MAX_DISP, CU_COST_WIN_H);
	//cu_cost_horizontal_filter_new << <grid, block, 0, stream1 >> > (d_cost, d_L1, IMG_W, IMG_H, MAX_DISP, CU_COST_WIN_W);
	//cu_cost_vertical_filter_new << <grid, block, 0, stream2 >> > (d_cost, d_L2, IMG_W, IMG_H, MAX_DISP, CU_COST_WIN_H);
	//cudaStreamSynchronize(stream1);
	//cudaStreamSynchronize(stream2);
	//cu_cost_filter << <grid, block, 0, stream1 >> > (d_cost, d_L1, d_L2, IMG_W, IMG_H, MAX_DISP);
	cudaDeviceSynchronize();
	printf("cost filter takes %lf ms\n", get_cur_ms() - be);
	be = get_cur_ms();
	// Dynamic programming along each aggregation path, one stream per path
	// so the directions run concurrently. One block per column, one thread
	// per disparity.
	dim3 dp_grid, dp_block;
	dp_grid.x = IMG_W;
	dp_grid.y = 1;
	dp_block.x = MAX_DISP; // for dp syncronize
	dp_block.y = 1;
	cu_dp_L1 << <dp_grid, dp_block, 0, stream1 >> > (d_cost, d_L1, d_min_L1, IMG_W, IMG_H, MAX_DISP, P1, P2);
	cu_dp_L2 << <dp_grid, dp_block, 0, stream2 >> > (d_cost, d_L2, d_min_L2, IMG_W, IMG_H, MAX_DISP, P1, P2);
	cu_dp_L3 << <dp_grid, dp_block, 0, stream3 >> > (d_cost, d_L3, d_min_L3, IMG_W, IMG_H, MAX_DISP, P1, P2);
	cu_dp_L4 << <dp_grid, dp_block, 0, stream4 >> > (d_cost, d_L4, d_min_L4, IMG_W, IMG_H, MAX_DISP, P1, P2);
	if (CU_USE_8_PATH)
	{
		//for (int i = 0; i < IMG_H; i++)
		//{
		//	cu_dp_L5 << <dp_grid, dp_block, 0, stream5 >> > (d_cost, d_L5, d_min_L5, i, IMG_W, IMG_H, MAX_DISP, P1, P2);
		//	cu_dp_L6 << <dp_grid, dp_block, 0, stream6 >> > (d_cost, d_L6, d_min_L6, i, IMG_W, IMG_H, MAX_DISP, P1, P2);
		//	cu_dp_L7 << <dp_grid, dp_block, 0, stream7 >> > (d_cost, d_L7, d_min_L7, IMG_H - 1 - i, IMG_W, IMG_H, MAX_DISP, P1, P2);
		//	cu_dp_L8 << <dp_grid, dp_block, 0, stream8 >> > (d_cost, d_L8, d_min_L8, IMG_H - 1 - i, IMG_W, IMG_H, MAX_DISP, P1, P2);
		//}
		// use truncated dp to approximate the original method
		cu_dp_L5_truncated << <dp_grid, dp_block, 0, stream5 >> > (d_cost, d_L5, d_min_L5, IMG_W, IMG_H, MAX_DISP, P1, P2);
		cu_dp_L6_truncated << <dp_grid, dp_block, 0, stream6 >> > (d_cost, d_L6, d_min_L6, IMG_W, IMG_H, MAX_DISP, P1, P2);
		cu_dp_L7_truncated << <dp_grid, dp_block, 0, stream7 >> > (d_cost, d_L7, d_min_L7, IMG_W, IMG_H, MAX_DISP, P1, P2);
		cu_dp_L8_truncated << <dp_grid, dp_block, 0, stream8 >> > (d_cost, d_L8, d_min_L8, IMG_W, IMG_H, MAX_DISP, P1, P2);
	}
	cudaDeviceSynchronize();
	printf("dp takes %lf ms\n", get_cur_ms() - be);
	be = get_cur_ms();
	// Sum the path costs back into the cost volume, then pick the winning
	// disparity per pixel (WTA with a uniqueness-ratio check).
	grid.x = 512;
	grid.y = 512;
	aggregation << <grid, block, 0, stream1 >> > (d_cost, d_L1, d_L2, d_L3, d_L4, d_L5, d_L6, d_L7, d_L8, IMG_W, IMG_H, MAX_DISP);
	grid.x = (IMG_W - 1) / 32 + 1;
	grid.y = (IMG_H - 1) / 32 + 1;
	wta << <grid, block, 0, stream1 >> >(d_cost, d_disp, IMG_W, IMG_H, MAX_DISP, CU_UNIQUE_RATIO, INVALID_DISP);
	cudaDeviceSynchronize();
	printf("wta takes %lf ms\n", get_cur_ms() - be);
	be = get_cur_ms();
	// Post-filtering: sub-pixel refinement, median filter, then the
	// connected-component speckle filter (init runs on stream2 in parallel
	// with the stream1 work; both are joined before union-find starts).
	cu_subpixel << <grid, block, 0, stream1 >> > (d_cost, d_disp, d_filtered_disp, IMG_W, IMG_H, MAX_DISP, INVALID_DISP);
	cu_median_filter << <grid, block, 0, stream1 >> > (d_filtered_disp, IMG_W, IMG_H, MAX_DISP, CU_MEDIAN_FILTER_W, CU_MEDIAN_FILTER_H);
	cu_speckle_filter_init << <grid, block, 0, stream2 >> > (d_label, d_area, IMG_W, IMG_H);
	cudaStreamSynchronize(stream1);
	cudaStreamSynchronize(stream2);
	cu_speckle_filter_union_find << <grid, block, 0, stream1 >> > (d_filtered_disp, d_label, d_area, IMG_W, IMG_H, CU_SPECKLE_DIS);
	cu_speckle_filter_sum_up << <grid, block, 0, stream1 >> > (d_label, d_area, IMG_W, IMG_H);
	cu_speckle_filter_end << <grid, block, 0, stream1 >> > (d_filtered_disp, d_label, d_area, IMG_W, IMG_H, INVALID_DISP, CU_SPECKLE_SIZE);
	cudaDeviceSynchronize();
	printf("cuda post_filter takes %lf ms\n", get_cur_ms() - be);
	// Async download; the preceding cudaDeviceSynchronize guarantees the
	// kernels are done, but the caller must sync stream1 before reading
	// filtered_disp on the host.
	cudaMemcpyAsync(filtered_disp.data, d_filtered_disp, IMG_H * IMG_W * sizeof(float), cudaMemcpyDeviceToHost, stream1);
	//cudaMemcpyAsync(disp.data, d_disp, IMG_H * IMG_W * sizeof(uchar), cudaMemcpyDeviceToHost, stream2);
}
// Compose a debug visualization: the left image stacked on top of the
// color-mapped disparity, written into debug_view (2*IMG_H x IMG_W, BGR).
// Side effect: img_l is converted to BGR in place by cvtColor.
void GPU_SGM::show_disp(Mat &debug_view)
{
	// The leftmost MAX_DISP / SCALE columns can never be matched against the
	// right image, so mark them invalid before color mapping.
	for (int i = 0; i < filtered_disp.rows; i++)
	{
		float *ptr = filtered_disp.ptr<float>(i);
		for (int j = 0; j < MAX_DISP / SCALE; j++)
		{
			ptr[j] = INVALID_DISP;
		}
	}
	// convert to RGB for better observation
	colormap();
	Mat tmp;
	debug_view = debug_view.zeros(IMG_H * 2, IMG_W, CV_8UC3);
	tmp = debug_view(Rect(0, 0, IMG_W, IMG_H));
	cvtColor(img_l, img_l, CV_GRAY2BGR);
	img_l.copyTo(tmp);
	// Bug fix: the bottom half starts at row IMG_H, not IMG_H - 1. The old
	// offset overwrote the last row of the left image and left the final row
	// of debug_view black.
	tmp = debug_view(Rect(0, IMG_H, IMG_W, IMG_H));
	colored_disp.copyTo(tmp);
}
// Converts filtered_disp (float disparities) into colored_disp, a BGR
// false-color visualization. Disparities above MAX_DISP - 1 (invalid) become
// black; valid values are rescaled to [0, 256) and mapped through a
// five-segment blue -> cyan -> green -> orange -> red ramp.
// NOTE(review): 256 / (MAX_DISP) is integer division — it becomes 0 when
// MAX_DISP > 256; confirm MAX_DISP divides 256.
void GPU_SGM::colormap()
{
	float disp_value = 0;
	for (int i = 0; i < filtered_disp.rows; i++)
	{
		for (int j = 0; j < filtered_disp.cols; j++)
		{
			disp_value = filtered_disp.at<float>(i, j);
			//disp_value = disp.at<uchar>(i, j);
			if (disp_value > MAX_DISP - 1)
			{
				// Invalid disparity -> black.
				colored_disp.at<Vec3b>(i, j)[0] = 0;
				colored_disp.at<Vec3b>(i, j)[1] = 0;
				colored_disp.at<Vec3b>(i, j)[2] = 0;
			}
			else
			{
				// Rescale to [0, 256) so the ramp below is independent of MAX_DISP.
				disp_value *= (256 / (MAX_DISP));
				if (disp_value <= 51)
				{
					// Segment 1: blue -> cyan (green channel rises).
					colored_disp.at<Vec3b>(i, j)[0] = 255;
					colored_disp.at<Vec3b>(i, j)[1] = disp_value * 5;
					colored_disp.at<Vec3b>(i, j)[2] = 0;
				}
				else if (disp_value <= 102)
				{
					// Segment 2: cyan -> green (blue channel falls).
					disp_value -= 51;
					colored_disp.at<Vec3b>(i, j)[0] = 255 - disp_value * 5;
					colored_disp.at<Vec3b>(i, j)[1] = 255;
					colored_disp.at<Vec3b>(i, j)[2] = 0;
				}
				else if (disp_value <= 153)
				{
					// Segment 3: green -> yellow (red channel rises).
					disp_value -= 102;
					colored_disp.at<Vec3b>(i, j)[0] = 0;
					colored_disp.at<Vec3b>(i, j)[1] = 255;
					colored_disp.at<Vec3b>(i, j)[2] = disp_value * 5;
				}
				else if (disp_value <= 204)
				{
					// Segment 4: yellow -> orange (green falls 255 -> 127).
					disp_value -= 153;
					colored_disp.at<Vec3b>(i, j)[0] = 0;
					colored_disp.at<Vec3b>(i, j)[1] = 255 - uchar(128.0*disp_value / 51.0 + 0.5);
					colored_disp.at<Vec3b>(i, j)[2] = 255;
				}
				else
				{
					// Segment 5: orange -> red (green falls 127 -> 0).
					disp_value -= 204;
					colored_disp.at<Vec3b>(i, j)[0] = 0;
					colored_disp.at<Vec3b>(i, j)[1] = 127 - uchar(127.0*disp_value / 51.0 + 0.5);
					colored_disp.at<Vec3b>(i, j)[2] = 255;
				}
			}
		}
	}
}
|
950ab56718a0007edec203e64c779799cc84926a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
#include <NeoMathEngine/NeoMathEngineDefs.h>
#ifdef NEOML_USE_CUDA
#include <CudaMathEngine.h>
#include <CudaMathEngineDnnConvs.h>
#include <CudaCommon.h>
#include <CudaDevice.h>
#include <MathEngineCommon.h>
#include <MemoryHandleInternal.h>
#include <Kernels/CudaDnnConvKernels.h>
namespace NeoML {
// Temporary matrix height
// Height of the im2col temporary matrix: one row per output position across
// the whole batch.
static inline int tempMatrixHeight( const CCudaConvolutionDescInternal& desc )
{
	const int outputPositions = desc.Result.Height() * desc.Result.Width();
	return desc.Source.ObjectCount() * outputPositions;
}
// Temporary matrix width
// Width of the im2col temporary matrix: the number of weights in one filter.
static inline int tempMatrixWidth( const CCudaConvolutionDescInternal& desc )
{
	const CCudaBlobDesc& filter = desc.Filter;
	return filter.ObjectSize();
}
// Creates a convolution descriptor capturing the blob geometry and the
// stride/padding/dilation parameters for the forward/backward/learn calls.
// The caller takes ownership of the returned descriptor.
// (Removed the unused totalInputChannels/totalOutputChannels locals — they
// were computed and never read.)
CConvolutionDesc* CCudaMathEngine::InitBlobConvolution( const CBlobDesc& input, int paddingHeight,
	int paddingWidth, int strideHeight, int strideWidth, int dilationHeight, int dilationWidth,
	const CBlobDesc& filter, const CBlobDesc& output )
{
	CCudaConvolutionDesc* desc = new CCudaConvolutionDesc();
	desc->Internal.Source = input;
	desc->Internal.Filter = filter;
	desc->Internal.Result = output;
	desc->Internal.StrideHeight = strideHeight;
	desc->Internal.StrideWidth = strideWidth;
	desc->Internal.PaddingHeight = paddingHeight;
	desc->Internal.PaddingWidth = paddingWidth;
	desc->Internal.DilationHeight = dilationHeight;
	desc->Internal.DilationWidth = dilationWidth;
	return desc;
}
// Forward convolution. Three code paths:
//  1) 3x3 filter, stride 1, dilation 1, fewer than 16 input channels:
//     dedicated kernel;
//  2) 1x1 filter, stride 1, no padding: a plain matrix product, no temporary
//     matrix needed;
//  3) general case: an im2col temporary matrix is built in height batches
//     (bounded by getCudaTempMatrixMaxHeight) and each batch is multiplied by
//     the transposed filter matrix.
// freeTermData may be null; when present the free term is added to the output.
void CCudaMathEngine::BlobConvolution( const CConvolutionDesc& convDesc,
	const CFloatHandle& sourceData, const CFloatHandle& filterData, const CFloatHandle* freeTermData,
	const CFloatHandle& resultData )
{
	SetCudaDevice( device->DeviceNumber );
	const CCudaConvolutionDescInternal& desc = static_cast<const CCudaConvolutionDesc&>( convDesc ).Internal;
	const CCudaBlobDesc& source = desc.Source;
	const CCudaBlobDesc& filter = desc.Filter;
	const CCudaBlobDesc& result = desc.Result;
	if( filter.Height() == 3 && filter.Width() == 3
		&& desc.StrideHeight == 1 && desc.StrideWidth == 1
		&& desc.DilationHeight == 1 && desc.DilationWidth == 1
		&& source.Channels() * source.Depth() < 16 )
	{
		// Use a convolution kernel of size 3*3 with stride 1
		dim3 blockCount;
		dim3 threadCount;
		// Each thread handles 8 output columns, hence the /8 width norm.
		int widthNorm = ( desc.Result.Width() + 7 ) / 8;
		getCudaTaskGrid3DMinZYX( 1, 1, 1024, blockCount, threadCount, result.ObjectCount() * result.Height(), widthNorm,
			filter.ObjectCount(), 512 );
		hipLaunchKernelGGL(( Conv3x3s1d1Kernel1x8), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw( sourceData ), GetRaw( filterData ),
			freeTermData == 0 ? 0 : GetRaw( *freeTermData ), GetRaw( resultData ), widthNorm );
		return;
	}
	if( filter.Height() == 1 && filter.Width() == 1
		&& desc.StrideHeight == 1 && desc.StrideWidth == 1
		&& desc.PaddingHeight == 0 && desc.PaddingWidth == 0 )
	{
		// The convolution is a matrix product anyway, without a temporary matrix
		if( freeTermData != 0 ) {
			// Fill the output matrix with the free term values
			SetVectorToMatrixRows( resultData, result.ObjectCount() * result.Height() * result.Width(),
				filter.ObjectCount(), *freeTermData );
			multiplyMatrixByTransposedMatrixAndAdd( sourceData,
				source.ObjectCount() * result.Height() * result.Width(),
				filter.ObjectSize(), filter.ObjectSize(), filterData,
				filter.ObjectCount(), filter.ObjectSize(), resultData,
				filter.ObjectCount() );
		} else {
			MultiplyMatrixByTransposedMatrix( sourceData,
				source.ObjectCount() * result.Height() * result.Width(),
				filter.ObjectSize(), filter.ObjectSize(), filterData,
				filter.ObjectCount(), filter.ObjectSize(), resultData,
				filter.ObjectCount(), result.BlobSize() );
		}
		return;
	}
	// NOTE(review): these locals shadow the file-level tempMatrixHeight()/
	// tempMatrixWidth() helpers; the height formula here looks equivalent to
	// the helper's — confirm.
	const int tempMatrixWidth = filter.ObjectSize();
	const int tempMatrixHeight = result.ObjectCount() * result.ObjectSize() / filter.ObjectCount();
	// Batch the temporary matrix by rows to bound device memory usage.
	const int tempMatrixHeightBatchSize = getCudaTempMatrixMaxHeight( tempMatrixHeight, tempMatrixWidth );
	CFloatHandleStackVar tempMatrix( mathEngine(), tempMatrixHeightBatchSize * tempMatrixWidth );
	int tempMatrixHeightIndex = 0;
	while( tempMatrixHeightIndex < tempMatrixHeight ) {
		int curTempMatrixHeight = min( tempMatrixHeight - tempMatrixHeightIndex, tempMatrixHeightBatchSize );
		dim3 blockCount;
		dim3 threadCount;
		getCudaTaskGrid2D( blockCount, threadCount, curTempMatrixHeight, source.Depth() * source.Channels() );
		// im2col for this batch of rows.
		hipLaunchKernelGGL(( BuildTempMatrixKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw( sourceData ),
			tempMatrixHeightIndex, curTempMatrixHeight, GetRaw( tempMatrix.GetHandle() ) );
		MultiplyMatrixByTransposedMatrix( tempMatrix, curTempMatrixHeight, filter.ObjectSize(), filter.ObjectSize(),
			filterData, filter.ObjectCount(), filter.ObjectSize(),
			resultData + tempMatrixHeightIndex * filter.ObjectCount(),
			filter.ObjectCount(), curTempMatrixHeight * filter.ObjectCount() );
		tempMatrixHeightIndex += curTempMatrixHeight;
	}
	if( freeTermData != 0 ) {
		// Fill the output with the free term values
		AddVectorToMatrixRows( 1, resultData, resultData,
			result.BlobSize() / filter.ObjectCount(), filter.ObjectCount(), *freeTermData );
	}
}
// Backward pass w.r.t. the input: inputDiff = outputDiff (*) rotated filter.
// For 1x1/stride-1/no-padding filters this is a direct matrix product;
// otherwise the temporary (im2col) matrix is reconstructed batch-by-batch
// from outputDiff x filter and scattered back into inputDiff.
// freeTerm may be null; when present it initializes inputDiff.
void CCudaMathEngine::BlobConvolutionBackward( const CConvolutionDesc& convDesc, const CFloatHandle& outputDiff,
	const CFloatHandle& filter, const CFloatHandle* freeTerm, const CFloatHandle& inputDiff )
{
	SetCudaDevice( device->DeviceNumber );
	const CCudaConvolutionDescInternal& desc = static_cast<const CCudaConvolutionDesc&>( convDesc ).Internal;
	const int filterCount = desc.Filter.ObjectCount();
	const int filterObjectSize = desc.Filter.ObjectSize();
	if( desc.Filter.Height() == 1 && desc.Filter.Width() == 1
		&& desc.StrideHeight == 1 && desc.StrideWidth == 1
		&& desc.PaddingHeight == 0 && desc.PaddingWidth == 0 )
	{
		// The convolution backward pass is a matrix product without creating a temporary matrix
		MultiplyMatrixByMatrix( 1, outputDiff, desc.Result.BlobSize() / filterCount, filterCount,
			filter, filterObjectSize, inputDiff, desc.Source.BlobSize() );
		if( freeTerm != 0 ) {
			AddVectorToMatrixRows( 1, inputDiff, inputDiff, desc.Source.ObjectCount() * desc.Source.Height() * desc.Source.Width(),
				desc.Source.Channels() * desc.Source.Depth(), *freeTerm );
		}
		return;
	}
	if( freeTerm != 0 ) {
		// Fill the input gradients with the free terms
		SetVectorToMatrixRows( inputDiff, desc.Source.ObjectCount() * desc.Source.Height() * desc.Source.Width(),
			desc.Source.Channels() * desc.Source.Depth(), *freeTerm );
	} else {
		VectorFill( inputDiff, 0.f, desc.Source.BlobSize() );
	}
	// Scatter mode: when filter footprints of neighboring output positions
	// overlap, concurrent writes to the same input cell require atomics.
	TBackwardOperationType operation = BOT_AtomicAdd;
	if( ( desc.Filter.Width() - 1 ) * desc.DilationWidth + 1 <= desc.StrideWidth
		&& ( desc.Filter.Height() - 1 ) * desc.DilationHeight + 1 <= desc.StrideHeight )
	{
		// The filter areas do not intersect, so atomic operations are not needed
		operation = freeTerm == 0 ? BOT_Set : BOT_Add;
	}
	// Get the temporary matrix
	const int matrixHeight = tempMatrixHeight( desc );
	const int matrixWidth = tempMatrixWidth( desc );
	const int tempMatrixHeightBatchSize = getCudaTempMatrixMaxHeight( matrixHeight, matrixWidth );
	CFloatHandleStackVar tempMatrix( *this, tempMatrixHeightBatchSize * matrixWidth );
	int tempMatrixHeightIndex = 0;
	while( tempMatrixHeightIndex < matrixHeight ) {
		int curTempMatrixHeight = min( matrixHeight - tempMatrixHeightIndex, tempMatrixHeightBatchSize );
		// Rebuild this batch of im2col rows: outputDiff rows x filter matrix.
		MultiplyMatrixByMatrix( 1, outputDiff + tempMatrixHeightIndex * filterCount, curTempMatrixHeight, filterCount,
			filter, filterObjectSize, tempMatrix, tempMatrix.Size() );
		// Get the input gradients from the temporary matrix data
		dim3 blockCount;
		dim3 threadCount;
		int widthNorm = ( matrixWidth + BuildInputFromTempMatrixCombine - 1 ) / BuildInputFromTempMatrixCombine;
		getCudaTaskGrid2D( blockCount, threadCount, curTempMatrixHeight, widthNorm );
		hipLaunchKernelGGL(( BuildInputFromTempMatrixKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw( tempMatrix.GetHandle() ),
			curTempMatrixHeight, matrixWidth, GetRaw( inputDiff ), operation, widthNorm, tempMatrixHeightIndex );
		tempMatrixHeightIndex += curTempMatrixHeight;
	}
}
// Accumulates (adds to) the filter and free-term gradients.
// The filter gradient is tempMatrix^T x outputDiff, computed over im2col
// batches of the input; the free-term gradient is a row sum of outputDiff
// (or of the input itself when isFreeTermDiffFromInput is set).
void CCudaMathEngine::BlobConvolutionLearnAdd( const CConvolutionDesc& convDesc,
	const CFloatHandle& input, const CFloatHandle& outputDiff, const CFloatHandle& filterDiff,
	const CFloatHandle* freeTermDiff, bool isFreeTermDiffFromInput )
{
	SetCudaDevice( device->DeviceNumber );
	const CCudaConvolutionDescInternal& desc = static_cast<const CCudaConvolutionDesc&>( convDesc ).Internal;
	if( freeTermDiff != 0 ) {
		// Get the free term gradient
		if( !isFreeTermDiffFromInput ) {
			SumMatrixRowsAdd( 1, *freeTermDiff, outputDiff, desc.Result.BlobSize() / desc.Filter.ObjectCount(),
				desc.Filter.ObjectCount() );
		} else {
			// NOTE(review): this branch divides by Channels() only, without
			// Depth(), unlike the other blob-size computations — verify.
			SumMatrixRowsAdd( 1, *freeTermDiff, input, desc.Source.BlobSize() / desc.Source.Channels(),
				desc.Source.Channels() );
		}
	}
	// Build the temporary matrix
	const int matrixHeight = tempMatrixHeight( desc );
	const int matrixWidth = tempMatrixWidth( desc );
	const int filterCount = desc.Filter.ObjectCount();
	const int tempMatrixHeightBatchSize = getCudaTempMatrixMaxHeight( matrixHeight, matrixWidth );
	CFloatHandleStackVar tempMatrix( *this, tempMatrixHeightBatchSize * matrixWidth );
	int tempMatrixHeightIndex = 0;
	while( tempMatrixHeightIndex < matrixHeight ) {
		int curTempMatrixHeight = min( matrixHeight - tempMatrixHeightIndex, tempMatrixHeightBatchSize );
		dim3 blockCount;
		dim3 threadCount;
		getCudaTaskGrid2D( blockCount, threadCount, curTempMatrixHeight, desc.Source.Depth() * desc.Source.Channels() );
		// im2col for this batch of input rows.
		hipLaunchKernelGGL(( BuildTempMatrixKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw( input ), tempMatrixHeightIndex, curTempMatrixHeight,
			GetRaw( tempMatrix.GetHandle() ) );
		// Get the filter gradients by multiplying the temporary matrix and the output gradients
		MultiplyTransposedMatrixByMatrixAndAdd( outputDiff + tempMatrixHeightIndex * filterCount, curTempMatrixHeight,
			filterCount, filterCount, tempMatrix, matrixWidth, matrixWidth, filterDiff, matrixWidth, desc.Filter.BlobSize() );
		tempMatrixHeightIndex += curTempMatrixHeight;
	}
}
} // namespace NeoML
#endif // NEOML_USE_CUDA
950ab56718a0007edec203e64c779799cc84926a.cu | /* Copyright © 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
#include <NeoMathEngine/NeoMathEngineDefs.h>
#ifdef NEOML_USE_CUDA
#include <CudaMathEngine.h>
#include <CudaMathEngineDnnConvs.h>
#include <CudaCommon.h>
#include <CudaDevice.h>
#include <MathEngineCommon.h>
#include <MemoryHandleInternal.h>
#include <Kernels/CudaDnnConvKernels.h>
namespace NeoML {
// Temporary matrix height
// Height of the im2col temporary matrix: one row per output position across
// the whole batch.
static inline int tempMatrixHeight( const CCudaConvolutionDescInternal& desc )
{
	const CCudaBlobDesc& result = desc.Result;
	return desc.Source.ObjectCount() * result.Height() * result.Width();
}
// Temporary matrix width
// Width of the im2col temporary matrix: the number of weights in one filter.
static inline int tempMatrixWidth( const CCudaConvolutionDescInternal& desc )
{
	const int filterWeights = desc.Filter.ObjectSize();
	return filterWeights;
}
// Creates a convolution descriptor capturing the blob geometry and the
// stride/padding/dilation parameters for the forward/backward/learn calls.
// The caller takes ownership of the returned descriptor.
// (Removed the unused totalInputChannels/totalOutputChannels locals — they
// were computed and never read.)
CConvolutionDesc* CCudaMathEngine::InitBlobConvolution( const CBlobDesc& input, int paddingHeight,
	int paddingWidth, int strideHeight, int strideWidth, int dilationHeight, int dilationWidth,
	const CBlobDesc& filter, const CBlobDesc& output )
{
	CCudaConvolutionDesc* desc = new CCudaConvolutionDesc();
	desc->Internal.Source = input;
	desc->Internal.Filter = filter;
	desc->Internal.Result = output;
	desc->Internal.StrideHeight = strideHeight;
	desc->Internal.StrideWidth = strideWidth;
	desc->Internal.PaddingHeight = paddingHeight;
	desc->Internal.PaddingWidth = paddingWidth;
	desc->Internal.DilationHeight = dilationHeight;
	desc->Internal.DilationWidth = dilationWidth;
	return desc;
}
// Forward convolution. Three code paths:
//  1) 3x3 filter, stride 1, dilation 1, fewer than 16 input channels:
//     dedicated kernel;
//  2) 1x1 filter, stride 1, no padding: a plain matrix product, no temporary
//     matrix needed;
//  3) general case: an im2col temporary matrix is built in height batches
//     (bounded by getCudaTempMatrixMaxHeight) and each batch is multiplied by
//     the transposed filter matrix.
// freeTermData may be null; when present the free term is added to the output.
void CCudaMathEngine::BlobConvolution( const CConvolutionDesc& convDesc,
	const CFloatHandle& sourceData, const CFloatHandle& filterData, const CFloatHandle* freeTermData,
	const CFloatHandle& resultData )
{
	SetCudaDevice( device->DeviceNumber );
	const CCudaConvolutionDescInternal& desc = static_cast<const CCudaConvolutionDesc&>( convDesc ).Internal;
	const CCudaBlobDesc& source = desc.Source;
	const CCudaBlobDesc& filter = desc.Filter;
	const CCudaBlobDesc& result = desc.Result;
	if( filter.Height() == 3 && filter.Width() == 3
		&& desc.StrideHeight == 1 && desc.StrideWidth == 1
		&& desc.DilationHeight == 1 && desc.DilationWidth == 1
		&& source.Channels() * source.Depth() < 16 )
	{
		// Use a convolution kernel of size 3*3 with stride 1
		dim3 blockCount;
		dim3 threadCount;
		// Each thread handles 8 output columns, hence the /8 width norm.
		int widthNorm = ( desc.Result.Width() + 7 ) / 8;
		getCudaTaskGrid3DMinZYX( 1, 1, 1024, blockCount, threadCount, result.ObjectCount() * result.Height(), widthNorm,
			filter.ObjectCount(), 512 );
		Conv3x3s1d1Kernel1x8<<<blockCount, threadCount>>>( desc, GetRaw( sourceData ), GetRaw( filterData ),
			freeTermData == 0 ? 0 : GetRaw( *freeTermData ), GetRaw( resultData ), widthNorm );
		return;
	}
	if( filter.Height() == 1 && filter.Width() == 1
		&& desc.StrideHeight == 1 && desc.StrideWidth == 1
		&& desc.PaddingHeight == 0 && desc.PaddingWidth == 0 )
	{
		// The convolution is a matrix product anyway, without a temporary matrix
		if( freeTermData != 0 ) {
			// Fill the output matrix with the free term values
			SetVectorToMatrixRows( resultData, result.ObjectCount() * result.Height() * result.Width(),
				filter.ObjectCount(), *freeTermData );
			multiplyMatrixByTransposedMatrixAndAdd( sourceData,
				source.ObjectCount() * result.Height() * result.Width(),
				filter.ObjectSize(), filter.ObjectSize(), filterData,
				filter.ObjectCount(), filter.ObjectSize(), resultData,
				filter.ObjectCount() );
		} else {
			MultiplyMatrixByTransposedMatrix( sourceData,
				source.ObjectCount() * result.Height() * result.Width(),
				filter.ObjectSize(), filter.ObjectSize(), filterData,
				filter.ObjectCount(), filter.ObjectSize(), resultData,
				filter.ObjectCount(), result.BlobSize() );
		}
		return;
	}
	// NOTE(review): these locals shadow the file-level tempMatrixHeight()/
	// tempMatrixWidth() helpers; the height formula here looks equivalent to
	// the helper's — confirm.
	const int tempMatrixWidth = filter.ObjectSize();
	const int tempMatrixHeight = result.ObjectCount() * result.ObjectSize() / filter.ObjectCount();
	// Batch the temporary matrix by rows to bound device memory usage.
	const int tempMatrixHeightBatchSize = getCudaTempMatrixMaxHeight( tempMatrixHeight, tempMatrixWidth );
	CFloatHandleStackVar tempMatrix( mathEngine(), tempMatrixHeightBatchSize * tempMatrixWidth );
	int tempMatrixHeightIndex = 0;
	while( tempMatrixHeightIndex < tempMatrixHeight ) {
		int curTempMatrixHeight = min( tempMatrixHeight - tempMatrixHeightIndex, tempMatrixHeightBatchSize );
		dim3 blockCount;
		dim3 threadCount;
		getCudaTaskGrid2D( blockCount, threadCount, curTempMatrixHeight, source.Depth() * source.Channels() );
		// im2col for this batch of rows.
		BuildTempMatrixKernel<<<blockCount, threadCount>>>( desc, GetRaw( sourceData ),
			tempMatrixHeightIndex, curTempMatrixHeight, GetRaw( tempMatrix.GetHandle() ) );
		MultiplyMatrixByTransposedMatrix( tempMatrix, curTempMatrixHeight, filter.ObjectSize(), filter.ObjectSize(),
			filterData, filter.ObjectCount(), filter.ObjectSize(),
			resultData + tempMatrixHeightIndex * filter.ObjectCount(),
			filter.ObjectCount(), curTempMatrixHeight * filter.ObjectCount() );
		tempMatrixHeightIndex += curTempMatrixHeight;
	}
	if( freeTermData != 0 ) {
		// Fill the output with the free term values
		AddVectorToMatrixRows( 1, resultData, resultData,
			result.BlobSize() / filter.ObjectCount(), filter.ObjectCount(), *freeTermData );
	}
}
// Backward pass w.r.t. the input: inputDiff = outputDiff (*) rotated filter.
// For 1x1/stride-1/no-padding filters this is a direct matrix product;
// otherwise the temporary (im2col) matrix is reconstructed batch-by-batch
// from outputDiff x filter and scattered back into inputDiff.
// freeTerm may be null; when present it initializes inputDiff.
void CCudaMathEngine::BlobConvolutionBackward( const CConvolutionDesc& convDesc, const CFloatHandle& outputDiff,
	const CFloatHandle& filter, const CFloatHandle* freeTerm, const CFloatHandle& inputDiff )
{
	SetCudaDevice( device->DeviceNumber );
	const CCudaConvolutionDescInternal& desc = static_cast<const CCudaConvolutionDesc&>( convDesc ).Internal;
	const int filterCount = desc.Filter.ObjectCount();
	const int filterObjectSize = desc.Filter.ObjectSize();
	if( desc.Filter.Height() == 1 && desc.Filter.Width() == 1
		&& desc.StrideHeight == 1 && desc.StrideWidth == 1
		&& desc.PaddingHeight == 0 && desc.PaddingWidth == 0 )
	{
		// The convolution backward pass is a matrix product without creating a temporary matrix
		MultiplyMatrixByMatrix( 1, outputDiff, desc.Result.BlobSize() / filterCount, filterCount,
			filter, filterObjectSize, inputDiff, desc.Source.BlobSize() );
		if( freeTerm != 0 ) {
			AddVectorToMatrixRows( 1, inputDiff, inputDiff, desc.Source.ObjectCount() * desc.Source.Height() * desc.Source.Width(),
				desc.Source.Channels() * desc.Source.Depth(), *freeTerm );
		}
		return;
	}
	if( freeTerm != 0 ) {
		// Fill the input gradients with the free terms
		SetVectorToMatrixRows( inputDiff, desc.Source.ObjectCount() * desc.Source.Height() * desc.Source.Width(),
			desc.Source.Channels() * desc.Source.Depth(), *freeTerm );
	} else {
		VectorFill( inputDiff, 0.f, desc.Source.BlobSize() );
	}
	// Scatter mode: when filter footprints of neighboring output positions
	// overlap, concurrent writes to the same input cell require atomics.
	TBackwardOperationType operation = BOT_AtomicAdd;
	if( ( desc.Filter.Width() - 1 ) * desc.DilationWidth + 1 <= desc.StrideWidth
		&& ( desc.Filter.Height() - 1 ) * desc.DilationHeight + 1 <= desc.StrideHeight )
	{
		// The filter areas do not intersect, so atomic operations are not needed
		operation = freeTerm == 0 ? BOT_Set : BOT_Add;
	}
	// Get the temporary matrix
	const int matrixHeight = tempMatrixHeight( desc );
	const int matrixWidth = tempMatrixWidth( desc );
	const int tempMatrixHeightBatchSize = getCudaTempMatrixMaxHeight( matrixHeight, matrixWidth );
	CFloatHandleStackVar tempMatrix( *this, tempMatrixHeightBatchSize * matrixWidth );
	int tempMatrixHeightIndex = 0;
	while( tempMatrixHeightIndex < matrixHeight ) {
		int curTempMatrixHeight = min( matrixHeight - tempMatrixHeightIndex, tempMatrixHeightBatchSize );
		// Rebuild this batch of im2col rows: outputDiff rows x filter matrix.
		MultiplyMatrixByMatrix( 1, outputDiff + tempMatrixHeightIndex * filterCount, curTempMatrixHeight, filterCount,
			filter, filterObjectSize, tempMatrix, tempMatrix.Size() );
		// Get the input gradients from the temporary matrix data
		dim3 blockCount;
		dim3 threadCount;
		int widthNorm = ( matrixWidth + BuildInputFromTempMatrixCombine - 1 ) / BuildInputFromTempMatrixCombine;
		getCudaTaskGrid2D( blockCount, threadCount, curTempMatrixHeight, widthNorm );
		BuildInputFromTempMatrixKernel<<<blockCount, threadCount>>>( desc, GetRaw( tempMatrix.GetHandle() ),
			curTempMatrixHeight, matrixWidth, GetRaw( inputDiff ), operation, widthNorm, tempMatrixHeightIndex );
		tempMatrixHeightIndex += curTempMatrixHeight;
	}
}
// Accumulates (adds to) the filter and free-term gradients.
// The filter gradient is tempMatrix^T x outputDiff, computed over im2col
// batches of the input; the free-term gradient is a row sum of outputDiff
// (or of the input itself when isFreeTermDiffFromInput is set).
void CCudaMathEngine::BlobConvolutionLearnAdd( const CConvolutionDesc& convDesc,
	const CFloatHandle& input, const CFloatHandle& outputDiff, const CFloatHandle& filterDiff,
	const CFloatHandle* freeTermDiff, bool isFreeTermDiffFromInput )
{
	SetCudaDevice( device->DeviceNumber );
	const CCudaConvolutionDescInternal& desc = static_cast<const CCudaConvolutionDesc&>( convDesc ).Internal;
	if( freeTermDiff != 0 ) {
		// Get the free term gradient
		if( !isFreeTermDiffFromInput ) {
			SumMatrixRowsAdd( 1, *freeTermDiff, outputDiff, desc.Result.BlobSize() / desc.Filter.ObjectCount(),
				desc.Filter.ObjectCount() );
		} else {
			// NOTE(review): this branch divides by Channels() only, without
			// Depth(), unlike the other blob-size computations — verify.
			SumMatrixRowsAdd( 1, *freeTermDiff, input, desc.Source.BlobSize() / desc.Source.Channels(),
				desc.Source.Channels() );
		}
	}
	// Build the temporary matrix
	const int matrixHeight = tempMatrixHeight( desc );
	const int matrixWidth = tempMatrixWidth( desc );
	const int filterCount = desc.Filter.ObjectCount();
	const int tempMatrixHeightBatchSize = getCudaTempMatrixMaxHeight( matrixHeight, matrixWidth );
	CFloatHandleStackVar tempMatrix( *this, tempMatrixHeightBatchSize * matrixWidth );
	int tempMatrixHeightIndex = 0;
	while( tempMatrixHeightIndex < matrixHeight ) {
		int curTempMatrixHeight = min( matrixHeight - tempMatrixHeightIndex, tempMatrixHeightBatchSize );
		dim3 blockCount;
		dim3 threadCount;
		getCudaTaskGrid2D( blockCount, threadCount, curTempMatrixHeight, desc.Source.Depth() * desc.Source.Channels() );
		// im2col for this batch of input rows.
		BuildTempMatrixKernel<<<blockCount, threadCount>>>( desc, GetRaw( input ), tempMatrixHeightIndex, curTempMatrixHeight,
			GetRaw( tempMatrix.GetHandle() ) );
		// Get the filter gradients by multiplying the temporary matrix and the output gradients
		MultiplyTransposedMatrixByMatrixAndAdd( outputDiff + tempMatrixHeightIndex * filterCount, curTempMatrixHeight,
			filterCount, filterCount, tempMatrix, matrixWidth, matrixWidth, filterDiff, matrixWidth, desc.Filter.BlobSize() );
		tempMatrixHeightIndex += curTempMatrixHeight;
	}
}
} // namespace NeoML
#endif // NEOML_USE_CUDA
|
52d1915655333795037b42c1fddf452fa76c834b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO Avro reader class implementation
**/
#include "reader_impl.hpp"
#include <io/comp/gpuinflate.h>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_buffer.hpp>
namespace cudf {
namespace experimental {
namespace io {
namespace detail {
namespace avro {
// Import functionality that's independent of legacy code
using namespace cudf::io::avro;
using namespace cudf::io;
namespace {
/**
 * @brief Maps an Avro schema entry's data kind to the matching cuDF type enum
 *
 * Unsupported kinds map to type_id::EMPTY so callers can filter them out.
 **/
type_id to_type_id(const avro::schema_entry *col) {
  switch (col->kind) {
    case avro::type_boolean: return type_id::BOOL8;
    case avro::type_int:     return type_id::INT32;
    case avro::type_long:    return type_id::INT64;
    case avro::type_float:   return type_id::FLOAT32;
    case avro::type_double:  return type_id::FLOAT64;
    case avro::type_bytes:
    case avro::type_string:  return type_id::STRING;
    case avro::type_enum:
      // An enum with a symbol table decodes to strings; without one it
      // degrades to its integer index
      if (col->symbols.empty()) { return type_id::INT32; }
      return type_id::STRING;
    default:                 return type_id::EMPTY;
  }
}
} // namespace
/**
 * @brief A helper wrapper for Avro file metadata. Provides some additional
 * convenience methods for initializing and accessing the metadata and schema
 **/
class metadata : public file_metadata {
public:
explicit metadata(datasource *const src) : source(src) {}
/**
 * @brief Initializes the parser and filters down to a subset of rows
 *
 * Reads the entire file through the datasource and parses the Avro container
 * metadata; on return the row window reflects what the parser selected.
 *
 * @param[in,out] row_start Starting row of the selection
 * @param[in,out] row_count Total number of rows selected
 **/
void init_and_select_rows(int &row_start, int &row_count) {
const auto buffer = source->get_buffer(0, source->size());
avro::container pod(buffer->data(), buffer->size());
CUDF_EXPECTS(pod.parse(this, row_count, row_start),
"Cannot parse metadata");
// Report back the window the parser actually chose
row_start = skip_rows;
row_count = num_rows;
}
/**
 * @brief Filters and reduces down to a selection of columns
 *
 * @param[in] use_names List of column names to select; if empty, every
 * column with a supported type is selected
 *
 * @return List of (avro column index, column name) pairs
 **/
auto select_columns(std::vector<std::string> use_names) {
std::vector<std::pair<int, std::string>> selection;
const auto num_avro_columns = static_cast<int>(columns.size());
if (!use_names.empty()) {
// Search for each requested name starting where the previous match left
// off, wrapping around once, so names listed in file order are found in a
// single pass; names with unsupported types are ignored
int index = 0;
for (const auto &use_name : use_names) {
for (int i = 0; i < num_avro_columns; ++i, ++index) {
if (index >= num_avro_columns) {
index = 0;
}
if (columns[index].name == use_name &&
type_id::EMPTY !=
to_type_id(&schema[columns[index].schema_data_idx])) {
selection.emplace_back(index, columns[index].name);
index++;
break;
}
}
}
} else {
// No explicit selection: take all columns, but fail on unsupported types
for (int i = 0; i < num_avro_columns; ++i) {
auto col_type = to_type_id(&schema[columns[i].schema_data_idx]);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unsupported data type");
selection.emplace_back(i, columns[i].name);
}
}
CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns");
return selection;
}
private:
datasource *const source;  // non-owning handle to the input data
};
/**
 * @brief Decompresses the avro data blocks ("deflate" or "snappy" codec) into
 * a new device buffer, updating `_metadata->block_list` so that block
 * offsets/sizes refer to the uncompressed data
 *
 * @param[in] comp_block_data Device buffer holding the compressed blocks
 * @param[in] stream Stream to use for all device work
 *
 * @return Device buffer holding the uncompressed blocks
 **/
rmm::device_buffer reader::impl::decompress_data(
const rmm::device_buffer &comp_block_data, hipStream_t stream) {
size_t uncompressed_data_size = 0;
hostdevice_vector<gpu_inflate_input_s> inflate_in(
_metadata->block_list.size());
hostdevice_vector<gpu_inflate_status_s> inflate_out(
_metadata->block_list.size());
if (_metadata->codec == "deflate") {
// Guess an initial maximum uncompressed block size (deflate does not store
// it); the second pass below grows the buffer if the guess was too small
uint32_t initial_blk_len = (_metadata->max_block_size * 2 + 0xfff) & ~0xfff;
uncompressed_data_size = initial_blk_len * _metadata->block_list.size();
for (size_t i = 0; i < inflate_in.size(); ++i) {
inflate_in[i].dstSize = initial_blk_len;
}
} else if (_metadata->codec == "snappy") {
// Extract the uncompressed length from the snappy stream: decoded here as
// a base-128 varint (7 bits per byte, up to 4 bytes) at the block start
for (size_t i = 0; i < _metadata->block_list.size(); i++) {
const auto buffer =
_source->get_buffer(_metadata->block_list[i].offset, 4);
const uint8_t *blk = buffer->data();
uint32_t blk_len = blk[0];
if (blk_len > 0x7f) {
blk_len = (blk_len & 0x7f) | (blk[1] << 7);
if (blk_len > 0x3fff) {
blk_len = (blk_len & 0x3fff) | (blk[2] << 14);
if (blk_len > 0x1fffff) {
blk_len = (blk_len & 0x1fffff) | (blk[3] << 21);
}
}
}
inflate_in[i].dstSize = blk_len;
uncompressed_data_size += blk_len;
}
} else {
CUDF_FAIL("Unsupported compression codec\n");
}
rmm::device_buffer decomp_block_data(uncompressed_data_size, stream);
// Set up the per-block source/destination ranges for the GPU decompressor
// and retarget the block list at the uncompressed layout
const auto base_offset = _metadata->block_list[0].offset;
for (size_t i = 0, dst_pos = 0; i < _metadata->block_list.size(); i++) {
const auto src_pos = _metadata->block_list[i].offset - base_offset;
inflate_in[i].srcDevice =
static_cast<const uint8_t *>(comp_block_data.data()) + src_pos;
inflate_in[i].srcSize = _metadata->block_list[i].size;
inflate_in[i].dstDevice =
static_cast<uint8_t *>(decomp_block_data.data()) + dst_pos;
// Update blocks offsets & sizes to refer to uncompressed data
_metadata->block_list[i].offset = dst_pos;
_metadata->block_list[i].size =
static_cast<uint32_t>(inflate_in[i].dstSize);
dst_pos += _metadata->block_list[i].size;
}
// At most two passes: the second pass only runs for deflate when the guessed
// output size turned out to be too small
for (int loop_cnt = 0; loop_cnt < 2; loop_cnt++) {
CUDA_TRY(hipMemcpyAsync(inflate_in.device_ptr(), inflate_in.host_ptr(),
inflate_in.memory_size(), hipMemcpyHostToDevice,
stream));
CUDA_TRY(hipMemsetAsync(inflate_out.device_ptr(), 0,
inflate_out.memory_size(), stream));
if (_metadata->codec == "deflate") {
CUDA_TRY(gpuinflate(inflate_in.device_ptr(), inflate_out.device_ptr(),
inflate_in.size(), 0, stream));
} else if (_metadata->codec == "snappy") {
CUDA_TRY(gpu_unsnap(inflate_in.device_ptr(), inflate_out.device_ptr(),
inflate_in.size(), stream));
} else {
CUDF_FAIL("Unsupported compression codec\n");
}
CUDA_TRY(hipMemcpyAsync(inflate_out.host_ptr(), inflate_out.device_ptr(),
inflate_out.memory_size(), hipMemcpyDeviceToHost,
stream));
CUDA_TRY(hipStreamSynchronize(stream));
// Check if larger output is required, as it's not known ahead of time
if (_metadata->codec == "deflate" && !loop_cnt) {
size_t actual_uncompressed_size = 0;
for (size_t i = 0; i < _metadata->block_list.size(); i++) {
// If error status is 1 (buffer too small), the `bytes_written` field
// actually contains the required uncompressed data size
if (inflate_out[i].status == 1 &&
inflate_out[i].bytes_written > inflate_in[i].dstSize) {
inflate_in[i].dstSize = inflate_out[i].bytes_written;
}
actual_uncompressed_size += inflate_in[i].dstSize;
}
if (actual_uncompressed_size > uncompressed_data_size) {
// Grow the output buffer, recompute destinations, then decompress again
decomp_block_data.resize(actual_uncompressed_size);
for (size_t i = 0, dst_pos = 0; i < _metadata->block_list.size(); i++) {
auto dst_base = static_cast<uint8_t *>(decomp_block_data.data());
inflate_in[i].dstDevice = dst_base + dst_pos;
_metadata->block_list[i].offset = dst_pos;
_metadata->block_list[i].size =
static_cast<uint32_t>(inflate_in[i].dstSize);
dst_pos += _metadata->block_list[i].size;
}
} else {
break;
}
} else {
break;
}
}
return decomp_block_data;
}
/**
 * @brief Decodes the avro data blocks into the output column buffers
 *
 * Builds a device-side schema descriptor array, launches the avro column
 * decoding kernel, resolves validity masks shared between columns, and reads
 * the per-column null counts back from the device.
 *
 * @param[in] block_data Device buffer with the (uncompressed) avro blocks
 * @param[in] dict Per-column pairs of (first dictionary index, entry count)
 * @param[in] global_dictionary Packed enum-symbol dictionary (string
 * descriptors followed by character data)
 * @param[in] total_dictionary_entries Total number of dictionary entries
 * @param[in] num_rows Number of rows to decode
 * @param[in] selection Selected (avro column index, column name) pairs
 * @param[in,out] out_buffers Destination column buffers
 * @param[in] stream Stream to use for all device work
 **/
void reader::impl::decode_data(
    const rmm::device_buffer &block_data,
    const std::vector<std::pair<uint32_t, uint32_t>> &dict,
    const hostdevice_vector<uint8_t> &global_dictionary,
    size_t total_dictionary_entries, size_t num_rows,
    std::vector<std::pair<int, std::string>> selection,
    std::vector<column_buffer> &out_buffers, hipStream_t stream) {
  // Build gpu schema and the minimum encoded size of one row
  hostdevice_vector<gpu::schemadesc_s> schema_desc(_metadata->schema.size());
  uint32_t min_row_data_size = 0;
  int skip_field_cnt = 0;
  for (size_t i = 0; i < _metadata->schema.size(); i++) {
    type_kind_e kind = _metadata->schema[i].kind;
    if (skip_field_cnt != 0) {
      // Exclude union members from min_row_data_size
      skip_field_cnt += _metadata->schema[i].num_children - 1;
    } else {
      switch (kind) {
        case type_union:
          skip_field_cnt = _metadata->schema[i].num_children;
          // fall through
        case type_boolean:
        case type_int:
        case type_long:
        case type_bytes:
        case type_string:
        case type_enum:
          min_row_data_size += 1;
          break;
        case type_float:
          min_row_data_size += 4;
          break;
        case type_double:
          min_row_data_size += 8;
          break;
        default:
          break;
      }
    }
    // An enum without a symbol table decodes as a plain int
    if (kind == type_enum && !_metadata->schema[i].symbols.size()) {
      kind = type_int;
    }
    schema_desc[i].kind = kind;
    schema_desc[i].count =
        (kind == type_enum) ? 0 : (uint32_t)_metadata->schema[i].num_children;
    schema_desc[i].dataptr = nullptr;
    CUDF_EXPECTS(kind != type_union || _metadata->schema[i].num_children < 2 ||
                     (_metadata->schema[i].num_children == 2 &&
                      (_metadata->schema[i + 1].kind == type_null ||
                       _metadata->schema[i + 2].kind == type_null)),
                 "Union with non-null type not currently supported");
  }
  // Attach the output buffers to the schema entries; when a null schema entry
  // is already claimed by another column, remember the alias so the shared
  // mask can be copied after decoding
  std::vector<void *> valid_alias(out_buffers.size(), nullptr);
  for (size_t i = 0; i < out_buffers.size(); i++) {
    const auto col_idx = selection[i].first;
    int schema_data_idx = _metadata->columns[col_idx].schema_data_idx;
    int schema_null_idx = _metadata->columns[col_idx].schema_null_idx;
    schema_desc[schema_data_idx].dataptr = out_buffers[i].data();
    if (schema_null_idx >= 0) {
      if (!schema_desc[schema_null_idx].dataptr) {
        schema_desc[schema_null_idx].dataptr = out_buffers[i].null_mask();
      } else {
        valid_alias[i] = schema_desc[schema_null_idx].dataptr;
      }
    }
    if (_metadata->schema[schema_data_idx].kind == type_enum) {
      schema_desc[schema_data_idx].count = dict[i].first;
    }
    // Preset the validity mask to all bits set (-1) before decoding
    CUDA_TRY(hipMemsetAsync(out_buffers[i].null_mask(), -1,
                            bitmask_allocation_size_bytes(num_rows), stream));
  }
  rmm::device_buffer block_list(
      _metadata->block_list.data(),
      _metadata->block_list.size() * sizeof(block_desc_s), stream);
  CUDA_TRY(hipMemcpyAsync(schema_desc.device_ptr(), schema_desc.host_ptr(),
                          schema_desc.memory_size(), hipMemcpyHostToDevice,
                          stream));
  CUDA_TRY(gpu::DecodeAvroColumnData(
      static_cast<block_desc_s *>(block_list.data()), schema_desc.device_ptr(),
      reinterpret_cast<gpu::nvstrdesc_s *>(global_dictionary.device_ptr()),
      static_cast<const uint8_t *>(block_data.data()),
      static_cast<uint32_t>(_metadata->block_list.size()),
      static_cast<uint32_t>(schema_desc.size()),
      static_cast<uint32_t>(total_dictionary_entries), _metadata->num_rows,
      _metadata->skip_rows, min_row_data_size, stream));
  // Copy valid bits that are shared between columns. Both source and
  // destination are device null masks, so the copy kind is device-to-device
  // (the previous host-to-device kind only worked thanks to unified virtual
  // addressing).
  for (size_t i = 0; i < out_buffers.size(); i++) {
    if (valid_alias[i] != nullptr) {
      CUDA_TRY(hipMemcpyAsync(out_buffers[i].null_mask(), valid_alias[i],
                              out_buffers[i].null_mask_size(),
                              hipMemcpyDeviceToDevice, stream));
    }
  }
  // Read the schema descriptors back: after decoding, the `count` field of a
  // null schema entry holds that column's null count (consumed below)
  CUDA_TRY(hipMemcpyAsync(schema_desc.host_ptr(), schema_desc.device_ptr(),
                          schema_desc.memory_size(), hipMemcpyDeviceToHost,
                          stream));
  CUDA_TRY(hipStreamSynchronize(stream));
  for (size_t i = 0; i < out_buffers.size(); i++) {
    const auto col_idx = selection[i].first;
    const auto schema_null_idx = _metadata->columns[col_idx].schema_null_idx;
    out_buffers[i].null_count() =
        (schema_null_idx >= 0) ? schema_desc[schema_null_idx].count : 0;
  }
}
/**
 * @brief Constructor from a datasource: takes ownership of the source and
 * parses the Avro file metadata up front
 **/
reader::impl::impl(std::unique_ptr<datasource> source,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _source(std::move(source)), _mr(mr), _columns(options.columns) {
// Open the source Avro dataset metadata
_metadata = std::make_unique<metadata>(_source.get());
}
/**
 * @brief Reads the requested row range and selected columns, returning the
 * materialized table and its metadata (column names + user metadata)
 *
 * @param[in] skip_rows Number of rows to skip from the beginning
 * @param[in] num_rows Number of rows to read (-1 for all remaining rows)
 * @param[in] stream Stream to use for all device work
 **/
table_with_metadata reader::impl::read(int skip_rows, int num_rows,
hipStream_t stream) {
std::vector<std::unique_ptr<column>> out_columns;
table_metadata metadata_out;
// Select and read partial metadata / schema within the subset of rows
_metadata->init_and_select_rows(skip_rows, num_rows);
// Select only columns required by the options
auto selected_columns = _metadata->select_columns(_columns);
if (selected_columns.size() != 0) {
// Get a list of column data types
std::vector<data_type> column_types;
for (const auto &col : selected_columns) {
auto &col_schema =
_metadata->schema[_metadata->columns[col.first].schema_data_idx];
auto col_type = to_type_id(&col_schema);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
column_types.emplace_back(col_type);
}
if (_metadata->total_data_size > 0) {
const auto buffer = _source->get_buffer(_metadata->block_list[0].offset,
_metadata->total_data_size);
rmm::device_buffer block_data(buffer->data(), buffer->size(), stream);
if (_metadata->codec != "" && _metadata->codec != "null") {
// Decompressing also rewrites block offsets/sizes inside the metadata
auto decomp_block_data = decompress_data(block_data, stream);
block_data = std::move(decomp_block_data);
} else {
// Uncompressed: rebase block offsets relative to the copied buffer
auto dst_ofs = _metadata->block_list[0].offset;
for (size_t i = 0; i < _metadata->block_list.size(); i++) {
_metadata->block_list[i].offset -= dst_ofs;
}
}
// Count the enum-symbol dictionary entries and their total character size
size_t total_dictionary_entries = 0;
size_t dictionary_data_size = 0;
std::vector<std::pair<uint32_t, uint32_t>> dict(column_types.size());
for (size_t i = 0; i < column_types.size(); ++i) {
auto col_idx = selected_columns[i].first;
auto &col_schema =
_metadata->schema[_metadata->columns[col_idx].schema_data_idx];
dict[i].first = static_cast<uint32_t>(total_dictionary_entries);
dict[i].second = static_cast<uint32_t>(col_schema.symbols.size());
total_dictionary_entries += dict[i].second;
for (const auto &sym : col_schema.symbols) {
dictionary_data_size += sym.length();
}
}
// Dictionary layout: an array of nvstrdesc_s descriptors followed by the
// packed symbol characters
hostdevice_vector<uint8_t> global_dictionary(
total_dictionary_entries * sizeof(gpu::nvstrdesc_s) +
dictionary_data_size);
if (total_dictionary_entries > 0) {
size_t dict_pos = total_dictionary_entries * sizeof(gpu::nvstrdesc_s);
for (size_t i = 0; i < column_types.size(); ++i) {
auto col_idx = selected_columns[i].first;
auto &col_schema =
_metadata->schema[_metadata->columns[col_idx].schema_data_idx];
auto index = &(reinterpret_cast<gpu::nvstrdesc_s *>(
global_dictionary.host_ptr()))[dict[i].first];
for (size_t j = 0; j < dict[i].second; j++) {
size_t len = col_schema.symbols[j].length();
// The descriptor stores the *device* address of the characters even
// though they are staged on the host: the whole buffer is copied to
// the device below, preserving the offsets
char *ptr = reinterpret_cast<char *>(
global_dictionary.device_ptr() + dict_pos);
index[j].ptr = ptr;
index[j].count = len;
memcpy(global_dictionary.host_ptr() + dict_pos,
col_schema.symbols[j].c_str(), len);
dict_pos += len;
}
}
CUDA_TRY(hipMemcpyAsync(
global_dictionary.device_ptr(), global_dictionary.host_ptr(),
global_dictionary.memory_size(), hipMemcpyHostToDevice, stream));
}
// Decode each selected column into its buffer, then wrap into columns
std::vector<column_buffer> out_buffers;
for (size_t i = 0; i < column_types.size(); ++i) {
out_buffers.emplace_back(column_types[i], num_rows, stream, _mr);
}
decode_data(block_data, dict, global_dictionary, total_dictionary_entries,
num_rows, selected_columns, out_buffers, stream);
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_column(column_types[i], num_rows,
out_buffers[i], stream, _mr));
}
}
}
// Return column names (must match order of returned columns)
metadata_out.column_names.resize(selected_columns.size());
for (size_t i = 0; i < selected_columns.size(); i++) {
metadata_out.column_names[i] = selected_columns[i].second;
}
// Return user metadata
metadata_out.user_data = _metadata->user_data;
return { std::make_unique<table>(std::move(out_columns)), std::move(metadata_out) };
}
// Forward to implementation: construct a reader over a file on disk
reader::reader(std::string filepath, reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(datasource::create(filepath), options, mr)) {
}
// Forward to implementation: construct a reader over an in-memory buffer
reader::reader(const char *buffer, size_t length, reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(datasource::create(buffer, length), options,
mr)) {}
// Forward to implementation: construct a reader over an Arrow random-access file
reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(datasource::create(file), options, mr)) {}
// Destructor within this translation unit, where `impl` is a complete type
// (needed by the std::unique_ptr<impl> member)
reader::~reader() = default;
// Forward to implementation: read every row
table_with_metadata reader::read_all(hipStream_t stream) {
return _impl->read(0, -1, stream);
}
// Forward to implementation: read a row range (num_rows == 0 means "to the end")
table_with_metadata reader::read_rows(size_type skip_rows,
size_type num_rows,
hipStream_t stream) {
return _impl->read(skip_rows, (num_rows != 0) ? num_rows : -1, stream);
}
} // namespace avro
} // namespace detail
} // namespace io
} // namespace experimental
} // namespace cudf
| 52d1915655333795037b42c1fddf452fa76c834b.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO Avro reader class implementation
**/
#include "reader_impl.hpp"
#include <io/comp/gpuinflate.h>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_buffer.hpp>
namespace cudf {
namespace experimental {
namespace io {
namespace detail {
namespace avro {
// Import functionality that's independent of legacy code
using namespace cudf::io::avro;
using namespace cudf::io;
namespace {
/**
 * @brief Maps an Avro schema entry's data kind to the matching cuDF type enum
 *
 * Unsupported kinds map to type_id::EMPTY so callers can filter them out.
 **/
type_id to_type_id(const avro::schema_entry *col) {
  switch (col->kind) {
    case avro::type_boolean: return type_id::BOOL8;
    case avro::type_int:     return type_id::INT32;
    case avro::type_long:    return type_id::INT64;
    case avro::type_float:   return type_id::FLOAT32;
    case avro::type_double:  return type_id::FLOAT64;
    case avro::type_bytes:
    case avro::type_string:  return type_id::STRING;
    case avro::type_enum:
      // An enum with a symbol table decodes to strings; without one it
      // degrades to its integer index
      if (col->symbols.empty()) { return type_id::INT32; }
      return type_id::STRING;
    default:                 return type_id::EMPTY;
  }
}
} // namespace
/**
 * @brief A helper wrapper for Avro file metadata. Provides some additional
 * convenience methods for initializing and accessing the metadata and schema
 **/
class metadata : public file_metadata {
public:
explicit metadata(datasource *const src) : source(src) {}
/**
 * @brief Initializes the parser and filters down to a subset of rows
 *
 * Reads the entire file through the datasource and parses the Avro container
 * metadata; on return the row window reflects what the parser selected.
 *
 * @param[in,out] row_start Starting row of the selection
 * @param[in,out] row_count Total number of rows selected
 **/
void init_and_select_rows(int &row_start, int &row_count) {
const auto buffer = source->get_buffer(0, source->size());
avro::container pod(buffer->data(), buffer->size());
CUDF_EXPECTS(pod.parse(this, row_count, row_start),
"Cannot parse metadata");
// Report back the window the parser actually chose
row_start = skip_rows;
row_count = num_rows;
}
/**
 * @brief Filters and reduces down to a selection of columns
 *
 * @param[in] use_names List of column names to select; if empty, every
 * column with a supported type is selected
 *
 * @return List of (avro column index, column name) pairs
 **/
auto select_columns(std::vector<std::string> use_names) {
std::vector<std::pair<int, std::string>> selection;
const auto num_avro_columns = static_cast<int>(columns.size());
if (!use_names.empty()) {
// Search for each requested name starting where the previous match left
// off, wrapping around once, so names listed in file order are found in a
// single pass; names with unsupported types are ignored
int index = 0;
for (const auto &use_name : use_names) {
for (int i = 0; i < num_avro_columns; ++i, ++index) {
if (index >= num_avro_columns) {
index = 0;
}
if (columns[index].name == use_name &&
type_id::EMPTY !=
to_type_id(&schema[columns[index].schema_data_idx])) {
selection.emplace_back(index, columns[index].name);
index++;
break;
}
}
}
} else {
// No explicit selection: take all columns, but fail on unsupported types
for (int i = 0; i < num_avro_columns; ++i) {
auto col_type = to_type_id(&schema[columns[i].schema_data_idx]);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unsupported data type");
selection.emplace_back(i, columns[i].name);
}
}
CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns");
return selection;
}
private:
datasource *const source;  // non-owning handle to the input data
};
/**
 * @brief Decompresses the avro data blocks ("deflate" or "snappy" codec) into
 * a new device buffer, updating `_metadata->block_list` so that block
 * offsets/sizes refer to the uncompressed data
 *
 * @param[in] comp_block_data Device buffer holding the compressed blocks
 * @param[in] stream CUDA stream to use for all device work
 *
 * @return Device buffer holding the uncompressed blocks
 **/
rmm::device_buffer reader::impl::decompress_data(
const rmm::device_buffer &comp_block_data, cudaStream_t stream) {
size_t uncompressed_data_size = 0;
hostdevice_vector<gpu_inflate_input_s> inflate_in(
_metadata->block_list.size());
hostdevice_vector<gpu_inflate_status_s> inflate_out(
_metadata->block_list.size());
if (_metadata->codec == "deflate") {
// Guess an initial maximum uncompressed block size (deflate does not store
// it); the second pass below grows the buffer if the guess was too small
uint32_t initial_blk_len = (_metadata->max_block_size * 2 + 0xfff) & ~0xfff;
uncompressed_data_size = initial_blk_len * _metadata->block_list.size();
for (size_t i = 0; i < inflate_in.size(); ++i) {
inflate_in[i].dstSize = initial_blk_len;
}
} else if (_metadata->codec == "snappy") {
// Extract the uncompressed length from the snappy stream: decoded here as
// a base-128 varint (7 bits per byte, up to 4 bytes) at the block start
for (size_t i = 0; i < _metadata->block_list.size(); i++) {
const auto buffer =
_source->get_buffer(_metadata->block_list[i].offset, 4);
const uint8_t *blk = buffer->data();
uint32_t blk_len = blk[0];
if (blk_len > 0x7f) {
blk_len = (blk_len & 0x7f) | (blk[1] << 7);
if (blk_len > 0x3fff) {
blk_len = (blk_len & 0x3fff) | (blk[2] << 14);
if (blk_len > 0x1fffff) {
blk_len = (blk_len & 0x1fffff) | (blk[3] << 21);
}
}
}
inflate_in[i].dstSize = blk_len;
uncompressed_data_size += blk_len;
}
} else {
CUDF_FAIL("Unsupported compression codec\n");
}
rmm::device_buffer decomp_block_data(uncompressed_data_size, stream);
// Set up the per-block source/destination ranges for the GPU decompressor
// and retarget the block list at the uncompressed layout
const auto base_offset = _metadata->block_list[0].offset;
for (size_t i = 0, dst_pos = 0; i < _metadata->block_list.size(); i++) {
const auto src_pos = _metadata->block_list[i].offset - base_offset;
inflate_in[i].srcDevice =
static_cast<const uint8_t *>(comp_block_data.data()) + src_pos;
inflate_in[i].srcSize = _metadata->block_list[i].size;
inflate_in[i].dstDevice =
static_cast<uint8_t *>(decomp_block_data.data()) + dst_pos;
// Update blocks offsets & sizes to refer to uncompressed data
_metadata->block_list[i].offset = dst_pos;
_metadata->block_list[i].size =
static_cast<uint32_t>(inflate_in[i].dstSize);
dst_pos += _metadata->block_list[i].size;
}
// At most two passes: the second pass only runs for deflate when the guessed
// output size turned out to be too small
for (int loop_cnt = 0; loop_cnt < 2; loop_cnt++) {
CUDA_TRY(cudaMemcpyAsync(inflate_in.device_ptr(), inflate_in.host_ptr(),
inflate_in.memory_size(), cudaMemcpyHostToDevice,
stream));
CUDA_TRY(cudaMemsetAsync(inflate_out.device_ptr(), 0,
inflate_out.memory_size(), stream));
if (_metadata->codec == "deflate") {
CUDA_TRY(gpuinflate(inflate_in.device_ptr(), inflate_out.device_ptr(),
inflate_in.size(), 0, stream));
} else if (_metadata->codec == "snappy") {
CUDA_TRY(gpu_unsnap(inflate_in.device_ptr(), inflate_out.device_ptr(),
inflate_in.size(), stream));
} else {
CUDF_FAIL("Unsupported compression codec\n");
}
CUDA_TRY(cudaMemcpyAsync(inflate_out.host_ptr(), inflate_out.device_ptr(),
inflate_out.memory_size(), cudaMemcpyDeviceToHost,
stream));
CUDA_TRY(cudaStreamSynchronize(stream));
// Check if larger output is required, as it's not known ahead of time
if (_metadata->codec == "deflate" && !loop_cnt) {
size_t actual_uncompressed_size = 0;
for (size_t i = 0; i < _metadata->block_list.size(); i++) {
// If error status is 1 (buffer too small), the `bytes_written` field
// actually contains the required uncompressed data size
if (inflate_out[i].status == 1 &&
inflate_out[i].bytes_written > inflate_in[i].dstSize) {
inflate_in[i].dstSize = inflate_out[i].bytes_written;
}
actual_uncompressed_size += inflate_in[i].dstSize;
}
if (actual_uncompressed_size > uncompressed_data_size) {
// Grow the output buffer, recompute destinations, then decompress again
decomp_block_data.resize(actual_uncompressed_size);
for (size_t i = 0, dst_pos = 0; i < _metadata->block_list.size(); i++) {
auto dst_base = static_cast<uint8_t *>(decomp_block_data.data());
inflate_in[i].dstDevice = dst_base + dst_pos;
_metadata->block_list[i].offset = dst_pos;
_metadata->block_list[i].size =
static_cast<uint32_t>(inflate_in[i].dstSize);
dst_pos += _metadata->block_list[i].size;
}
} else {
break;
}
} else {
break;
}
}
return decomp_block_data;
}
/**
 * @brief Decodes the avro data blocks into the output column buffers
 *
 * Builds a device-side schema descriptor array, launches the avro column
 * decoding kernel, resolves validity masks shared between columns, and reads
 * the per-column null counts back from the device.
 *
 * @param[in] block_data Device buffer with the (uncompressed) avro blocks
 * @param[in] dict Per-column pairs of (first dictionary index, entry count)
 * @param[in] global_dictionary Packed enum-symbol dictionary (string
 * descriptors followed by character data)
 * @param[in] total_dictionary_entries Total number of dictionary entries
 * @param[in] num_rows Number of rows to decode
 * @param[in] selection Selected (avro column index, column name) pairs
 * @param[in,out] out_buffers Destination column buffers
 * @param[in] stream CUDA stream to use for all device work
 **/
void reader::impl::decode_data(
    const rmm::device_buffer &block_data,
    const std::vector<std::pair<uint32_t, uint32_t>> &dict,
    const hostdevice_vector<uint8_t> &global_dictionary,
    size_t total_dictionary_entries, size_t num_rows,
    std::vector<std::pair<int, std::string>> selection,
    std::vector<column_buffer> &out_buffers, cudaStream_t stream) {
  // Build gpu schema and the minimum encoded size of one row
  hostdevice_vector<gpu::schemadesc_s> schema_desc(_metadata->schema.size());
  uint32_t min_row_data_size = 0;
  int skip_field_cnt = 0;
  for (size_t i = 0; i < _metadata->schema.size(); i++) {
    type_kind_e kind = _metadata->schema[i].kind;
    if (skip_field_cnt != 0) {
      // Exclude union members from min_row_data_size
      skip_field_cnt += _metadata->schema[i].num_children - 1;
    } else {
      switch (kind) {
        case type_union:
          skip_field_cnt = _metadata->schema[i].num_children;
          // fall through
        case type_boolean:
        case type_int:
        case type_long:
        case type_bytes:
        case type_string:
        case type_enum:
          min_row_data_size += 1;
          break;
        case type_float:
          min_row_data_size += 4;
          break;
        case type_double:
          min_row_data_size += 8;
          break;
        default:
          break;
      }
    }
    // An enum without a symbol table decodes as a plain int
    if (kind == type_enum && !_metadata->schema[i].symbols.size()) {
      kind = type_int;
    }
    schema_desc[i].kind = kind;
    schema_desc[i].count =
        (kind == type_enum) ? 0 : (uint32_t)_metadata->schema[i].num_children;
    schema_desc[i].dataptr = nullptr;
    CUDF_EXPECTS(kind != type_union || _metadata->schema[i].num_children < 2 ||
                     (_metadata->schema[i].num_children == 2 &&
                      (_metadata->schema[i + 1].kind == type_null ||
                       _metadata->schema[i + 2].kind == type_null)),
                 "Union with non-null type not currently supported");
  }
  // Attach the output buffers to the schema entries; when a null schema entry
  // is already claimed by another column, remember the alias so the shared
  // mask can be copied after decoding
  std::vector<void *> valid_alias(out_buffers.size(), nullptr);
  for (size_t i = 0; i < out_buffers.size(); i++) {
    const auto col_idx = selection[i].first;
    int schema_data_idx = _metadata->columns[col_idx].schema_data_idx;
    int schema_null_idx = _metadata->columns[col_idx].schema_null_idx;
    schema_desc[schema_data_idx].dataptr = out_buffers[i].data();
    if (schema_null_idx >= 0) {
      if (!schema_desc[schema_null_idx].dataptr) {
        schema_desc[schema_null_idx].dataptr = out_buffers[i].null_mask();
      } else {
        valid_alias[i] = schema_desc[schema_null_idx].dataptr;
      }
    }
    if (_metadata->schema[schema_data_idx].kind == type_enum) {
      schema_desc[schema_data_idx].count = dict[i].first;
    }
    // Preset the validity mask to all bits set (-1) before decoding
    CUDA_TRY(cudaMemsetAsync(out_buffers[i].null_mask(), -1,
                             bitmask_allocation_size_bytes(num_rows), stream));
  }
  rmm::device_buffer block_list(
      _metadata->block_list.data(),
      _metadata->block_list.size() * sizeof(block_desc_s), stream);
  CUDA_TRY(cudaMemcpyAsync(schema_desc.device_ptr(), schema_desc.host_ptr(),
                           schema_desc.memory_size(), cudaMemcpyHostToDevice,
                           stream));
  CUDA_TRY(gpu::DecodeAvroColumnData(
      static_cast<block_desc_s *>(block_list.data()), schema_desc.device_ptr(),
      reinterpret_cast<gpu::nvstrdesc_s *>(global_dictionary.device_ptr()),
      static_cast<const uint8_t *>(block_data.data()),
      static_cast<uint32_t>(_metadata->block_list.size()),
      static_cast<uint32_t>(schema_desc.size()),
      static_cast<uint32_t>(total_dictionary_entries), _metadata->num_rows,
      _metadata->skip_rows, min_row_data_size, stream));
  // Copy valid bits that are shared between columns. Both source and
  // destination are device null masks, so the copy kind is device-to-device
  // (the previous host-to-device kind only worked thanks to unified virtual
  // addressing).
  for (size_t i = 0; i < out_buffers.size(); i++) {
    if (valid_alias[i] != nullptr) {
      CUDA_TRY(cudaMemcpyAsync(out_buffers[i].null_mask(), valid_alias[i],
                               out_buffers[i].null_mask_size(),
                               cudaMemcpyDeviceToDevice, stream));
    }
  }
  // Read the schema descriptors back: after decoding, the `count` field of a
  // null schema entry holds that column's null count (consumed below)
  CUDA_TRY(cudaMemcpyAsync(schema_desc.host_ptr(), schema_desc.device_ptr(),
                           schema_desc.memory_size(), cudaMemcpyDeviceToHost,
                           stream));
  CUDA_TRY(cudaStreamSynchronize(stream));
  for (size_t i = 0; i < out_buffers.size(); i++) {
    const auto col_idx = selection[i].first;
    const auto schema_null_idx = _metadata->columns[col_idx].schema_null_idx;
    out_buffers[i].null_count() =
        (schema_null_idx >= 0) ? schema_desc[schema_null_idx].count : 0;
  }
}
/**
 * @brief Constructor from a datasource: takes ownership of the source and
 * parses the Avro file metadata up front
 **/
reader::impl::impl(std::unique_ptr<datasource> source,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _source(std::move(source)), _mr(mr), _columns(options.columns) {
// Open the source Avro dataset metadata
_metadata = std::make_unique<metadata>(_source.get());
}
/**
 * @brief Reads the requested row range and selected columns, returning the
 * materialized table and its metadata (column names + user metadata)
 *
 * @param[in] skip_rows Number of rows to skip from the beginning
 * @param[in] num_rows Number of rows to read (-1 for all remaining rows)
 * @param[in] stream CUDA stream to use for all device work
 **/
table_with_metadata reader::impl::read(int skip_rows, int num_rows,
cudaStream_t stream) {
std::vector<std::unique_ptr<column>> out_columns;
table_metadata metadata_out;
// Select and read partial metadata / schema within the subset of rows
_metadata->init_and_select_rows(skip_rows, num_rows);
// Select only columns required by the options
auto selected_columns = _metadata->select_columns(_columns);
if (selected_columns.size() != 0) {
// Get a list of column data types
std::vector<data_type> column_types;
for (const auto &col : selected_columns) {
auto &col_schema =
_metadata->schema[_metadata->columns[col.first].schema_data_idx];
auto col_type = to_type_id(&col_schema);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
column_types.emplace_back(col_type);
}
if (_metadata->total_data_size > 0) {
const auto buffer = _source->get_buffer(_metadata->block_list[0].offset,
_metadata->total_data_size);
rmm::device_buffer block_data(buffer->data(), buffer->size(), stream);
if (_metadata->codec != "" && _metadata->codec != "null") {
// Decompressing also rewrites block offsets/sizes inside the metadata
auto decomp_block_data = decompress_data(block_data, stream);
block_data = std::move(decomp_block_data);
} else {
// Uncompressed: rebase block offsets relative to the copied buffer
auto dst_ofs = _metadata->block_list[0].offset;
for (size_t i = 0; i < _metadata->block_list.size(); i++) {
_metadata->block_list[i].offset -= dst_ofs;
}
}
// Count the enum-symbol dictionary entries and their total character size
size_t total_dictionary_entries = 0;
size_t dictionary_data_size = 0;
std::vector<std::pair<uint32_t, uint32_t>> dict(column_types.size());
for (size_t i = 0; i < column_types.size(); ++i) {
auto col_idx = selected_columns[i].first;
auto &col_schema =
_metadata->schema[_metadata->columns[col_idx].schema_data_idx];
dict[i].first = static_cast<uint32_t>(total_dictionary_entries);
dict[i].second = static_cast<uint32_t>(col_schema.symbols.size());
total_dictionary_entries += dict[i].second;
for (const auto &sym : col_schema.symbols) {
dictionary_data_size += sym.length();
}
}
// Dictionary layout: an array of nvstrdesc_s descriptors followed by the
// packed symbol characters
hostdevice_vector<uint8_t> global_dictionary(
total_dictionary_entries * sizeof(gpu::nvstrdesc_s) +
dictionary_data_size);
if (total_dictionary_entries > 0) {
size_t dict_pos = total_dictionary_entries * sizeof(gpu::nvstrdesc_s);
for (size_t i = 0; i < column_types.size(); ++i) {
auto col_idx = selected_columns[i].first;
auto &col_schema =
_metadata->schema[_metadata->columns[col_idx].schema_data_idx];
auto index = &(reinterpret_cast<gpu::nvstrdesc_s *>(
global_dictionary.host_ptr()))[dict[i].first];
for (size_t j = 0; j < dict[i].second; j++) {
size_t len = col_schema.symbols[j].length();
// The descriptor stores the *device* address of the characters even
// though they are staged on the host: the whole buffer is copied to
// the device below, preserving the offsets
char *ptr = reinterpret_cast<char *>(
global_dictionary.device_ptr() + dict_pos);
index[j].ptr = ptr;
index[j].count = len;
memcpy(global_dictionary.host_ptr() + dict_pos,
col_schema.symbols[j].c_str(), len);
dict_pos += len;
}
}
CUDA_TRY(cudaMemcpyAsync(
global_dictionary.device_ptr(), global_dictionary.host_ptr(),
global_dictionary.memory_size(), cudaMemcpyHostToDevice, stream));
}
// Decode each selected column into its buffer, then wrap into columns
std::vector<column_buffer> out_buffers;
for (size_t i = 0; i < column_types.size(); ++i) {
out_buffers.emplace_back(column_types[i], num_rows, stream, _mr);
}
decode_data(block_data, dict, global_dictionary, total_dictionary_entries,
num_rows, selected_columns, out_buffers, stream);
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_column(column_types[i], num_rows,
out_buffers[i], stream, _mr));
}
}
}
// Return column names (must match order of returned columns)
metadata_out.column_names.resize(selected_columns.size());
for (size_t i = 0; i < selected_columns.size(); i++) {
metadata_out.column_names[i] = selected_columns[i].second;
}
// Return user metadata
metadata_out.user_data = _metadata->user_data;
return { std::make_unique<table>(std::move(out_columns)), std::move(metadata_out) };
}
// Forward to implementation: open the Avro file at `filepath` as a datasource.
reader::reader(std::string filepath, reader_options const &options,
               rmm::mr::device_memory_resource *mr)
    : _impl(std::make_unique<impl>(datasource::create(filepath), options, mr)) {
}
// Forward to implementation: read from an in-memory buffer of `length` bytes.
reader::reader(const char *buffer, size_t length, reader_options const &options,
               rmm::mr::device_memory_resource *mr)
    : _impl(std::make_unique<impl>(datasource::create(buffer, length), options,
                                   mr)) {}
// Forward to implementation: read through an Arrow random-access file handle.
reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file,
               reader_options const &options,
               rmm::mr::device_memory_resource *mr)
    : _impl(std::make_unique<impl>(datasource::create(file), options, mr)) {}
// Destructor within this translation unit, where `impl` is a complete type
// (required for the pimpl's unique_ptr to be destroyed).
reader::~reader() = default;
// Forward to implementation: read every row (skip 0 rows; -1 = no row limit).
table_with_metadata reader::read_all(cudaStream_t stream) {
  return _impl->read(0, -1, stream);
}
// Forward to implementation: read `num_rows` rows starting at `skip_rows`.
// A num_rows of 0 is mapped to -1, i.e. "no limit", matching read_all above.
table_with_metadata reader::read_rows(size_type skip_rows,
                                      size_type num_rows,
                                      cudaStream_t stream) {
  return _impl->read(skip_rows, (num_rows != 0) ? num_rows : -1, stream);
}
} // namespace avro
} // namespace detail
} // namespace io
} // namespace experimental
} // namespace cudf
|
ecfd4e2b30b0b2bd35ddf3aeb7a1bcd9c781daeb.hip | // !!! This is a file automatically generated by hipify!!!
/*
Calculate nodal forces induced by a non-singular straight segment of dislocation on a linear rectangular surface element.
Notations and details can be found in S. Queyreau, J. Marian, B.D. Wirth, A. Arsenlis, MSMSE, 22(3):035004, (2014).
Translated from matlab code into C by Daniel Celis Garza.
Parallelised by Daniel Celis Garza
Edits: June 1, 2017.
Parallelisaion: August 31, 2017
*/
#define _USE_MATH_DEFINES
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "helper_cuda.h"
#include <mex.h>
#include "vector_utils.h"
#include "serial_forces_lin_rect.h"
#include "cuda_vector_map_utils.h"
#include "cuda_forces_lin_rect.h"
void mexFunction(int nlhs, mxArray *plhs[],
    int nrhs, const mxArray *prhs[]){
    /*
       MEX gateway: nodal forces induced by non-singular straight dislocation
       segments on linear rectangular surface elements, computed on the GPU.
       prhs[0..1]  : dislocation segment endpoint coordinate arrays.
       prhs[2..5]  : coordinates of the four nodes of each surface element.
       prhs[6]     : Burgers vectors.
       prhs[7..9]  : shear modulus mu, Poisson ratio nu, core parameter a.
       prhs[10..11]: n_se (surface elements), n_dln (dislocation segments).
       prhs[12]    : GPU threads per block.
       prhs[13]    : parallelisation scheme (1 = one thread per dislocation,
                     otherwise one thread per surface element).
       prhs[14]    : tolerance for detecting segments parallel to an element.
       plhs[0..3]  : nodal force arrays (one per surface-element node).
       plhs[4]     : total force per surface element.
    */
    // Node arrays from MATLAB. To be mapped into x_se_arr and then passed to d_x_se_arr.
    double *dln_node_arr[2], *se_node_arr[n_nodes];
    // Scratch variables for the special case where segment and element are parallel.
    double x1[3], x2[3], x3[3], x4[3], x5[3], x6[3], b[3], p[3], q[3], t[3], n[3], p_norm, q_norm;
    double *fx[n_nodes];
    double ftot[3];
    // Burgers vector array from MATLAB. Passed straight to the device.
    double *b_arr[1];
    // Material properties from MATLAB, copied to device constant memory.
    double mu, nu, a, a_sq, one_m_nu, factor;
    // Flattened nodal force array (3 coords per node per SE, 3*n_nodes*n_se),
    // inversely mapped back into fx_arr[n_nodes].
    double *x_fx_arr;
    // Nodal force arrays to be sent back to MATLAB.
    double *fx_arr[n_nodes];
    // Total force per SE (3 coords per SE, 3*n_se) for MATLAB, plus its flat mirror.
    double *ftot_arr, *x_ftot_arr;
    // Flattened (1D) host maps of the SE, dislocation and Burgers arrays.
    double *x_se_arr, *x_dln_arr, *x_b_arr;
    // Device arrays.
    double *d_x_b_arr, *d_x_se_arr, *d_x_dln_arr, *d_fx_arr, *d_ftot_arr;
    double eps;
    int threads_per_block, blocks_per_grid, n_se, n_dln, para_scheme;
    checkCudaErrors( hipSetDevice(0) );
    // Stagger API calls so host work overlaps the asynchronous device work.
    n_se = (int) mxGetScalar(prhs[10]);
    // Allocate and zero the device force accumulators.
    checkCudaErrors( hipMalloc( (void **) &d_fx_arr , 3 * n_se * n_nodes * sizeof(double) ) );
    checkCudaErrors( hipMalloc( (void **) &d_ftot_arr, 3 * n_se * sizeof(double) ) );
    // Memset takes an int byte value; 0 correctly zeroes IEEE doubles.
    checkCudaErrors( hipMemsetAsync(d_fx_arr , 0, 3 * n_se * n_nodes * sizeof(double)) );
    checkCudaErrors( hipMemsetAsync(d_ftot_arr, 0, 3 * n_se * sizeof(double)) );
    // Execute host code while the device zeroes the force arrays.
    n_dln = (int) mxGetScalar(prhs[11]);
    dln_node_arr[0] = (double *) mxGetPr(prhs[0]);
    dln_node_arr[1] = (double *) mxGetPr(prhs[1]);
    se_node_arr[0] = (double *) mxGetPr(prhs[2]);
    se_node_arr[1] = (double *) mxGetPr(prhs[3]);
    se_node_arr[2] = (double *) mxGetPr(prhs[4]);
    se_node_arr[3] = (double *) mxGetPr(prhs[5]);
    b_arr[0]       = (double *) mxGetPr(prhs[6]);
    para_scheme = (int)    mxGetScalar(prhs[13]);
    eps         = (double) mxGetScalar(prhs[14]);
    // Map node/Burgers arrays to flat 1D layouts suited to the chosen scheme,
    // then stage the asynchronous host-to-device copies.
    if(para_scheme == 1){
        x_dln_arr = element_host_device_map(dln_node_arr, n_dln, 2);
        checkCudaErrors( hipMalloc ( (void **) &d_x_dln_arr, 3 * n_dln * 2 * sizeof(double) ) );
        checkCudaErrors( hipMemcpyAsync(d_x_dln_arr, x_dln_arr, 3 * n_dln * 2 * sizeof(double), hipMemcpyHostToDevice) );
        x_se_arr = se_host_device_map(se_node_arr[0], se_node_arr[1], se_node_arr[2], se_node_arr[3], n_se);
        checkCudaErrors( hipMalloc ( (void **) &d_x_se_arr, 3 * n_se * n_nodes * sizeof(double) ) );
        checkCudaErrors( hipMemcpyAsync(d_x_se_arr, x_se_arr, 3 * n_se * n_nodes * sizeof(double), hipMemcpyHostToDevice) );
        x_b_arr = element_host_device_map(b_arr, n_dln, 1);
        checkCudaErrors( hipMalloc ( (void **) &d_x_b_arr, 3 * n_dln * sizeof(double) ) );
        checkCudaErrors( hipMemcpyAsync(d_x_b_arr, x_b_arr, 3 * n_dln * sizeof(double), hipMemcpyHostToDevice) );
    }
    else{
        x_dln_arr = dln_host_device_map(dln_node_arr[0], dln_node_arr[1], n_dln);
        checkCudaErrors( hipMalloc ( (void **) &d_x_dln_arr, 3 * n_dln * 2 * sizeof(double) ) );
        checkCudaErrors( hipMemcpyAsync(d_x_dln_arr, x_dln_arr, 3 * n_dln * 2 * sizeof(double), hipMemcpyHostToDevice) );
        x_se_arr = element_host_device_map(se_node_arr, n_se, n_nodes);
        checkCudaErrors( hipMalloc ( (void **) &d_x_se_arr, 3 * n_se * n_nodes * sizeof(double) ) );
        checkCudaErrors( hipMemcpyAsync(d_x_se_arr, x_se_arr, 3 * n_se * n_nodes * sizeof(double), hipMemcpyHostToDevice) );
        x_b_arr = b_host_device_map(b_arr[0], n_dln);
        checkCudaErrors( hipMalloc ( (void **) &d_x_b_arr, 3 * n_dln * sizeof(double) ) );
        checkCudaErrors( hipMemcpyAsync(d_x_b_arr, x_b_arr, 3 * n_dln * sizeof(double), hipMemcpyHostToDevice) );
    }
    // Copy the scalar material constants to device constant memory.
    mu = mxGetScalar(prhs[7]);
    checkCudaErrors( hipMemcpyToSymbolAsync(d_mu, &mu, sizeof(mu)) );
    nu = mxGetScalar(prhs[8]);
    checkCudaErrors( hipMemcpyToSymbolAsync(d_nu, &nu, sizeof(nu)) );
    one_m_nu = 1.-nu;
    checkCudaErrors( hipMemcpyToSymbolAsync(d_one_m_nu, &one_m_nu, sizeof(one_m_nu)) );
    a = mxGetScalar(prhs[9]);
    checkCudaErrors( hipMemcpyToSymbolAsync(d_a, &a, sizeof(a)) );
    a_sq = a*a;
    checkCudaErrors( hipMemcpyToSymbolAsync(d_a_sq, &a_sq, sizeof(a_sq)) );
    factor = 0.25*mu/pi/one_m_nu;
    checkCudaErrors( hipMemcpyToSymbolAsync(d_factor, &factor, sizeof(factor)) );
    checkCudaErrors( hipMemcpyToSymbolAsync(d_eps, &eps, sizeof(eps)) );
    // Create the MATLAB output arrays and link the host force pointers to them.
    plhs[0] = mxCreateDoubleMatrix(3 * n_se, 1, mxREAL);
    plhs[1] = mxCreateDoubleMatrix(3 * n_se, 1, mxREAL);
    plhs[2] = mxCreateDoubleMatrix(3 * n_se, 1, mxREAL);
    plhs[3] = mxCreateDoubleMatrix(3 * n_se, 1, mxREAL);
    plhs[4] = mxCreateDoubleMatrix(3 * n_se, 1, mxREAL);
    fx_arr[0] = (double *) mxGetPr(plhs[0]);
    fx_arr[1] = (double *) mxGetPr(plhs[1]);
    fx_arr[2] = (double *) mxGetPr(plhs[2]);
    fx_arr[3] = (double *) mxGetPr(plhs[3]);
    ftot_arr  = (double *) mxGetPr(plhs[4]);
    threads_per_block = (int) mxGetScalar(prhs[12]);
    // Launch one thread per dislocation (scheme 1) or per surface element (otherwise).
    if(para_scheme == 1){
        blocks_per_grid = (n_dln + threads_per_block - 1) / threads_per_block;  // ceil-div
        hipLaunchKernelGGL(( dln_cuda_nodal_surface_force_linear_rectangle), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, d_x_dln_arr, d_x_se_arr, d_x_b_arr, d_fx_arr, d_ftot_arr, n_se, n_dln);
    }
    else{
        blocks_per_grid = (n_se + threads_per_block - 1) / threads_per_block;  // ceil-div
        hipLaunchKernelGGL(( se_cuda_nodal_surface_force_linear_rectangle), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, d_x_dln_arr, d_x_se_arr, d_x_b_arr, d_fx_arr, d_ftot_arr, n_se, n_dln);
    }
    // Catch launch-configuration errors now; in-kernel errors surface at the
    // next synchronising call (the blocking hipMemcpy below).
    checkCudaErrors( hipGetLastError() );
    // Host code executes asynchronously with the kernel.
    // NOTE(review): the HtoD copies above use pageable host memory, so
    // hipMemcpyAsync behaves synchronously w.r.t. the host and these frees are safe.
    free(x_se_arr); free(x_dln_arr); free(x_b_arr);
    x_fx_arr = (double *) malloc(3 * n_se * n_nodes * sizeof(double));
    // Special case: dislocation line parallel to the surface element, handled
    // on the host while the kernel runs. Compile with -Dsc to skip it.
    #ifndef sc
    int idx1, idx2;
    // Allocate per-node scratch forces and zero the MATLAB outputs.
    for (int i = 0; i < n_nodes; i++){
        fx[i] = (double * ) malloc(3*sizeof(double));
        for (int j = 0; j < 3*n_se; j++){
            fx_arr[i][j] = 0.0;
        }
    }
    for (int i = 0; i < 3*n_se; i++){
        ftot_arr[i] = 0.0;
    }
    idx1 = 0;
    for (int i = 0; i < n_se; i++){
        idx2 = 0;
        // Transfer rectangular element i's coordinates into x3--x6.
        for (int k = 0; k < 3; k++){
            x3[k] = se_node_arr[0][idx1+k];
            x4[k] = se_node_arr[1][idx1+k];
            x5[k] = se_node_arr[2][idx1+k];
            x6[k] = se_node_arr[3][idx1+k];
        }
        // Loop through the dislocation segments.
        for (int j = 0; j < n_dln; j++){
            // Transfer dislocation segment j's coordinates and Burgers vector into x1--x2 and b.
            for (int k = 0; k < 3; k++){
                x1[k] = dln_node_arr[0][idx2+k];
                x2[k] = dln_node_arr[1][idx2+k];
                b [k] = b_arr[0][idx2+k];
            }
            init_vector(x1, x2, 3, t);
            init_vector2(x3, x4, 3, p, &p_norm);
            init_vector2(x3, x5, 3, q, &q_norm);
            cross_product(p, q, n);
            normalise_vector(n, 3, n);
            // Segment is parallel to the element when its line direction t is
            // (numerically) orthogonal to the element normal n.
            if (fabs(dot_product(t, n, 3)) <= eps){
                nodal_surface_force_linear_rectangle_special(x1, x2, x3, x4, x5, x6, b, t, p, q, n, p_norm, q_norm, mu, nu, a, a_sq, one_m_nu, factor/p_norm/q_norm, fx, ftot);
                // Add segment j's contribution to surface element i.
                for (int k = 0; k < 3; k++){
                    fx_arr[0][idx1+k] += fx[0][k];
                    fx_arr[1][idx1+k] += fx[1][k];
                    fx_arr[2][idx1+k] += fx[2][k];
                    fx_arr[3][idx1+k] += fx[3][k];
                    ftot_arr [idx1+k] += ftot[k];
                }
            }
            idx2 += 3;
        }
        idx1 += 3;
    }
    x_ftot_arr = (double *) malloc(3 * n_se * sizeof(double));
    // Synchronously copy forces from device to host (blocking memcpy also
    // waits for the kernel to finish).
    checkCudaErrors( hipMemcpy(x_fx_arr, d_fx_arr, 3 * n_se * n_nodes * sizeof(double), hipMemcpyDeviceToHost) );
    if(para_scheme == 1){
        // Map the flat device results onto MATLAB's per-node layout, adding on
        // top of the host-computed special-case contributions.
        dln_add_fx_device_host_map(x_fx_arr, fx_arr, n_se, n_nodes);
        checkCudaErrors( hipMemcpy(x_ftot_arr, d_ftot_arr, 3 * n_se * sizeof(double), hipMemcpyDeviceToHost) );
        for (int i = 0; i < 3*n_se; i++){
            ftot_arr[i] += x_ftot_arr[i];
        }
    }
    else{
        add_fx_device_host_map(x_fx_arr, fx_arr, n_se, n_nodes);
        checkCudaErrors( hipMemcpy(x_ftot_arr, d_ftot_arr, 3 * n_se * sizeof(double), hipMemcpyDeviceToHost) );
        add_ftot_device_host_map(x_ftot_arr, ftot_arr, n_se);
    }
    free(x_ftot_arr);
    // BUG FIX: fx[] is only allocated in this (#ifndef sc) path, so it must be
    // freed here as well. The original freed it unconditionally after the
    // #endif, which freed uninitialised pointers when compiled with -Dsc.
    for (int i = 0; i < n_nodes; i++){
        free(fx[i]);
    }
    #endif
    // This path ignores the parallel special case; compiled only with -Dsc.
    #ifdef sc
    // Synchronously copy forces from device to host.
    checkCudaErrors( hipMemcpy(x_fx_arr, d_fx_arr, 3 * n_se * n_nodes * sizeof(double), hipMemcpyDeviceToHost) );
    if(para_scheme == 1){
        // Map 1D device array to 2D array for MATLAB.
        dln_fx_device_host_map(x_fx_arr, fx_arr, n_se, n_nodes);
        checkCudaErrors( hipMemcpy(ftot_arr, d_ftot_arr, 3 * n_se * sizeof(double), hipMemcpyDeviceToHost) );
    }
    else{
        x_ftot_arr = (double *) malloc(3 * n_se * sizeof(double));
        fx_device_host_map(x_fx_arr, fx_arr, n_se, n_nodes);
        checkCudaErrors( hipMemcpy(x_ftot_arr, d_ftot_arr, 3 * n_se * sizeof(double), hipMemcpyDeviceToHost) );
        ftot_device_host_map(x_ftot_arr, ftot_arr, n_se);
        free(x_ftot_arr);
    }
    #endif
    free(x_fx_arr);
    // NOTE(review): the d_* device allocations are released implicitly by the
    // device reset below; explicit hipFree calls would be cleaner if the reset
    // is ever removed.
    // CUDA exit.
    hipDeviceReset();
}
| ecfd4e2b30b0b2bd35ddf3aeb7a1bcd9c781daeb.cu | /*
Calculate nodal forces induced by a non-singular straight segment of dislocation on a linear rectangular surface element.
Notations and details can be found in S. Queyreau, J. Marian, B.D. Wirth, A. Arsenlis, MSMSE, 22(3):035004, (2014).
Translated from matlab code into C by Daniel Celis Garza.
Parallelised by Daniel Celis Garza
Edits: June 1, 2017.
Parallelisaion: August 31, 2017
*/
#define _USE_MATH_DEFINES
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include "helper_cuda.h"
#include <mex.h>
#include "vector_utils.h"
#include "serial_forces_lin_rect.h"
#include "cuda_vector_map_utils.h"
#include "cuda_forces_lin_rect.h"
void mexFunction(int nlhs, mxArray *plhs[],
    int nrhs, const mxArray *prhs[]){
    /*
       MEX gateway: nodal forces induced by non-singular straight dislocation
       segments on linear rectangular surface elements, computed on the GPU.
       prhs[0..1]  : dislocation segment endpoint coordinate arrays.
       prhs[2..5]  : coordinates of the four nodes of each surface element.
       prhs[6]     : Burgers vectors.
       prhs[7..9]  : shear modulus mu, Poisson ratio nu, core parameter a.
       prhs[10..11]: n_se (surface elements), n_dln (dislocation segments).
       prhs[12]    : CUDA threads per block.
       prhs[13]    : parallelisation scheme (1 = one thread per dislocation,
                     otherwise one thread per surface element).
       prhs[14]    : tolerance for detecting segments parallel to an element.
       plhs[0..3]  : nodal force arrays (one per surface-element node).
       plhs[4]     : total force per surface element.
    */
    // Node arrays from MATLAB. To be mapped into x_se_arr and then passed to d_x_se_arr.
    double *dln_node_arr[2], *se_node_arr[n_nodes];
    // Scratch variables for the special case where segment and element are parallel.
    double x1[3], x2[3], x3[3], x4[3], x5[3], x6[3], b[3], p[3], q[3], t[3], n[3], p_norm, q_norm;
    double *fx[n_nodes];
    double ftot[3];
    // Burgers vector array from MATLAB. Passed straight to the device.
    double *b_arr[1];
    // Material properties from MATLAB, copied to device constant memory.
    double mu, nu, a, a_sq, one_m_nu, factor;
    // Flattened nodal force array (3 coords per node per SE, 3*n_nodes*n_se),
    // inversely mapped back into fx_arr[n_nodes].
    double *x_fx_arr;
    // Nodal force arrays to be sent back to MATLAB.
    double *fx_arr[n_nodes];
    // Total force per SE (3 coords per SE, 3*n_se) for MATLAB, plus its flat mirror.
    double *ftot_arr, *x_ftot_arr;
    // Flattened (1D) host maps of the SE, dislocation and Burgers arrays.
    double *x_se_arr, *x_dln_arr, *x_b_arr;
    // Device arrays.
    double *d_x_b_arr, *d_x_se_arr, *d_x_dln_arr, *d_fx_arr, *d_ftot_arr;
    double eps;
    int threads_per_block, blocks_per_grid, n_se, n_dln, para_scheme;
    checkCudaErrors( cudaSetDevice(0) );
    // Stagger API calls so host work overlaps the asynchronous device work.
    n_se = (int) mxGetScalar(prhs[10]);
    // Allocate and zero the device force accumulators.
    checkCudaErrors( cudaMalloc( (void **) &d_fx_arr , 3 * n_se * n_nodes * sizeof(double) ) );
    checkCudaErrors( cudaMalloc( (void **) &d_ftot_arr, 3 * n_se * sizeof(double) ) );
    // Memset takes an int byte value; 0 correctly zeroes IEEE doubles.
    checkCudaErrors( cudaMemsetAsync(d_fx_arr , 0, 3 * n_se * n_nodes * sizeof(double)) );
    checkCudaErrors( cudaMemsetAsync(d_ftot_arr, 0, 3 * n_se * sizeof(double)) );
    // Execute host code while the device zeroes the force arrays.
    n_dln = (int) mxGetScalar(prhs[11]);
    dln_node_arr[0] = (double *) mxGetPr(prhs[0]);
    dln_node_arr[1] = (double *) mxGetPr(prhs[1]);
    se_node_arr[0] = (double *) mxGetPr(prhs[2]);
    se_node_arr[1] = (double *) mxGetPr(prhs[3]);
    se_node_arr[2] = (double *) mxGetPr(prhs[4]);
    se_node_arr[3] = (double *) mxGetPr(prhs[5]);
    b_arr[0]       = (double *) mxGetPr(prhs[6]);
    para_scheme = (int)    mxGetScalar(prhs[13]);
    eps         = (double) mxGetScalar(prhs[14]);
    // Map node/Burgers arrays to flat 1D layouts suited to the chosen scheme,
    // then stage the asynchronous host-to-device copies.
    if(para_scheme == 1){
        x_dln_arr = element_host_device_map(dln_node_arr, n_dln, 2);
        checkCudaErrors( cudaMalloc ( (void **) &d_x_dln_arr, 3 * n_dln * 2 * sizeof(double) ) );
        checkCudaErrors( cudaMemcpyAsync(d_x_dln_arr, x_dln_arr, 3 * n_dln * 2 * sizeof(double), cudaMemcpyHostToDevice) );
        x_se_arr = se_host_device_map(se_node_arr[0], se_node_arr[1], se_node_arr[2], se_node_arr[3], n_se);
        checkCudaErrors( cudaMalloc ( (void **) &d_x_se_arr, 3 * n_se * n_nodes * sizeof(double) ) );
        checkCudaErrors( cudaMemcpyAsync(d_x_se_arr, x_se_arr, 3 * n_se * n_nodes * sizeof(double), cudaMemcpyHostToDevice) );
        x_b_arr = element_host_device_map(b_arr, n_dln, 1);
        checkCudaErrors( cudaMalloc ( (void **) &d_x_b_arr, 3 * n_dln * sizeof(double) ) );
        checkCudaErrors( cudaMemcpyAsync(d_x_b_arr, x_b_arr, 3 * n_dln * sizeof(double), cudaMemcpyHostToDevice) );
    }
    else{
        x_dln_arr = dln_host_device_map(dln_node_arr[0], dln_node_arr[1], n_dln);
        checkCudaErrors( cudaMalloc ( (void **) &d_x_dln_arr, 3 * n_dln * 2 * sizeof(double) ) );
        checkCudaErrors( cudaMemcpyAsync(d_x_dln_arr, x_dln_arr, 3 * n_dln * 2 * sizeof(double), cudaMemcpyHostToDevice) );
        x_se_arr = element_host_device_map(se_node_arr, n_se, n_nodes);
        checkCudaErrors( cudaMalloc ( (void **) &d_x_se_arr, 3 * n_se * n_nodes * sizeof(double) ) );
        checkCudaErrors( cudaMemcpyAsync(d_x_se_arr, x_se_arr, 3 * n_se * n_nodes * sizeof(double), cudaMemcpyHostToDevice) );
        x_b_arr = b_host_device_map(b_arr[0], n_dln);
        checkCudaErrors( cudaMalloc ( (void **) &d_x_b_arr, 3 * n_dln * sizeof(double) ) );
        checkCudaErrors( cudaMemcpyAsync(d_x_b_arr, x_b_arr, 3 * n_dln * sizeof(double), cudaMemcpyHostToDevice) );
    }
    // Copy the scalar material constants to device constant memory.
    mu = mxGetScalar(prhs[7]);
    checkCudaErrors( cudaMemcpyToSymbolAsync(d_mu, &mu, sizeof(mu)) );
    nu = mxGetScalar(prhs[8]);
    checkCudaErrors( cudaMemcpyToSymbolAsync(d_nu, &nu, sizeof(nu)) );
    one_m_nu = 1.-nu;
    checkCudaErrors( cudaMemcpyToSymbolAsync(d_one_m_nu, &one_m_nu, sizeof(one_m_nu)) );
    a = mxGetScalar(prhs[9]);
    checkCudaErrors( cudaMemcpyToSymbolAsync(d_a, &a, sizeof(a)) );
    a_sq = a*a;
    checkCudaErrors( cudaMemcpyToSymbolAsync(d_a_sq, &a_sq, sizeof(a_sq)) );
    factor = 0.25*mu/pi/one_m_nu;
    checkCudaErrors( cudaMemcpyToSymbolAsync(d_factor, &factor, sizeof(factor)) );
    checkCudaErrors( cudaMemcpyToSymbolAsync(d_eps, &eps, sizeof(eps)) );
    // Create the MATLAB output arrays and link the host force pointers to them.
    plhs[0] = mxCreateDoubleMatrix(3 * n_se, 1, mxREAL);
    plhs[1] = mxCreateDoubleMatrix(3 * n_se, 1, mxREAL);
    plhs[2] = mxCreateDoubleMatrix(3 * n_se, 1, mxREAL);
    plhs[3] = mxCreateDoubleMatrix(3 * n_se, 1, mxREAL);
    plhs[4] = mxCreateDoubleMatrix(3 * n_se, 1, mxREAL);
    fx_arr[0] = (double *) mxGetPr(plhs[0]);
    fx_arr[1] = (double *) mxGetPr(plhs[1]);
    fx_arr[2] = (double *) mxGetPr(plhs[2]);
    fx_arr[3] = (double *) mxGetPr(plhs[3]);
    ftot_arr  = (double *) mxGetPr(plhs[4]);
    threads_per_block = (int) mxGetScalar(prhs[12]);
    // Launch one thread per dislocation (scheme 1) or per surface element (otherwise).
    if(para_scheme == 1){
        blocks_per_grid = (n_dln + threads_per_block - 1) / threads_per_block;  // ceil-div
        dln_cuda_nodal_surface_force_linear_rectangle<<<blocks_per_grid, threads_per_block>>>(d_x_dln_arr, d_x_se_arr, d_x_b_arr, d_fx_arr, d_ftot_arr, n_se, n_dln);
    }
    else{
        blocks_per_grid = (n_se + threads_per_block - 1) / threads_per_block;  // ceil-div
        se_cuda_nodal_surface_force_linear_rectangle<<<blocks_per_grid, threads_per_block>>>(d_x_dln_arr, d_x_se_arr, d_x_b_arr, d_fx_arr, d_ftot_arr, n_se, n_dln);
    }
    // Catch launch-configuration errors now; in-kernel errors surface at the
    // next synchronising call (the blocking cudaMemcpy below).
    checkCudaErrors( cudaGetLastError() );
    // Host code executes asynchronously with the kernel.
    // NOTE(review): the HtoD copies above use pageable host memory, so
    // cudaMemcpyAsync behaves synchronously w.r.t. the host and these frees are safe.
    free(x_se_arr); free(x_dln_arr); free(x_b_arr);
    x_fx_arr = (double *) malloc(3 * n_se * n_nodes * sizeof(double));
    // Special case: dislocation line parallel to the surface element, handled
    // on the host while the kernel runs. Compile with -Dsc to skip it.
    #ifndef sc
    int idx1, idx2;
    // Allocate per-node scratch forces and zero the MATLAB outputs.
    for (int i = 0; i < n_nodes; i++){
        fx[i] = (double * ) malloc(3*sizeof(double));
        for (int j = 0; j < 3*n_se; j++){
            fx_arr[i][j] = 0.0;
        }
    }
    for (int i = 0; i < 3*n_se; i++){
        ftot_arr[i] = 0.0;
    }
    idx1 = 0;
    for (int i = 0; i < n_se; i++){
        idx2 = 0;
        // Transfer rectangular element i's coordinates into x3--x6.
        for (int k = 0; k < 3; k++){
            x3[k] = se_node_arr[0][idx1+k];
            x4[k] = se_node_arr[1][idx1+k];
            x5[k] = se_node_arr[2][idx1+k];
            x6[k] = se_node_arr[3][idx1+k];
        }
        // Loop through the dislocation segments.
        for (int j = 0; j < n_dln; j++){
            // Transfer dislocation segment j's coordinates and Burgers vector into x1--x2 and b.
            for (int k = 0; k < 3; k++){
                x1[k] = dln_node_arr[0][idx2+k];
                x2[k] = dln_node_arr[1][idx2+k];
                b [k] = b_arr[0][idx2+k];
            }
            init_vector(x1, x2, 3, t);
            init_vector2(x3, x4, 3, p, &p_norm);
            init_vector2(x3, x5, 3, q, &q_norm);
            cross_product(p, q, n);
            normalise_vector(n, 3, n);
            // Segment is parallel to the element when its line direction t is
            // (numerically) orthogonal to the element normal n.
            if (fabs(dot_product(t, n, 3)) <= eps){
                nodal_surface_force_linear_rectangle_special(x1, x2, x3, x4, x5, x6, b, t, p, q, n, p_norm, q_norm, mu, nu, a, a_sq, one_m_nu, factor/p_norm/q_norm, fx, ftot);
                // Add segment j's contribution to surface element i.
                for (int k = 0; k < 3; k++){
                    fx_arr[0][idx1+k] += fx[0][k];
                    fx_arr[1][idx1+k] += fx[1][k];
                    fx_arr[2][idx1+k] += fx[2][k];
                    fx_arr[3][idx1+k] += fx[3][k];
                    ftot_arr [idx1+k] += ftot[k];
                }
            }
            idx2 += 3;
        }
        idx1 += 3;
    }
    x_ftot_arr = (double *) malloc(3 * n_se * sizeof(double));
    // Synchronously copy forces from device to host (blocking memcpy also
    // waits for the kernel to finish).
    checkCudaErrors( cudaMemcpy(x_fx_arr, d_fx_arr, 3 * n_se * n_nodes * sizeof(double), cudaMemcpyDeviceToHost) );
    if(para_scheme == 1){
        // Map the flat device results onto MATLAB's per-node layout, adding on
        // top of the host-computed special-case contributions.
        dln_add_fx_device_host_map(x_fx_arr, fx_arr, n_se, n_nodes);
        checkCudaErrors( cudaMemcpy(x_ftot_arr, d_ftot_arr, 3 * n_se * sizeof(double), cudaMemcpyDeviceToHost) );
        for (int i = 0; i < 3*n_se; i++){
            ftot_arr[i] += x_ftot_arr[i];
        }
    }
    else{
        add_fx_device_host_map(x_fx_arr, fx_arr, n_se, n_nodes);
        checkCudaErrors( cudaMemcpy(x_ftot_arr, d_ftot_arr, 3 * n_se * sizeof(double), cudaMemcpyDeviceToHost) );
        add_ftot_device_host_map(x_ftot_arr, ftot_arr, n_se);
    }
    free(x_ftot_arr);
    // BUG FIX: fx[] is only allocated in this (#ifndef sc) path, so it must be
    // freed here as well. The original freed it unconditionally after the
    // #endif, which freed uninitialised pointers when compiled with -Dsc.
    for (int i = 0; i < n_nodes; i++){
        free(fx[i]);
    }
    #endif
    // This path ignores the parallel special case; compiled only with -Dsc.
    #ifdef sc
    // Synchronously copy forces from device to host.
    checkCudaErrors( cudaMemcpy(x_fx_arr, d_fx_arr, 3 * n_se * n_nodes * sizeof(double), cudaMemcpyDeviceToHost) );
    if(para_scheme == 1){
        // Map 1D device array to 2D array for MATLAB.
        dln_fx_device_host_map(x_fx_arr, fx_arr, n_se, n_nodes);
        checkCudaErrors( cudaMemcpy(ftot_arr, d_ftot_arr, 3 * n_se * sizeof(double), cudaMemcpyDeviceToHost) );
    }
    else{
        x_ftot_arr = (double *) malloc(3 * n_se * sizeof(double));
        fx_device_host_map(x_fx_arr, fx_arr, n_se, n_nodes);
        checkCudaErrors( cudaMemcpy(x_ftot_arr, d_ftot_arr, 3 * n_se * sizeof(double), cudaMemcpyDeviceToHost) );
        ftot_device_host_map(x_ftot_arr, ftot_arr, n_se);
        free(x_ftot_arr);
    }
    #endif
    free(x_fx_arr);
    // NOTE(review): the d_* device allocations are released implicitly by the
    // device reset below; explicit cudaFree calls would be cleaner if the
    // reset is ever removed.
    // CUDA exit.
    cudaDeviceReset();
}
|
1b19284e9edd672b89696dd4d6e29dc18330e04f.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
namespace filter
{
    // Explicit instantiation of the separable row-filter wrapper for uchar3
    // source pixels convolved into float3 output. NOTE(review): the meanings of
    // brd_type (border mode) and cc (compute capability) are presumably
    // defined in row_filter.h — confirm there, they are not visible here.
    template void linearRow<uchar3, float3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
| 1b19284e9edd672b89696dd4d6e29dc18330e04f.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
namespace filter
{
    // Explicit instantiation of the separable row-filter wrapper for uchar3
    // source pixels convolved into float3 output. NOTE(review): the meanings of
    // brd_type (border mode) and cc (compute capability) are presumably
    // defined in row_filter.h — confirm there, they are not visible here.
    template void linearRow<uchar3, float3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
379fb9d4763b3160d94b47bd169ebad93f833db4.hip | // !!! This is a file automatically generated by hipify!!!
//P2P Synchronization using events and Seperate streams. Coupling-Overlapping several Exchanges together.
#include <omp.h>
#include "tinyxml.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "testMultiGPU_Jacobi2D_Decom.cuh"
#include "multiGPU_topology_optimize.cuh"
#include <iostream>
#include <chrono>
#include <memory>
#include <vector>
//#include<map>
#include <fstream>
#include <hip/hip_vector_types.h>
#define IMUL(a,b) __mul24(a,b)
#define DIVRND(a,b) ((a+b-1)/b)
#define BLOCKSIZE_X 32
#define BLOCKSIZE_Y 16
using namespace std;
using namespace std::chrono;
//hipError_t performMultiGPUJacobi();
//Support for below c++14 on *nix
// Backport of std::make_unique for pre-C++14 toolchains: perfect-forwards the
// constructor arguments and wraps the new T in a unique_ptr.
// NOTE(review): with `using namespace std;` in effect above, this global
// overload may be ambiguous with std::make_unique when compiled as C++14 or
// later — consider guarding it with a feature-test macro.
template<typename T, typename ...Args>
std::unique_ptr<T> make_unique(Args&& ...args)
{
    return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
// Per-GPU descriptor for the multi-GPU Jacobi solver: identifies the device,
// records its position in the GPU topology, and owns the four halo buffers
// exchanged with neighbouring devices.
struct create_Device
{
    int deviceID;  // HIP/CUDA device ordinal
    //In a GPU topology set the GPU position
    int devicePosition_X;
    int devicePosition_Y;
    int devicePosition_Z;
    // Boundary (halo) values exchanged with the east/west/north/south neighbours.
    vector<float> eHalo;
    vector<float> wHalo;
    vector<float> nHalo;
    vector<float> sHalo;
    //Flags check the halos needed by the device (1 = that halo exists for this device)
    int eHalo_flag = 0;
    int wHalo_flag = 0;
    int nHalo_flag = 0;
    int sHalo_flag = 0;
};
//Simple Jacobi iteration
// One Jacobi relaxation sweep over this device's dim.x x dim.y sub-domain.
// A0..A4 are the 5-point stencil diagonals (bottom, left, centre, right, top);
// x_in is the previous iterate, x_out receives the new one, rhs is the
// right-hand side. The {e,w,n,s}halo buffers hold boundary values from
// neighbouring devices, and the *_flag inputs tell this device which of those
// neighbours exist. After computing a boundary point, the same thread writes
// its fresh value back into the corresponding halo buffer for the next
// neighbour exchange (each halo slot is read and written by exactly one
// thread, so there is no intra-kernel race on the halos).
__global__ void jacobi_Simple(const float *A0, const float *A1, const float *A2, const float *A3, const float *A4, float *x_in, float *x_out, const float *rhs, const int ehalo_flag, const int whalo_flag, const int nhalo_flag, const int shalo_flag, float *ehalo, float *whalo, float *nhalo, float *shalo, const int deviceID, const int numDevices, const int domain_Decom, int2 dim)
{
    int2 pos = make_int2(
        blockIdx.x * blockDim.x + threadIdx.x,
        blockIdx.y * blockDim.y + threadIdx.y
    );
    // BUG FIX: guard against grid overshoot. The launch grid is built from
    // fixed-size tiles (BLOCKSIZE_X x BLOCKSIZE_Y) which rarely divide the
    // domain exactly; without this check out-of-range threads read and write
    // out of bounds. For exactly-covering grids behaviour is unchanged.
    if (pos.x >= dim.x || pos.y >= dim.y)
        return;
    int index = (pos.y * dim.x) + pos.x;
    float result = rhs[index];
    int dim_x = dim.x;
    int dim_y = dim.y;
    // NOTE(review): naming is swapped relative to convention — x_pos is the
    // row (pos.y) and y_pos the column (pos.x). Kept as-is to match the
    // boundary-index arithmetic below.
    int x_pos = pos.y;
    int y_pos = pos.x;
    int leftBoundaryElem = x_pos * (dim_x);
    int rightBoundaryElem = (x_pos * dim_x) + (dim_x - 1);
    int topBoundaryElem = y_pos + ((dim_y - 1) * (dim_x));
    int bottomBoundaryElem = y_pos;
    // Accumulate -sum(A_offdiag * x) into result, pulling neighbour values
    // from the halo buffers when this point sits on a sub-domain boundary.
    //======Left boundary element
    if (index != leftBoundaryElem)
        result -= A1[index] * x_in[index - 1];
    if (index == leftBoundaryElem) {
        if (whalo_flag == 1) {
            result -= A1[index] * whalo[x_pos];
        }
    }
    //======Right boundary element
    if (index != rightBoundaryElem)
        result -= A3[index] * x_in[index + 1];
    if (index == rightBoundaryElem) {
        if (ehalo_flag == 1) {
            result -= A3[index] * ehalo[x_pos];
        }
    }
    //======Bottom boundary element
    if (index != bottomBoundaryElem)
        result -= A0[index] * x_in[index - dim_x];
    if (index == bottomBoundaryElem) {
        if (shalo_flag == 1) {
            result -= A0[index] * shalo[y_pos];
        }
    }
    //======Top boundary element
    if (index != topBoundaryElem)
        result -= A4[index] * x_in[index + dim_x];
    if (index == topBoundaryElem) {
        if (nhalo_flag == 1) {
            result -= A4[index] * nhalo[y_pos];
        }
    }
    result /= A2[index];
    x_out[index] = result;
    // Publish the freshly computed boundary values into the halo buffers for
    // the neighbouring devices to consume on the next exchange.
    if (index == topBoundaryElem) {
        if (nhalo_flag == 1) {
            nhalo[y_pos] = result;
        }
    }
    if (index == bottomBoundaryElem) {
        if (shalo_flag == 1) {
            shalo[y_pos] = result;
        }
    }
    if (index == leftBoundaryElem) {
        if (whalo_flag == 1) {
            whalo[x_pos] = result;
        }
    }
    if (index == rightBoundaryElem) {
        if (ehalo_flag == 1) {
            ehalo[x_pos] = result;
        }
    }
}
//========================MultiGPU utility functions============================================================================
// load the named file and dump its structure to STDOUT
void getConfiguration(const char* pFilename, int &numDevices, int &domain_decom)
{
TiXmlDocument doc(pFilename);
bool loadOkay = doc.LoadFile();
if (loadOkay)
{
cout << "\nFile Loaded successfully\n";
TiXmlElement *pRoot = doc.RootElement();
TiXmlElement *element = pRoot->FirstChildElement();
while (element)
{
string elementName = element->Value();
string attribute = element->Attribute("name"); //Gets you the time variable
string value = element->GetText();
cout << "\n The attribute is " << attribute;
cout << "\n The elementName is " << elementName;
cout << "\n The element Value is " << value;
if (attribute == "numDevices") {
numDevices = stoi(value);
}
if (attribute == "decomposition") {
domain_decom = stoi(value);
}
element = element->NextSiblingElement();
}
}
else
{
cout << "\nCould not load config file\n";
}
}
//Probes the P2P access matrix for every ordered pair of GPUs and reports API errors.
//A missing P2P link is not an error here: later copies simply fall back to staged memcopy.
void checkP2Paccess(int numGPUs)
{
	for (int src = 0; src < numGPUs; src++)
	{
		hipSetDevice(src);
		for (int dst = 0; dst < numGPUs; dst++)
		{
			if (src == dst)
				continue;
			int access = 0;
			hipDeviceCanAccessPeer(&access, src, dst);
			hipError_t err = hipGetLastError();
			if (err != hipSuccess)
			{
				cout << "P2P Operations failed : " << hipGetErrorString(err) << endl;
				return;
			}
		}
	}
	cout << "\n***NOTE: In case a device doesn't have P2P access to other one, it falls back to normal memcopy procedure.\nSo you can see lesser Bandwidth (GB/s) in those cases.\n\n";
}
//Enables peer access from every GPU to every other GPU that supports it.
//Returns false as soon as any HIP call reports an error, true otherwise.
bool enableP2P(int numGPUs)
{
	for (int src = 0; src < numGPUs; src++)
	{
		hipSetDevice(src);
		for (int dst = 0; dst < numGPUs; dst++)
		{
			int access = 0;
			hipDeviceCanAccessPeer(&access, src, dst);
			hipError_t err = hipGetLastError();
			if (err != hipSuccess)
			{
				cout << "P2P Operations failed while enabling: " << hipGetErrorString(err) << endl;
				return false;
			}
			if (!access)
				continue;
			//Peer access is supported for this pair: turn it on
			hipDeviceEnablePeerAccess(dst, 0);
			err = hipGetLastError();
			if (err != hipSuccess)
			{
				cout << "P2P Operations failed while enabling: " << hipGetErrorString(err) << endl;
				return false;
			}
		}
	}
	return true;
}
//Tears down peer access between every GPU pair that supports it (inverse of enableP2P).
//Stops at the first HIP API error encountered.
void disableP2P(int numGPUs)
{
	for (int src = 0; src < numGPUs; src++)
	{
		hipSetDevice(src);
		for (int dst = 0; dst < numGPUs; dst++)
		{
			int access = 0;
			hipDeviceCanAccessPeer(&access, src, dst);
			hipError_t err = hipGetLastError();
			if (err != hipSuccess)
			{
				cout << "P2P Operations failed while disabling : " << hipGetErrorString(err) << endl;
				return;
			}
			if (!access)
				continue;
			hipDeviceDisablePeerAccess(dst);
			err = hipGetLastError();
			if (err != hipSuccess)
			{
				cout << "P2P Operations failed while disabling: " << hipGetErrorString(err) << endl;
				return;
			}
		}
	}
}
//Splits numDevices into a factor pair (fact_x, fact_y) with fact_x * fact_y == numDevices,
//used to shape the 2D GPU grid. 1 and primes map to a flat numDevices x 1 layout; composites
//map to the pair closest to a square, with fact_x <= fact_y.
//Fixes the previous off-by-one (loops ran with i < numDevices / 2), which mis-classified
//4 as prime and produced a 4x1 grid on a 4-GPU system instead of 2x2.
void performFactorPairing(int numDevices, int &fact_x, int &fact_y)
{
	//Largest divisor not exceeding sqrt(numDevices); i*i <= n covers all candidate factors
	int bestDiv = 1;
	for (int i = 2; i * i <= numDevices; ++i)
	{
		if (numDevices % i == 0)
		{
			bestDiv = i;
		}
	}
	if (bestDiv == 1)
	{
		//numDevices is 1 or prime: fall back to a 1D layout
		fact_x = numDevices;
		fact_y = 1;
	}
	else
	{
		fact_x = bestDiv;
		fact_y = numDevices / bestDiv;
	}
}
//===============================================================================================================================
//====================================Creating Topology with the number of Devices available====================================
//Derives the GPU grid shape (devices along X and Y) for the requested decomposition type.
//1D decomposition lays all devices along X; 2D prefers a square grid when numDevices is a
//perfect square and otherwise delegates to performFactorPairing for a factor pair.
void generateGPUGRID(int numDevices, int &numberOfDevicesAlong_X, int &numberOfDevicesAlong_Y, int domainDecomType)
{
	if (domainDecomType == 1)
	{
		//1D: a single row of devices
		numberOfDevicesAlong_X = numDevices;
		numberOfDevicesAlong_Y = 1;
		return;
	}
	//2D (or higher): perfect squares give a square grid
	int root = (int)sqrt(numDevices);
	if (root * root == numDevices)
	{
		numberOfDevicesAlong_X = root;
		numberOfDevicesAlong_Y = root;
	}
	else
	{
		int fact_x = 1;
		int fact_y = 1;
		performFactorPairing(numDevices, fact_x, fact_y);
		numberOfDevicesAlong_X = fact_x;
		numberOfDevicesAlong_Y = fact_y;
	}
}
/* Creates a topology for a number of devices in a system
for ex. The devices are aware of left, right, top and bottom neighbours in 2D
1. It also decides the chunk per devices by determining x-dimension and y-dimensions for per chunk of data per device.
2. It also initializes halos for each devices which can be exchanged with the neighbours
*/
void createTopology(int numDevices, vector<create_Device> &deviceArray, int numberOfDevicesAlong_X, int numberOfDevicesAlong_Y)
{
deviceArray.resize(numDevices);
unsigned int deviceCount = 0;
for (int gridCount_Y = 0; gridCount_Y < numberOfDevicesAlong_Y; gridCount_Y++) {
for (int gridCount_X = 0; gridCount_X < numberOfDevicesAlong_X; gridCount_X++) {
deviceArray[deviceCount].deviceID = deviceCount;
deviceArray[deviceCount].devicePosition_X = gridCount_X;
deviceArray[deviceCount].devicePosition_Y = gridCount_Y;
//devicePosition_Z to be changed later
deviceArray[deviceCount].devicePosition_Z = 1;
deviceCount++;
}
}
}
//==============================================================================================================================
//Init Halos: In 1D decomposition only North and South Halos are used. In 2D decomposition North, South, East and West Halo need to be initialized and computed
//TODO:Create a Halo Exchange Mechanism for 2D Multi GPU topology
//Initializes the 2D-decomposition halo buffers of one logical device by sampling the boundary
//values of its chunk from the global input vector vec_in.
//  chunk_X / chunk_Y          : extents of this device's chunk
//  maxdevicesAlong_X / _Y     : GPU-grid extents, used to decide which neighbours exist
//  rowStartPos / rowEndPos    : linear indices (into vec_in) of the first and last element of
//                               the chunk's bottom row; dim is the row stride of the full domain
//Each side that has a neighbouring device gets its *Halo_flag set to 1 and its halo filled.
void initHalos2D(create_Device &device, int chunk_X, int chunk_Y, float *vec_in, int maxdevicesAlong_X, int maxDevicesAlong_Y, int rowStartPos, int rowEndPos, int dim)
{
	/*cout << endl << "Inside Halo Computation 2D. printing Details";
	cout << endl << "Device ID " << device.deviceID;
	cout << endl << "Device position X " << device.devicePosition_X;
	cout << endl << "Device position Y " << device.devicePosition_Y;
	cout << endl << "Row Start " << rowStartPos;
	cout << endl << "Row End " << rowEndPos;*/
	//Each halo keeps its own walking counter so filling one side does not disturb another
	//int rowStartPosEast = rowStartPos;
	int rowStartPosWest = rowStartPos;
	int rowStartPosNorth = rowStartPos;
	int rowStartPosSouth = rowStartPos;
	int rowEndPosEast = rowEndPos;
	//int rowEndPosWest = rowEndPos;
	//int rowEndPosNorth = rowEndPos;
	//int rowEndPosSouth = rowEndPos;
	//Checks provided for Boundary devices in GPU topology
	//A device with a neighbour to the west: copy the chunk's first column
	//(stepping by dim walks down the column, one element per chunk row)
	if ((device.devicePosition_X - 1) >= 0) {
		//cout << "West Halo needed ";
		device.wHalo_flag = 1;
		device.wHalo.resize(chunk_Y);
		for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
		{
			device.wHalo[rowNum] = vec_in[rowStartPosWest];
			//cout << rowStartPosWest << " ";
			rowStartPosWest += dim;
		}
	}
	//A device with a neighbour to the east: copy the chunk's last column
	if ((device.devicePosition_X + 1) < maxdevicesAlong_X) {
		//cout << "East Halo needed ";
		device.eHalo_flag = 1;
		device.eHalo.resize(chunk_Y);
		for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
		{
			device.eHalo[rowNum] = vec_in[rowEndPosEast];
			//cout << rowEndPosEast << " ";
			rowEndPosEast += dim;
		}
	}
	//A device with a neighbour below: copy the chunk's bottom row (consecutive elements)
	if ((device.devicePosition_Y - 1) >= 0) {
		//cout << "South Halo needed ";
		device.sHalo_flag = 1;
		device.sHalo.resize(chunk_X);
		for (int rowNum = 0; rowNum < chunk_X; rowNum++)
		{
			device.sHalo[rowNum] = vec_in[rowStartPosSouth];
			//cout << rowStartPosSouth << " ";
			rowStartPosSouth++;
		}
	}
	//A device with a neighbour above: offset to the chunk's top row, then copy it
	if ((device.devicePosition_Y + 1) < maxDevicesAlong_Y) {
		//cout << "North Halo needed ";
		device.nHalo_flag = 1;
		device.nHalo.resize(chunk_X);
		rowStartPosNorth = rowStartPosNorth + (dim * (chunk_Y - 1));
		for (int rowNum = 0; rowNum < chunk_X; rowNum++)
		{
			device.nHalo[rowNum] = vec_in[rowStartPosNorth];
			//cout << rowStartPosNorth << " ";
			rowStartPosNorth++;
		}
	}
}
//======================================Exchange Halos: on Host==============================================
//Maps 2D device-grid coordinates to the linear device ID (row-major order).
int getDeviceIDfromCoord(int devCoord_x, int devCoord_y, int numberofDevicesAlong_X) {
	return devCoord_x + numberofDevicesAlong_X * devCoord_y;
}
void exchangehalos_onHost(int numDevices, vector<create_Device> &deviceArray, int numberofDevicesAlong_X)
{
//Halos exist in pairs so:
//Important: A device exchanges North-to-South Pairs and East-to-West Pairs only. Not South-to-North pairs and West-to-East pairs
//That way the number of exchanges are kept to minimum
for (int dev = 0; dev < numDevices; dev++)
{
int getDevCoord_X = deviceArray[dev].devicePosition_X;
int getDevCoord_Y = deviceArray[dev].devicePosition_Y;
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1) {
int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y + 1, numberofDevicesAlong_X);
//Exchange Halos
(deviceArray[dev].nHalo).swap(deviceArray[devIDtoNorth].sHalo);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1) {
int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X + 1, getDevCoord_Y, numberofDevicesAlong_X);
//Exchange Halos
(deviceArray[dev].eHalo).swap(deviceArray[devIDtoEast].wHalo);
}
}
}
bool exchangehalos_onHostPinned(int numDevices, vector<create_Device> &deviceArray, int numberofDevicesAlong_X, vector<float*> &nHalosPinned, vector<float*> &sHalosPinned, vector<float*> &eHalosPinned, vector<float*> &wHalosPinned)
{
//Halos exist in pairs so:
//Important: A device exchanges North-to-South Pairs and East-to-West Pairs only. Not South-to-North pairs and West-to-East pairs
//That way the number of exchanges are kept to minimum
for (int dev = 0; dev < numDevices; dev++)
{
int getDevCoord_X = deviceArray[dev].devicePosition_X;
int getDevCoord_Y = deviceArray[dev].devicePosition_Y;
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1) {
int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y + 1, numberofDevicesAlong_X);
//Exchange Halos
//(deviceArray[dev].nHalo).swap(deviceArray[devIDtoNorth].sHalo);
swap(nHalosPinned[dev], sHalosPinned[devIDtoNorth]);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1) {
int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X + 1, getDevCoord_Y, numberofDevicesAlong_X);
//Exchange Halos
//(deviceArray[dev].eHalo).swap(deviceArray[devIDtoEast].wHalo);
swap(eHalosPinned[dev], wHalosPinned[devIDtoEast]);
}
}
return true;
}
//===========================Exchange Halos: on Host Ends=====================================================
//Init matrix Diagonals A0, A1, A2, A3, A4
//Copies the full dim x dim problem (five matrix diagonals, right-hand side and start vector)
//from the val_* source arrays into the working arrays, and zero-initialises the output vector.
void copyValues(float *A0, float *A1, float *A2, float *A3, float *A4, float *rhs, float *vec_in, float *vec_out, int dim, float *val_A0, float *val_A1, float *val_A2, float *val_A3, float *val_A4, float *val_rhs, float *val_x_in)
{
	const unsigned int total = (unsigned int)dim * dim;
	for (unsigned int idx = 0; idx < total; idx++)
	{
		A0[idx] = val_A0[idx];
		A1[idx] = val_A1[idx];
		A2[idx] = val_A2[idx];
		A3[idx] = val_A3[idx];
		A4[idx] = val_A4[idx];
		rhs[idx] = val_rhs[idx];
		vec_in[idx] = val_x_in[idx];
		vec_out[idx] = 0.0f;
	}
}
//Prints the identity and memory specs of every GPU visible to the runtime.
void getAllDeviceProperties() {
	int deviceCount = 0;
	hipGetDeviceCount(&deviceCount);
	for (int devId = 0; devId < deviceCount; devId++) {
		hipDeviceProp_t prop;
		hipGetDeviceProperties(&prop, devId);
		cout << " Device Number: " << devId << endl;
		cout << " Device name: " << prop.name << endl;
		cout << " Memory Clock Rate (KHz): " << prop.memoryClockRate << endl;
		cout << " Memory Bus Width (bits): " << prop.memoryBusWidth << endl;
		//Peak bandwidth = 2 (DDR) * clock * bus-width-in-bytes, scaled to GB/s
		cout << " Peak Memory Bandwidth (GB/s): " << 2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6 << endl << endl << endl;
	}
}
/* Scatters one device's partial result back into the global result vector and, once the last
   device has finished the final iteration, dumps the assembled field to "data2.txt"
   (printed from the last element backwards, one row of dim values per line). */
void sendToPrint(float *partial_result, int devicePosition_X, int devicePosition_Y, int numberOfDevicesAlong_X, int chunk_X, int chunk_Y, int dim, int totalSize, vector<float> &result, int numDevices, int currentIteration, int numberOfTotalIterations) {
	//Linear index of the chunk's first element inside the full dim x dim domain:
	//devicePosition_Y rows of chunks above us, devicePosition_X chunks to our left
	int chunkOrigin = (devicePosition_Y * dim * chunk_Y) + (devicePosition_X * chunk_X);
	int src = 0;
	for (int row = 0; row < chunk_Y; row++)
	{
		//One full row across all GPUs is dim wide, hence the row stride of dim
		int rowBase = chunkOrigin + row * dim;
		for (int col = 0; col < chunk_X; col++)
		{
			result[rowBase + col] = partial_result[src];
			src++;
		}
	}
	//Write the file only once: when the last device has completed the last iteration
	//(drop this check to inspect each device's contribution separately)
	int deviceID = getDeviceIDfromCoord(devicePosition_X, devicePosition_Y, numberOfDevicesAlong_X);
	if ((deviceID == (numDevices - 1)) && (currentIteration == (numberOfTotalIterations - 1)))
	{
		ofstream myfile;
		myfile.open("data2.txt");
		for (int i = totalSize; i > 0; i--) {
			if (i % dim == 0) {
				myfile << endl;
			}
			myfile << result[i - 1] << " ";
		}
		myfile.close();
	}
}
hipError_t performMultiGPUJacobi(unsigned int val_dim, unsigned int numJacobiIt, float* val_A0, float* val_A1, float* val_A2, float* val_A3, float* val_A4, float* val_rhs, float* val_x_in)
{
//Fixed value changed later
int dim = 8;
if (val_dim != 0) {
dim = val_dim;
}
//TODO: write a 2D domain decomposition method for more than 2 GPUs
int size = dim * dim;
//auto result = make_unique<float[]>(size);
//Create Diagonal Vectors
std::vector<float> a0(size);
std::vector<float> a1(size);
std::vector<float> a2(size);
std::vector<float> a3(size);
std::vector<float> a4(size);
std::vector<float> vec_in(size);
std::vector<float> vec_out(size);
std::vector<float> rhs(size);
std::vector<float> result(size);
//Get the total number of devices
int numDevices = -1;
hipGetDeviceCount(&numDevices);
//numDevices = 2;
//Set Decomposition dimension 1D or 2D: when decomposition is 0. Computation happens on a single GPU
int decom_Dim = 2;
//Set Values for Domain Decompostion type 1D or 2D
int domainDecom_Dim = decom_Dim;
//Read the custom config defined in file "multiGPUConfig.xml"
getConfiguration("multiGPUConfig.xml", numDevices, domainDecom_Dim);
cout << endl << "Total number of Devices in the System are : " << numDevices << endl;
getAllDeviceProperties();
//Creating a Default topology mapping (without optimization) with the number of Devices specified in the configuration
map<int, int> gpuTopology;
for (int dev = 0; dev<numDevices; dev++)
{
gpuTopology[dev] = dev;
}
//Force Topology Testing Set 1
/* gpuTopology[0] = 3;
gpuTopology[1] = 1;
gpuTopology[2] = 2;
gpuTopology[3] = 0; */
//Force Topology Testing Set 2
/*gpuTopology[0] = 3;
gpuTopology[1] = 0;
gpuTopology[2] = 2;
gpuTopology[3] = 1; */
//Force Topology Testing Set 3 -- Gives the worst Configuration
gpuTopology[0] = 1;
gpuTopology[1] = 3;
gpuTopology[2] = 2;
gpuTopology[3] = 0;
//Force Topology Testing Set 4 --Good Config-- as GPUs on same chip are neighbours
/* gpuTopology[0] = 3;
gpuTopology[1] = 2;
gpuTopology[2] = 1;
gpuTopology[3] = 0; */
//Configuring the number of GPU's manually
//numDevices=2;
copyValues(&a0[0], &a1[0], &a2[0], &a3[0], &a4[0], &rhs[0], &vec_in[0], &vec_out[0], dim, &val_A0[0], &val_A1[0], &val_A2[0], &val_A3[0], &val_A4[0], &val_rhs[0], &val_x_in[0]);
vector<create_Device> deviceArray;
/* Distributed Compuation using Halos: Algorithm
1. Init Halos.
1.a) In 1D decomposition nhalo and shalo intialized from vector x_in
1.b) In 2D decompsition nhalo,shalo, ehalo and whalo initialozed from vector x_in
2. Pass the halos to Jacobi_kernal.
3. Store the result computed at the boundary into the halo boundary positions.
4. Swap nhalo and shalo pairs in 1D decompostion. Swap (nhalo,shalo) and (ehalo,whalo) in 2D.
*/
//=================================Domain Decomposition Logic Starts=================================================================
/*Generating a GPU Grid with multiple GPUs and creating a Topology*/
int numberOfDevicesAlong_X = 1;
int numberOfDevicesAlong_Y = 1;
generateGPUGRID(numDevices, numberOfDevicesAlong_X, numberOfDevicesAlong_Y, domainDecom_Dim);
cout << "GPU grid structure is : " << numberOfDevicesAlong_X << " X " << numberOfDevicesAlong_Y << endl;
//Total elements along each dim in 2D
int chunk_X = dim / numberOfDevicesAlong_X;
int chunk_Y = dim / numberOfDevicesAlong_Y;
/* Creating a GPU topology with multiple devices*/
createTopology(numDevices, deviceArray, numberOfDevicesAlong_X, numberOfDevicesAlong_Y);
/*Optimize topology 1D or 2D. Depending upon the required domain division */
if (domainDecom_Dim == 1)
{
gpuTopology = outputLatencyMatrix(numDevices, false, numberOfDevicesAlong_X, numberOfDevicesAlong_Y, domainDecom_Dim);
}
else//for 2D domain decomposition or higher
{
gpuTopology = outputLatencyMatrix(numDevices, false, numberOfDevicesAlong_X, numberOfDevicesAlong_Y, domainDecom_Dim);
}
//Enable Peer-to-Peer access across all GPUs : Done on phase 2 of development
bool p2penabled = false;
p2penabled = enableP2P(numDevices);
//Let the total number of GPU be 2 : has to be changed later
//Computation divided into (size/2) on first and size-(size/2) on second
std::vector<int> domainDivision(numDevices);
//Logic for total chunk per device (Domain distribution)
for (int i = 0; i < numDevices; i++) {
//Chunk per GPU will be same irrepective of 1D or 2D decomposition
domainDivision[i] = size / numDevices;
}
//For use on Device
std::vector<float*>d_A0(numDevices);
std::vector<float*>d_A1(numDevices);
std::vector<float*>d_A2(numDevices);
std::vector<float*>d_A3(numDevices);
std::vector<float*>d_A4(numDevices);
std::vector<float*>d_Vec_In(numDevices);
std::vector<float*>d_Vec_Out(numDevices);
std::vector<float*>d_nhalos(numDevices);
std::vector<float*>d_shalos(numDevices);
std::vector<float*>d_ehalos(numDevices);
std::vector<float*>d_whalos(numDevices);
std::vector<float*>d_Rhs(numDevices);
//Device Buffers for parallel communication using streams: Concept of Front and Back Buffer Oct 30, 2017
std::vector<float*>x_buffer_north(numDevices);
std::vector<float*>x_buffer_south(numDevices);
std::vector<float*>y_buffer_west(numDevices);
std::vector<float*>y_buffer_east(numDevices);
//Note: Using Pinned memory on Host for Halos -> Performance Approach 1
vector<float*>nHalo_pinned(numDevices);
vector<float*>sHalo_pinned(numDevices);
vector<float*>wHalo_pinned(numDevices);
vector<float*>eHalo_pinned(numDevices);
for (int dev = 0; dev < numDevices; dev++)
{
hipSetDevice(gpuTopology[dev]);
hipHostMalloc((void**)&nHalo_pinned[gpuTopology[dev]], (chunk_X) * sizeof(float));
hipHostMalloc((void**)&sHalo_pinned[gpuTopology[dev]], (chunk_X) * sizeof(float));
hipHostMalloc((void**)&wHalo_pinned[gpuTopology[dev]], (chunk_Y) * sizeof(float));
hipHostMalloc((void**)&eHalo_pinned[gpuTopology[dev]], (chunk_Y) * sizeof(float));
}
for (int dev = 0; dev < numDevices; dev++)
{
//Setting the device before allocation
hipSetDevice(gpuTopology[dev]);
//cudamalloc the Diagonals
hipMalloc((void**)&d_A0[gpuTopology[dev]], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_A1[gpuTopology[dev]], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_A2[gpuTopology[dev]], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_A3[gpuTopology[dev]], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_A4[gpuTopology[dev]], domainDivision[dev] * sizeof(float));
//Using pinned memory as part of performance upgrade- Phase 2 of development
//cudamalloc the Input Vector and Result vector
hipMalloc((void**)&d_Vec_In[gpuTopology[dev]], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_Vec_Out[gpuTopology[dev]], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_Rhs[gpuTopology[dev]], domainDivision[dev] * sizeof(float));
//hipMalloc Halos: North and South--1D. TODO: East and West for 2D
hipMalloc((void**)&d_nhalos[gpuTopology[dev]], chunk_X * sizeof(float));
hipMalloc((void**)&d_shalos[gpuTopology[dev]], chunk_X * sizeof(float));
hipMalloc((void**)&d_ehalos[gpuTopology[dev]], chunk_Y * sizeof(float));
hipMalloc((void**)&d_whalos[gpuTopology[dev]], chunk_Y * sizeof(float));
//Buffer memory used for p2p exchange
hipMalloc((void**)&x_buffer_north[gpuTopology[dev]], chunk_X * sizeof(float));
hipMalloc((void**)&x_buffer_south[gpuTopology[dev]], chunk_X * sizeof(float));
hipMalloc((void**)&y_buffer_west[gpuTopology[dev]], chunk_Y * sizeof(float));
hipMalloc((void**)&y_buffer_east[gpuTopology[dev]], chunk_Y * sizeof(float));
}
/* The transfer of Data from Host to Device : Domain Decomposition in 2D*/
if (decom_Dim == 2) {
//Create Partial Diagonal Vectors
//Size per GPU will be
int chunkSize = chunk_X * chunk_Y;
std::vector<float> partial_a0(chunkSize);
std::vector<float> partial_a1(chunkSize);
std::vector<float> partial_a2(chunkSize);
std::vector<float> partial_a3(chunkSize);
std::vector<float> partial_a4(chunkSize);
std::vector<float> partial_vec_in(chunkSize);
std::vector<float> partial_vec_out(chunkSize);
std::vector<float> partial_rhs(chunkSize);
std::vector<float> partial_result(chunkSize);
for (int dev = 0; dev < numDevices; dev++)
{
//Test the properties of the device assigned
//cout << endl << "New Logical Device created " << deviceArray[dev].deviceID;
//cout << endl << "New Logical Device (X,Y) coord (" << deviceArray[dev].devicePosition_X << "," << deviceArray[dev].devicePosition_Y << ")";
//==========Important: Logic for creation of Chunks to be allocated to GPUs==========================================
//Important : Mention about the correlation between the topology and data position in the thesis
int devicePosX = deviceArray[dev].devicePosition_X;
int devicePosY = deviceArray[dev].devicePosition_Y;
//cout << endl << "For Device ID " << deviceArray[dev].deviceID << endl;
//cout << endl << "Device pos X " << devicePosX << endl;
//cout << endl << "Device pos Y " << devicePosY << endl;
//cout << endl << "Chunk X " << chunk_X << endl;
//cout << endl << "Chunk Y " << chunk_Y << endl;
//cout << endl << "Number of device along X " << numberOfDevicesAlong_X << endl;
//cout << endl << "Number of device along Y " << numberOfDevicesAlong_Y << endl;
//Calculating data position based on device coords
//numberOfDevicesAlong_X * Chunk_X * Chunk_Y : finds out the total data per row of GPUs allocated
//int dataStartPos_X = (devicePosX * numberOfDevicesAlong_X * chunk_X * chunk_Y) + (devicePosY * chunk_X);
int dataStartPos_X = (devicePosY * dim * chunk_Y) + (devicePosX * chunk_X);
int dataEndPos_X = dataStartPos_X + chunk_X;
//cout << endl << "Data Start Pos is " << dataStartPos_X << endl;
//cout << endl << "Data End Pos is " << dataEndPos_X << endl;
//One complete row across all GPU is dim in order to get the next element above an element we add (currentPosition + dim )
int rowStartPos = dataStartPos_X;
int rowEndPos = dataEndPos_X;
int indexCounter = 0;
//Initialize Halos
initHalos2D(deviceArray[dev], chunk_X, chunk_Y, &vec_in[0], numberOfDevicesAlong_X, numberOfDevicesAlong_Y, rowStartPos, rowEndPos - 1, dim);
for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
{
//cout << endl << "Data Start Pos is " << rowStartPos << endl;
//Get one complete row for the GPU
for (int pos = rowStartPos; pos < rowEndPos; pos++)
{
partial_a0[indexCounter] = a0[pos];
partial_a1[indexCounter] = a1[pos];
partial_a2[indexCounter] = a2[pos];
partial_a3[indexCounter] = a3[pos];
partial_a4[indexCounter] = a4[pos];
partial_vec_in[indexCounter] = vec_in[pos];
partial_vec_out[indexCounter] = vec_out[pos];
partial_rhs[indexCounter] = rhs[pos];
partial_result[indexCounter] = result[pos];
indexCounter++;
}
//cout << endl << "Data End Pos is " << rowEndPos << endl;
rowStartPos += dim;
rowEndPos = rowStartPos + chunk_X;
}
//==========Important: Logic for creation of Chunks to be allocated to GPUs Ends ==========================================
//Setting Cuda device
hipSetDevice(gpuTopology[dev]);
//Copy the diagonals from host to device : calling all at once instead of putting inside the for loop
hipMemcpy(d_A0[gpuTopology[dev]], &partial_a0[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_A1[gpuTopology[dev]], &partial_a1[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_A2[gpuTopology[dev]], &partial_a2[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_A3[gpuTopology[dev]], &partial_a3[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_A4[gpuTopology[dev]], &partial_a4[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
//Copy in and out vectors and RHS
hipMemcpy(d_Vec_In[gpuTopology[dev]], &partial_vec_in[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Vec_Out[gpuTopology[dev]], &partial_vec_out[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Rhs[gpuTopology[dev]], &partial_rhs[0], domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
}
if (auto err = hipGetLastError())
{
cout << "Data copy failed 1: " << hipGetErrorString(err) << endl;
return err;
}
//Copy intial Halos in 2D
//Initial Exchange Halos: Then do intial cudaMemcopies
exchangehalos_onHost(numDevices, deviceArray, numberOfDevicesAlong_X);
for (int dev = 0; dev < numDevices; dev++)
{
hipSetDevice(gpuTopology[dev]);
//Copying Halos to the device
if (deviceArray[dev].nHalo_flag == 1)
{
hipMemcpy(d_nhalos[gpuTopology[dev]], &deviceArray[dev].nHalo[0], chunk_X * sizeof(float), hipMemcpyHostToDevice);
}
if (deviceArray[dev].sHalo_flag == 1)
{
hipMemcpy(d_shalos[gpuTopology[dev]], &deviceArray[dev].sHalo[0], chunk_X * sizeof(float), hipMemcpyHostToDevice);
}
if (deviceArray[dev].eHalo_flag == 1)
{
hipMemcpy(d_ehalos[gpuTopology[dev]], &deviceArray[dev].eHalo[0], chunk_Y * sizeof(float), hipMemcpyHostToDevice);
}
if (deviceArray[dev].wHalo_flag == 1)
{
hipMemcpy(d_whalos[gpuTopology[dev]], &deviceArray[dev].wHalo[0], chunk_Y * sizeof(float), hipMemcpyHostToDevice);
}
}
if (auto err = hipGetLastError())
{
cout << "Halo Copy Failed " << hipGetErrorString(err) << endl;
return err;
}
//Development phase 2 changes : For p2p operation communication initialize buffers
for (int dev = 0; dev < numDevices; dev++)
{
hipSetDevice(gpuTopology[dev]);
//Copying Halos to the device
if (deviceArray[dev].nHalo_flag == 1)
{
//cout << "Device ID for nHaloFlag is : " << deviceArray[dev].deviceID<<endl;
hipMemcpy(x_buffer_north[gpuTopology[dev]], &deviceArray[dev].nHalo[0], chunk_X * sizeof(float), hipMemcpyHostToDevice);
}
if (deviceArray[dev].sHalo_flag == 1)
{
//cout << "Device ID for sHaloFlag is : " << deviceArray[dev].deviceID << endl;
hipMemcpy(x_buffer_south[gpuTopology[dev]], &deviceArray[dev].sHalo[0], chunk_X * sizeof(float), hipMemcpyHostToDevice);
}
if (deviceArray[dev].eHalo_flag == 1)
{
//cout << "Device ID for eHaloFlag is : " << deviceArray[dev].deviceID << endl;
hipMemcpy(y_buffer_east[gpuTopology[dev]], &deviceArray[dev].eHalo[0], chunk_Y * sizeof(float), hipMemcpyHostToDevice);
}
if (deviceArray[dev].wHalo_flag == 1)
{
//cout << "Device ID for wHaloFlag is : " << deviceArray[dev].deviceID << endl;
hipMemcpy(y_buffer_west[gpuTopology[dev]], &deviceArray[dev].wHalo[0], chunk_Y * sizeof(float), hipMemcpyHostToDevice);
}
}
}
//=================================Domain Decomposition Logic Ends =================================================================
//=================================Setting up the grids and blocks for kernel launch================================================
//int blocksize = -1;
//int threads = -1;
int2 myDim;
myDim.x = chunk_X;
myDim.y = chunk_Y;
dim3 block(BLOCKSIZE_X, BLOCKSIZE_Y);
dim3 grid(DIVRND(myDim.x, BLOCKSIZE_X), DIVRND(myDim.y, BLOCKSIZE_Y));
//==================================================================================================================================
//Call to kernal
int iterations = 0;
if (numJacobiIt != 0) {
iterations = numJacobiIt;
}
else
{
cout << endl << " No. of iterations is zero exiting... ";
//return;
}
//===========================================CUDA Stream implementation for performance. Phase 2 of Development ====================================================
//===========Algorithm Improvement: Identify the neighbours so that they could be launched together and the exchange can take place. Without having to wait for computation across all devices============================
//hipStream_t streams[4];//Possible to declare it dynamically ? Yes. Using Vectors.
vector<hipStream_t> streams(numDevices);
//vector<hipStream_t> streamsComm(numDevices);
//Create seperate streams for each Halo Exchange
vector<hipStream_t> nHaloExchange(numDevices);
vector<hipStream_t> sHaloExchange(numDevices);
vector<hipStream_t> eHaloExchange(numDevices);
vector<hipStream_t> wHaloExchange(numDevices);
//hipStream_t nHaloExchange[4];
//hipStream_t sHaloExchange[4];
//hipStream_t eHaloExchange[4];
//hipStream_t wHaloExchange[4];
//Note: Default stream for a device is always syncronizing so creating seperate streams for each device
for (int i = 0; i < numDevices; i++)
{
hipSetDevice(gpuTopology[i]);
hipStreamCreate(&streams[gpuTopology[i]]);
//hipStreamCreate(&streamsComm[i]);
if (p2penabled) {
hipStreamCreate(&nHaloExchange[gpuTopology[i]]);
hipStreamCreate(&sHaloExchange[gpuTopology[i]]);
hipStreamCreate(&eHaloExchange[gpuTopology[i]]);
hipStreamCreate(&wHaloExchange[gpuTopology[i]]);
}
}
//For explicit synchornizing p2p transfers and async memcopies
//hipEvent_t events[4];
vector<hipEvent_t> events(numDevices);
vector<hipEvent_t> nHaloEvent(numDevices);
vector<hipEvent_t> sHaloEvent(numDevices);
vector<hipEvent_t> eHaloEvent(numDevices);
vector<hipEvent_t> wHaloEvent(numDevices);
//hipEvent_t nHaloEvent[4];
//hipEvent_t sHaloEvent[4];
//hipEvent_t eHaloEvent[4];
//hipEvent_t wHaloEvent[4];
for (int i = 0; i < numDevices; i++)
{
hipSetDevice(gpuTopology[i]);
hipEventCreate(&events[gpuTopology[i]]);
if (p2penabled) {
hipEventCreate(&nHaloEvent[gpuTopology[i]]);
hipEventCreate(&sHaloEvent[gpuTopology[i]]);
hipEventCreate(&eHaloEvent[gpuTopology[i]]);
hipEventCreate(&wHaloEvent[gpuTopology[i]]);
}
}
/*Using a pagable memory first*/
//std::vector<float> partial_resultOnHost(chunk_X * chunk_Y);
/*Using a pinned(page locked) memory for performance*/
vector<float*>partial_resultOnHost(numDevices);
for (int dev = 0; dev < numDevices; dev++)
{
hipSetDevice(gpuTopology[dev]);
hipHostMalloc((void**)&partial_resultOnHost[dev], (chunk_X * chunk_Y) * sizeof(float));
}
//==============================================================
//Check performance
hipError_t status = hipGetLastError();
high_resolution_clock::time_point t1 = high_resolution_clock::now();
#pragma omp parallel num_threads(numDevices)
{
int dev = omp_get_thread_num();
//hipSetDevice(omp_get_thread_num());
for (int i = 0; i <= iterations; i++)
{
hipSetDevice(gpuTopology[dev]);
#pragma omp barrier
if ((i>0))
{
//As this is not a run on a single host thread race conditions occurs. So have to manage the pointer swapping by creating a copy
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1)
{
swap(x_buffer_north[gpuTopology[dev]], d_nhalos[gpuTopology[dev]]);
}
//Check if device is having a south Halo buffer
if (deviceArray[dev].sHalo_flag == 1)
{
swap(x_buffer_south[gpuTopology[dev]], d_shalos[gpuTopology[dev]]);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1)
{
swap(y_buffer_east[gpuTopology[dev]], d_ehalos[gpuTopology[dev]]);
}
//Check if device is having a west Halo buffer
if (deviceArray[dev].wHalo_flag == 1)
{
swap(y_buffer_west[gpuTopology[dev]], d_whalos[gpuTopology[dev]]);
}
}
jacobi_Simple << <grid, block, 0, streams[gpuTopology[dev]] >> >(d_A0[gpuTopology[dev]], d_A1[gpuTopology[dev]], d_A2[gpuTopology[dev]], d_A3[gpuTopology[dev]], d_A4[gpuTopology[dev]], d_Vec_In[gpuTopology[dev]], d_Vec_Out[gpuTopology[dev]], d_Rhs[gpuTopology[dev]], deviceArray[dev].eHalo_flag, deviceArray[dev].wHalo_flag, deviceArray[dev].nHalo_flag, deviceArray[dev].sHalo_flag, d_ehalos[gpuTopology[dev]], d_whalos[gpuTopology[dev]], d_nhalos[gpuTopology[dev]], d_shalos[gpuTopology[dev]], deviceArray[dev].deviceID, numDevices, decom_Dim, myDim);
//jacobi_Comm << <grid, block, 0, streamsComm[dev] >> >(d_A0[dev], d_A1[dev], d_A2[dev], d_A3[dev], d_A4[dev], d_Vec_In[dev], d_Vec_Out[dev], d_Rhs[dev], deviceArray[dev].eHalo_flag, deviceArray[dev].wHalo_flag, deviceArray[dev].nHalo_flag, deviceArray[dev].sHalo_flag, d_ehalos[dev], d_whalos[dev], d_nhalos[dev], d_shalos[dev], deviceArray[dev].deviceID, numDevices, decom_Dim, myDim);
//For Synchronizing while Halo Exchange start
hipEventRecord(events[gpuTopology[dev]], streams[gpuTopology[dev]]);
swap(d_Vec_In[gpuTopology[dev]], d_Vec_Out[gpuTopology[dev]]);
if (auto err = hipGetLastError())
{
cout << "Data copy failed 2: " << hipGetErrorString(err) << endl;
//return err;
}
//Exchange Halos after each iteration except the last iteration
if ((i < (iterations - 1)))
{
//============Important: Before copying to buffers make sure the kernel on the respective GPU(s) finished execution using hipStreamWaitEvent=======================
int getDevCoord_X = deviceArray[dev].devicePosition_X;
int getDevCoord_Y = deviceArray[dev].devicePosition_Y;
#pragma omp barrier // Important: To make sure all threads assign proper values to duplicate pointers before Halo Exchange Begins
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1)
{
//hipSetDevice(gpuTopology[dev]);
int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y + 1, numberOfDevicesAlong_X);
//Exchange Halos
//Send to the device
hipStreamWaitEvent(nHaloExchange[gpuTopology[dev]], events[gpuTopology[dev]], 0);
hipMemcpyPeerAsync(x_buffer_south[gpuTopology[devIDtoNorth]], gpuTopology[devIDtoNorth], d_nhalos[gpuTopology[dev]], gpuTopology[dev], chunk_X * sizeof(float), nHaloExchange[gpuTopology[dev]]);
hipEventRecord(nHaloEvent[gpuTopology[dev]], nHaloExchange[gpuTopology[dev]]);
//Postpone the next iteration kernel execution till the p2p transfers complete
//hipSetDevice(gpuTopology[devIDtoNorth]);
hipStreamWaitEvent(streams[gpuTopology[devIDtoNorth]], nHaloEvent[gpuTopology[dev]], 0);
}
//Check if device is having a south Halo buffer
if (deviceArray[dev].sHalo_flag == 1)
{
//hipSetDevice(gpuTopology[dev]);
int devIDtoSouth = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y - 1, numberOfDevicesAlong_X);
//Exchange Halos
//Send to the device
hipStreamWaitEvent(sHaloExchange[gpuTopology[dev]], events[gpuTopology[dev]], 0);
hipMemcpyPeerAsync(x_buffer_north[gpuTopology[devIDtoSouth]], gpuTopology[devIDtoSouth], d_shalos[gpuTopology[dev]], gpuTopology[dev], chunk_X * sizeof(float), sHaloExchange[gpuTopology[dev]]);
hipEventRecord(sHaloEvent[gpuTopology[dev]], sHaloExchange[gpuTopology[dev]]);
//Postpone the next iteration kernel execution till the p2p transfers complete
//hipSetDevice(gpuTopology[devIDtoSouth]);
hipStreamWaitEvent(streams[gpuTopology[devIDtoSouth]], sHaloEvent[gpuTopology[dev]], 0);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1)
{
//hipSetDevice(gpuTopology[dev]);
int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X + 1, getDevCoord_Y, numberOfDevicesAlong_Y);
//Exchange Halos
//Send to the device
hipStreamWaitEvent(eHaloExchange[gpuTopology[dev]], events[gpuTopology[dev]], 0);
hipMemcpyPeerAsync(y_buffer_west[gpuTopology[devIDtoEast]], gpuTopology[devIDtoEast], d_ehalos[gpuTopology[dev]], gpuTopology[dev], chunk_Y * sizeof(float), eHaloExchange[gpuTopology[dev]]);
hipEventRecord(eHaloEvent[gpuTopology[dev]], eHaloExchange[gpuTopology[dev]]);
//Postpone the next iteration kernel execution till the p2p transfers complete
//hipSetDevice(gpuTopology[devIDtoEast]);
hipStreamWaitEvent(streams[gpuTopology[devIDtoEast]], eHaloEvent[gpuTopology[dev]], 0);
}
//Check if device is having a west Halo buffer
if (deviceArray[dev].wHalo_flag == 1)
{
//hipSetDevice(gpuTopology[dev]);
int devIDtoWest = getDeviceIDfromCoord(getDevCoord_X - 1, getDevCoord_Y, numberOfDevicesAlong_Y);
//Exchange Halos
//Send to the device
hipStreamWaitEvent(wHaloExchange[gpuTopology[dev]], events[gpuTopology[dev]], 0);
hipMemcpyPeerAsync(y_buffer_east[gpuTopology[devIDtoWest]], gpuTopology[devIDtoWest], d_whalos[gpuTopology[dev]], gpuTopology[dev], chunk_Y * sizeof(float), wHaloExchange[gpuTopology[dev]]);
hipEventRecord(wHaloEvent[gpuTopology[dev]], wHaloExchange[gpuTopology[dev]]);
//Postpone the next iteration kernel execution till the p2p transfers complete
//hipSetDevice(gpuTopology[devIDtoWest]);
hipStreamWaitEvent(streams[gpuTopology[devIDtoWest]], wHaloEvent[gpuTopology[dev]], 0);
}
/*if (auto err = hipGetLastError())
{
cout << "Halo Exchange Error: " << hipGetErrorString(err) << endl;
}*/
}
}
}
for (int dev = 0; dev < numDevices; dev++)
{
hipSetDevice(dev);
hipDeviceSynchronize();
}
high_resolution_clock::time_point t2 = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(t2 - t1).count();
cout << endl << "Iterations successful. Time taken in microseconds :" << duration << endl;
//Copying the final results
for (int dev = 0; dev < numDevices; dev++)
{
hipSetDevice(gpuTopology[dev]);
hipMemcpyAsync(&partial_resultOnHost[dev][0], d_Vec_Out[gpuTopology[dev]], domainDivision[dev] * sizeof(float), hipMemcpyDeviceToHost, streams[gpuTopology[dev]]);
}
//Sync and Destroy streams and events
for (int i = 0; i < numDevices; ++i)
{
hipSetDevice(i);
//Destroy Events
hipEventDestroy(events[i]);
hipEventDestroy(nHaloEvent[i]);
hipEventDestroy(sHaloEvent[i]);
hipEventDestroy(eHaloEvent[i]);
hipEventDestroy(wHaloEvent[i]);
//Synchro the streams
hipStreamSynchronize(streams[i]);
hipStreamDestroy(streams[i]);
//hipStreamSynchronize(streamsComm[i]);
//hipStreamDestroy(streamsComm[i]);
hipStreamSynchronize(nHaloExchange[i]);
hipStreamDestroy(nHaloExchange[i]);
hipStreamSynchronize(sHaloExchange[i]);
hipStreamDestroy(sHaloExchange[i]);
hipStreamSynchronize(eHaloExchange[i]);
hipStreamDestroy(eHaloExchange[i]);
hipStreamSynchronize(wHaloExchange[i]);
hipStreamDestroy(wHaloExchange[i]);
}
//Results copied to disk
for (int dev = 0; dev < numDevices; dev++)
{
sendToPrint(&partial_resultOnHost[dev][0], deviceArray[dev].devicePosition_X, deviceArray[dev].devicePosition_Y, numberOfDevicesAlong_X, chunk_X, chunk_Y, dim, size, result, numDevices, iterations - 1, iterations);
}
//==========================================Performance using CUDA stream ends===========================================================================
//Done in phase 2 of development: Disble P2P across devices
if (p2penabled) {
disableP2P(numDevices);
}
//Free memory on device
for (int dev = 0; dev < numDevices; dev++)
{
hipSetDevice(dev);
hipFree(d_A0[dev]);
hipFree(d_A1[dev]);
hipFree(d_A2[dev]);
hipFree(d_A3[dev]);
hipFree(d_A4[dev]);
hipFree(d_Vec_In[dev]);
hipFree(d_Vec_Out[dev]);
hipFree(d_nhalos[dev]);
hipFree(d_shalos[dev]);
hipFree(d_ehalos[dev]);
hipFree(d_whalos[dev]);
hipFree(d_Rhs[dev]);
hipFree(x_buffer_south[dev]);
hipFree(x_buffer_north[dev]);
hipFree(y_buffer_west[dev]);
hipFree(y_buffer_east[dev]);
hipHostFree(partial_resultOnHost[dev]);
hipHostFree(nHalo_pinned[dev]);
hipHostFree(sHalo_pinned[dev]);
hipHostFree(wHalo_pinned[dev]);
hipHostFree(eHalo_pinned[dev]);
hipDeviceReset();
}
cout << endl << "Device Memory free successful." << endl;
//Take care of dynamic mem location
//delete[] domainDivision;
return hipSuccess;
}
/* Public entry point for the multi-GPU 2D-decomposed Jacobi solver.
 * Forwards the problem (five stencil diagonals A0..A4, right-hand side and
 * initial guess, each dim*dim floats) to performMultiGPUJacobi and maps its
 * status to a process-style exit code.
 * Returns 0 on success, 1 if the GPU computation reported an error.
 *
 * Fix: the original repeated the identical `cudaStatus != hipSuccess` check
 * twice; the second branch ("Cuda Device Reset failed") was dead code because
 * the first check had already returned. The duplicate is removed. */
int performJacobi_MultiGPU2D_Decom(unsigned int dim, unsigned int numJacobiIt, float* A0, float* A1, float* A2, float* A3, float* A4, float* rhs, float* x_in)
{
	hipError_t cudaStatus = performMultiGPUJacobi(dim, numJacobiIt, &A0[0], &A1[0], &A2[0], &A3[0], &A4[0], &rhs[0], &x_in[0]);
	if (cudaStatus != hipSuccess) {
		cout << "Computation failed: " << endl;
		return 1;
	}
	return 0;
}
| 379fb9d4763b3160d94b47bd169ebad93f833db4.cu | //P2P Synchronization using events and Separate streams. Coupling-Overlapping several Exchanges together.
#include <omp.h>
#include "tinyxml.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "testMultiGPU_Jacobi2D_Decom.cuh"
#include "multiGPU_topology_optimize.cuh"
#include <iostream>
#include <chrono>
#include <memory>
#include <vector>
//#include<map>
#include <fstream>
#include <vector_types.h>
#define IMUL(a,b) __mul24(a,b)
#define DIVRND(a,b) ((a+b-1)/b)
#define BLOCKSIZE_X 32
#define BLOCKSIZE_Y 16
using namespace std;
using namespace std::chrono;
//cudaError_t performMultiGPUJacobi();
//Support for below c++14 on *nix
template<typename T, typename ...Args>
std::unique_ptr<T> make_unique(Args&& ...args)
{
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
// Logical description of one GPU in the decomposition grid: its identity,
// its coordinate in the topology, host-side staging buffers for the four
// halos, and flags marking which halos this device actually needs
// (set by initHalos2D based on whether a neighbour exists on that side).
struct create_Device
{
	int deviceID;          // logical device ordinal within the topology
	//In a GPU topology set the GPU position
	int devicePosition_X;  // column in the GPU grid
	int devicePosition_Y;  // row in the GPU grid
	int devicePosition_Z;  // reserved for 3D decomposition; fixed to 1 by createTopology
	vector<float> eHalo;   // east boundary column (resized to chunk_Y by initHalos2D)
	vector<float> wHalo;   // west boundary column (chunk_Y values)
	vector<float> nHalo;   // north boundary row (chunk_X values)
	vector<float> sHalo;   // south boundary row (chunk_X values)
	//Flags check the halos needed by the device
	// 1 = a neighbour exists on that side and the halo buffer is in use, 0 = boundary of the global domain
	int eHalo_flag = 0;
	int wHalo_flag = 0;
	int nHalo_flag = 0;
	int sHalo_flag = 0;
};
//Simple Jacobi iteration
// One Jacobi sweep over this device's 2D chunk (dim.x columns x dim.y rows,
// row-major) of the 5-point stencil system.
//   A0..A4 : stencil diagonals — A0 couples to the element one row below
//            (index - dim.x), A1 to the left neighbour, A2 is the main
//            diagonal (divisor), A3 the right neighbour, A4 one row above.
//   x_in/x_out : current and next iterate; rhs : right-hand side.
//   *halo / *halo_flag : neighbour-device boundary values, used on chunk
//            edges when the flag is 1; boundary threads write their fresh
//            results back into the halos at the end for the next exchange.
// deviceID is only referenced by the commented-out debug print; numDevices
// and domain_Decom are unused in the active code.
// NOTE(review): there is no (pos.x < dim.x && pos.y < dim.y) guard — this
// assumes the launch grid exactly covers dim; confirm at the call site.
__global__ void jacobi_Simple(const float *A0, const float *A1, const float *A2, const float *A3, const float *A4, float *x_in, float *x_out, const float *rhs, const int ehalo_flag, const int whalo_flag, const int nhalo_flag, const int shalo_flag, float *ehalo, float *whalo, float *nhalo, float *shalo, const int deviceID, const int numDevices, const int domain_Decom, int2 dim)
{
	// Global 2D coordinate of this thread: pos.x = column, pos.y = row.
	int2 pos = make_int2(
		blockIdx.x * blockDim.x + threadIdx.x,
		blockIdx.y * blockDim.y + threadIdx.y
	);
	// Flat row-major index into the chunk.
	int index = (pos.y * dim.x) + pos.x;
	//int index = threadIdx.x + blockDim.x * blockIdx.x;
	// Jacobi: start from rhs, subtract off-diagonal contributions, divide by A2.
	float result = rhs[index];
	int dim_x = dim.x;
	int dim_y = dim.y;
	//X_pos and Y_pos are just to understand the thread layout. Can be named to any suitable variable names
	// NOTE(review): the names are swapped relative to convention —
	// x_pos is the ROW index (pos.y) and y_pos is the COLUMN index (pos.x).
	int x_pos = pos.y;
	int y_pos = pos.x;
	// Flat indices of this thread's row/column boundary cells within the chunk.
	int leftBoundaryElem = x_pos * (dim_x);
	int rightBoundaryElem = (x_pos * dim_x) + (dim_x - 1);
	int topBoundaryElem = y_pos + ((dim_y - 1) * (dim_x));
	int bottomBoundaryElem = y_pos;
	/*if((deviceID==2)&&(index==leftBoundaryElem))
	{
	printf("For Device %d index is : %d\n", deviceID, index);
	printf("For Device %d leftBoundaryElem is : %d\n", deviceID, leftBoundaryElem);
	printf("rightBoundaryElem is : %d\n", rightBoundaryElem);
	printf("topBoundaryElem is : %d\n", topBoundaryElem);
	printf("bottomBoundaryElem is : %d\n", bottomBoundaryElem);
	}*/
	//Halo computation for 1D Decompostion: For the First and Last GPU Halo computation on both the sides(nhalo and shalo wont be needed)
	//======Left Bounday Elem
	if (index != leftBoundaryElem)
		//Left
		result -= A1[index] * x_in[index - 1];
	//Computation using the Halos
	// On the chunk's west edge the left neighbour lives on another device:
	// take its value from the west halo (indexed by row) when present.
	if (index == leftBoundaryElem) {
		if (whalo_flag == 1) {
			result -= A1[index] * whalo[x_pos];
		}
	}
	//======Right Bounday Elem
	if (index != rightBoundaryElem)
		//Right
		result -= A3[index] * x_in[index + 1];
	if (index == rightBoundaryElem) {
		if (ehalo_flag == 1) {
			result -= A3[index] * ehalo[x_pos];
		}
	}
	//======Bottom Bounday Elem
	if (index != bottomBoundaryElem)
		//Bottom
		result -= A0[index] * x_in[index - dim_x];
	if (index == bottomBoundaryElem) {
		if (shalo_flag == 1) {
			// South halo is indexed by column.
			result -= A0[index] * shalo[y_pos];
		}
	}
	//======Top Bounday Elem
	if (index != topBoundaryElem)
		//Top
		result -= A4[index] * x_in[index + dim_x];
	if (index == topBoundaryElem) {
		if (nhalo_flag == 1) {
			result -= A4[index] * nhalo[y_pos];
		}
	}
	result /= A2[index];
	x_out[index] = result;
	//Updating Halos at the End of the computation
	// Boundary threads publish their new values into the halo buffers so the
	// host / P2P exchange can hand them to the neighbouring device. Each halo
	// slot is read and written by the same thread, so no extra sync is needed.
	if (index == topBoundaryElem) {
		if (nhalo_flag == 1) {
			nhalo[y_pos] = result;
		}
	}
	if (index == bottomBoundaryElem) {
		if (shalo_flag == 1) {
			shalo[y_pos] = result;
		}
	}
	if (index == leftBoundaryElem) {
		if (whalo_flag == 1) {
			whalo[x_pos] = result;
		}
	}
	if (index == rightBoundaryElem) {
		if (ehalo_flag == 1) {
			ehalo[x_pos] = result;
		}
	}
}
//========================MultiGPU utility functions============================================================================
// load the named file and dump its structure to STDOUT
void getConfiguration(const char* pFilename, int &numDevices, int &domain_decom)
{
TiXmlDocument doc(pFilename);
bool loadOkay = doc.LoadFile();
if (loadOkay)
{
cout << "\nFile Loaded successfully\n";
TiXmlElement *pRoot = doc.RootElement();
TiXmlElement *element = pRoot->FirstChildElement();
while (element)
{
string elementName = element->Value();
string attribute = element->Attribute("name"); //Gets you the time variable
string value = element->GetText();
cout << "\n The attribute is " << attribute;
cout << "\n The elementName is " << elementName;
cout << "\n The element Value is " << value;
if (attribute == "numDevices") {
numDevices = stoi(value);
}
if (attribute == "decomposition") {
domain_decom = stoi(value);
}
element = element->NextSiblingElement();
}
}
else
{
cout << "\nCould not load config file\n";
}
}
// Probes every ordered (src, dst) device pair for peer-to-peer capability.
// Only the error status of the query is checked here; the capability result
// itself is discarded. Bails out on the first CUDA error, otherwise prints
// an informational note about the memcopy fallback.
void checkP2Paccess(int numGPUs)
{
	for (int src = 0; src < numGPUs; src++)
	{
		cudaSetDevice(src);
		for (int dst = 0; dst < numGPUs; dst++)
		{
			if (src == dst)
				continue;  // a device never peers with itself
			int access;
			cudaDeviceCanAccessPeer(&access, src, dst);
			if (auto err = cudaGetLastError())
			{
				cout << "P2P Operations failed : " << cudaGetErrorString(err) << endl;
				return;
			}
		}
	}
	cout << "\n***NOTE: In case a device doesn't have P2P access to other one, it falls back to normal memcopy procedure.\nSo you can see lesser Bandwidth (GB/s) in those cases.\n\n";
}
// Enables peer access from every device to every other capable device.
// Returns false as soon as any CUDA query or enable call reports an error,
// true when the full pass completes.
bool enableP2P(int numGPUs)
{
	for (int src = 0; src < numGPUs; src++)
	{
		cudaSetDevice(src);
		for (int dst = 0; dst < numGPUs; dst++)
		{
			int canAccess;
			cudaDeviceCanAccessPeer(&canAccess, src, dst);
			if (auto err = cudaGetLastError())
			{
				cout << "P2P Operations failed while enabling: " << cudaGetErrorString(err) << endl;
				return false;
			}
			if (!canAccess)
				continue;  // pair not P2P capable — normal memcopy will be used
			cudaDeviceEnablePeerAccess(dst, 0);
			if (auto err = cudaGetLastError())
			{
				cout << "P2P Operations failed while enabling: " << cudaGetErrorString(err) << endl;
				return false;
			}
		}
	}
	return true;
}
// Tears down peer access between every capable device pair
// (mirror of enableP2P). Stops at the first CUDA error.
void disableP2P(int numGPUs)
{
	for (int src = 0; src < numGPUs; src++)
	{
		cudaSetDevice(src);
		for (int dst = 0; dst < numGPUs; dst++)
		{
			int canAccess;
			cudaDeviceCanAccessPeer(&canAccess, src, dst);
			if (auto err = cudaGetLastError())
			{
				cout << "P2P Operations failed while disabling : " << cudaGetErrorString(err) << endl;
				return;
			}
			if (!canAccess)
				continue;
			cudaDeviceDisablePeerAccess(dst);
			if (auto err = cudaGetLastError())
			{
				cout << "P2P Operations failed while disabling: " << cudaGetErrorString(err) << endl;
				return;
			}
		}
	}
}
// Splits numDevices into two factors (fact_x * fact_y == numDevices) used as
// the GPU grid shape. Primes (and 1) map to a flat strip (numDevices x 1);
// composites get the divisor pair closest to a square, with fact_x <= fact_y.
//
// Fix: the original trial-division loops ran while i < numDevices / 2, so for
// small composites the body never executed — e.g. numDevices == 4 was
// reported "prime" (grid 4x1), and the factor loop could leave fact_x/fact_y
// untouched. Using i * i <= numDevices covers every candidate divisor.
void performFactorPairing(int numDevices, int &fact_x, int &fact_y)
{
	int i;
	//Check if numDevices is Prime
	bool isPrime = true;
	for (i = 2; i * i <= numDevices; ++i)
	{
		if (numDevices % i == 0)
		{
			isPrime = false;
			break;
		}
	}
	if (isPrime)
	{
		fact_x = numDevices;
		fact_y = 1;
	}
	else
	{
		//Keep the last (largest) divisor <= sqrt(numDevices): this yields the
		//most balanced factor pair for the grid.
		for (i = 2; i * i <= numDevices; ++i)
		{
			if (numDevices % i == 0) {
				fact_x = i;
				fact_y = numDevices / i;
			}
		}
	}
}
//===============================================================================================================================
//====================================Creating Topology with the number of Devices available====================================
//Derives the GPU grid shape (device counts along X and Y) from the total
//device count. A 1D decomposition lays all devices along X; a 2D
//decomposition prefers a perfect-square grid and otherwise falls back to a
//factor pairing of the device count.
void generateGPUGRID(int numDevices, int &numberOfDevicesAlong_X, int &numberOfDevicesAlong_Y, int domainDecomType)
{
	if (domainDecomType == 1)
	{
		//1D: a flat strip of devices
		numberOfDevicesAlong_X = numDevices;
		numberOfDevicesAlong_Y = 1;
		return;
	}
	//2D (or higher): try a square grid first
	int root = (int)sqrt(numDevices);
	if (root * root == numDevices)
	{
		numberOfDevicesAlong_X = root;
		numberOfDevicesAlong_Y = root;
	}
	else
	{
		//Not a perfect square: split the count into a factor pair
		int fact_x = 1;
		int fact_y = 1;
		performFactorPairing(numDevices, fact_x, fact_y);
		numberOfDevicesAlong_X = fact_x;
		numberOfDevicesAlong_Y = fact_y;
	}
}
/* Creates a topology for a number of devices in a system
for ex. The devices are aware of left, right, top and bottom neigbours in 2D
1. It also decides the chunk per devices by determining x-dimension and y-dimensions for per chunk of data per device.
2. It also initializes halos for each devices which can be exchanged with the neighbours
*/
/* Lays the devices out row-major on the GPU grid: device k is placed at
   column k % numberOfDevicesAlong_X, row k / numberOfDevicesAlong_X.
   The Z coordinate is fixed to 1 until 3D decomposition is implemented. */
void createTopology(int numDevices, vector<create_Device> &deviceArray, int numberOfDevicesAlong_X, int numberOfDevicesAlong_Y)
{
	deviceArray.resize(numDevices);
	unsigned int id = 0;
	for (int row = 0; row < numberOfDevicesAlong_Y; row++) {
		for (int col = 0; col < numberOfDevicesAlong_X; col++) {
			deviceArray[id].deviceID = id;
			deviceArray[id].devicePosition_X = col;
			deviceArray[id].devicePosition_Y = row;
			//devicePosition_Z to be changed later
			deviceArray[id].devicePosition_Z = 1;
			id++;
		}
	}
}
//==============================================================================================================================
//Init Halos: In 1D decomposition only North and South Halos are used. In 2D decomposition North, South, East and West Halo need to be initialized and computed
//TODO:Create a Halo Exchange Mechanism for 2D Multi GPU topology
// Initializes one device's halo buffers for a 2D decomposition.
// A halo is only created for a side on which a neighbouring device exists
// in the (maxdevicesAlong_X x maxDevicesAlong_Y) grid; the matching *_flag
// is set to 1 and the buffer is filled with this chunk's own boundary values
// taken from the global input vector vec_in.
//   chunk_X / chunk_Y : chunk width / height in elements
//   rowStartPos       : global index of the chunk's first (bottom-left) element
//   rowEndPos         : global index of the last element of the chunk's first row
//   dim               : global row length, i.e. the stride between rows in vec_in
// NOTE(review): east/west halos hold a column (chunk_Y values, stride dim);
// north/south halos hold a row (chunk_X values, stride 1).
void initHalos2D(create_Device &device, int chunk_X, int chunk_Y, float *vec_in, int maxdevicesAlong_X, int maxDevicesAlong_Y, int rowStartPos, int rowEndPos, int dim)
{
	/*cout << endl << "Inside Halo Computation 2D. printing Details";
	cout << endl << "Device ID " << device.deviceID;
	cout << endl << "Device position X " << device.devicePosition_X;
	cout << endl << "Device position Y " << device.devicePosition_Y;
	cout << endl << "Row Start " << rowStartPos;
	cout << endl << "Row End " << rowEndPos;*/
	//Assigning counter for each individual Halos. To prevent update of the same counter
	//int rowStartPosEast = rowStartPos;
	int rowStartPosWest = rowStartPos;
	int rowStartPosNorth = rowStartPos;
	int rowStartPosSouth = rowStartPos;
	int rowEndPosEast = rowEndPos;
	//int rowEndPosWest = rowEndPos;
	//int rowEndPosNorth = rowEndPos;
	//int rowEndPosSouth = rowEndPos;
	//Checks provided for Boundary devices in GPU topology
	// West neighbour exists: copy the chunk's first column (stride dim).
	if ((device.devicePosition_X - 1) >= 0) {
		//cout << "West Halo needed ";
		device.wHalo_flag = 1;
		device.wHalo.resize(chunk_Y);
		for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
		{
			device.wHalo[rowNum] = vec_in[rowStartPosWest];
			//cout << rowStartPosWest << " ";
			rowStartPosWest += dim;
		}
	}
	// East neighbour exists: copy the chunk's last column (stride dim).
	if ((device.devicePosition_X + 1) < maxdevicesAlong_X) {
		//cout << "East Halo needed ";
		device.eHalo_flag = 1;
		device.eHalo.resize(chunk_Y);
		for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
		{
			device.eHalo[rowNum] = vec_in[rowEndPosEast];
			//cout << rowEndPosEast << " ";
			rowEndPosEast += dim;
		}
	}
	// South neighbour exists: copy the chunk's first (bottom) row.
	if ((device.devicePosition_Y - 1) >= 0) {
		//cout << "South Halo needed ";
		device.sHalo_flag = 1;
		device.sHalo.resize(chunk_X);
		for (int rowNum = 0; rowNum < chunk_X; rowNum++)
		{
			device.sHalo[rowNum] = vec_in[rowStartPosSouth];
			//cout << rowStartPosSouth << " ";
			rowStartPosSouth++;
		}
	}
	// North neighbour exists: copy the chunk's last (top) row — skip ahead
	// (chunk_Y - 1) whole rows first.
	if ((device.devicePosition_Y + 1) < maxDevicesAlong_Y) {
		//cout << "North Halo needed ";
		device.nHalo_flag = 1;
		device.nHalo.resize(chunk_X);
		rowStartPosNorth = rowStartPosNorth + (dim * (chunk_Y - 1));
		for (int rowNum = 0; rowNum < chunk_X; rowNum++)
		{
			device.nHalo[rowNum] = vec_in[rowStartPosNorth];
			//cout << rowStartPosNorth << " ";
			rowStartPosNorth++;
		}
	}
}
//======================================Exchange Halos: on Host==============================================
//Maps a 2D device coordinate to its linear, row-major device ID.
int getDeviceIDfromCoord(int devCoord_x, int devCoord_y, int numberofDevicesAlong_X) {
	return devCoord_x + numberofDevicesAlong_X * devCoord_y;
}
void exchangehalos_onHost(int numDevices, vector<create_Device> &deviceArray, int numberofDevicesAlong_X)
{
//Halos exist in pairs so:
//Important: A device exchanges North-to-South Pairs and East-to-West Pairs only. Not South-to-North pairs and West-to-East pairs
//That way the number of exchanges are kept to minimum
for (int dev = 0; dev < numDevices; dev++)
{
int getDevCoord_X = deviceArray[dev].devicePosition_X;
int getDevCoord_Y = deviceArray[dev].devicePosition_Y;
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1) {
int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y + 1, numberofDevicesAlong_X);
//Exchange Halos
(deviceArray[dev].nHalo).swap(deviceArray[devIDtoNorth].sHalo);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1) {
int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X + 1, getDevCoord_Y, numberofDevicesAlong_X);
//Exchange Halos
(deviceArray[dev].eHalo).swap(deviceArray[devIDtoEast].wHalo);
}
}
}
//Pinned-memory variant of the host halo exchange: instead of swapping the
//vector contents, the pinned buffer POINTERS are swapped, so no data moves.
//Only north-to-south and east-to-west pairs are exchanged, touching each
//shared boundary exactly once. Always returns true.
bool exchangehalos_onHostPinned(int numDevices, vector<create_Device> &deviceArray, int numberofDevicesAlong_X, vector<float*> &nHalosPinned, vector<float*> &sHalosPinned, vector<float*> &eHalosPinned, vector<float*> &wHalosPinned)
{
	for (int dev = 0; dev < numDevices; dev++)
	{
		int posX = deviceArray[dev].devicePosition_X;
		int posY = deviceArray[dev].devicePosition_Y;
		//North neighbour present: our north buffer becomes its south buffer
		if (deviceArray[dev].nHalo_flag == 1) {
			int northID = getDeviceIDfromCoord(posX, posY + 1, numberofDevicesAlong_X);
			swap(nHalosPinned[dev], sHalosPinned[northID]);
		}
		//East neighbour present: our east buffer becomes its west buffer
		if (deviceArray[dev].eHalo_flag == 1) {
			int eastID = getDeviceIDfromCoord(posX + 1, posY, numberofDevicesAlong_X);
			swap(eHalosPinned[dev], wHalosPinned[eastID]);
		}
	}
	return true;
}
//===========================Exchange Halos: on Host Ends=====================================================
//Init matrix Diagonals A0, A1, A2, A3, A4
/* Copies the five stencil diagonals, the right-hand side and the initial
   guess from the caller-supplied val_* arrays into the solver's working
   arrays, and zero-initializes the output vector. Every array is expected
   to hold dim * dim elements. */
void copyValues(float *A0, float *A1, float *A2, float *A3, float *A4, float *rhs, float *vec_in, float *vec_out, int dim, float *val_A0, float *val_A1, float *val_A2, float *val_A3, float *val_A4, float *val_rhs, float *val_x_in)
{
	const unsigned int totalElems = dim * dim;
	//Mirror the source arrays into the working arrays element by element
	for (unsigned int idx = 0; idx < totalElems; idx++)
	{
		A0[idx] = val_A0[idx];
		A1[idx] = val_A1[idx];
		A2[idx] = val_A2[idx];
		A3[idx] = val_A3[idx];
		A4[idx] = val_A4[idx];
		rhs[idx] = val_rhs[idx];
		vec_in[idx] = val_x_in[idx];
	}
	//The result vector always starts from zero
	for (unsigned int idx = 0; idx < totalElems; idx++)
	{
		vec_out[idx] = 0.0f;
	}
}
//Dumps the basic properties (name, memory clock, bus width, peak bandwidth)
//of every CUDA-capable device visible to the runtime.
void getAllDeviceProperties() {
	int deviceTotal;
	cudaGetDeviceCount(&deviceTotal);
	for (int devId = 0; devId < deviceTotal; devId++) {
		cudaDeviceProp prop;
		cudaGetDeviceProperties(&prop, devId);
		cout << " Device Number: " << devId << endl;
		cout << " Device name: " << prop.name << endl;
		cout << " Memory Clock Rate (KHz): " << prop.memoryClockRate << endl;
		cout << " Memory Bus Width (bits): " << prop.memoryBusWidth << endl;
		//Peak bandwidth = 2 (DDR) * clock (kHz) * bus-width (bytes) / 1e6
		cout << " Peak Memory Bandwidth (GB/s): " << 2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6 << endl << endl << endl;
	}
}
/* Prints an output file for checking results */
// Scatters one device's chunk (partial_result, chunk_X * chunk_Y floats,
// row-major) back into the global result vector at the position implied by
// the device's grid coordinate; then, only when called for the LAST device
// on the LAST iteration, writes the assembled global result to "data2.txt"
// (rows printed from the top of the domain downward).
void sendToPrint(float *partial_result, int devicePosition_X, int devicePosition_Y, int numberOfDevicesAlong_X, int chunk_X, int chunk_Y, int dim, int totalSize, vector<float> &result, int numDevices, int currentIteration, int numberOfTotalIterations) {
	int devicePosX = devicePosition_X;
	int devicePosY = devicePosition_Y;
	//Calculating data position based on device coords
	//numberOfDevicesAlong_X * Chunk_X * Chunk_Y : finds out the total data per row of GPUs allocated
	//int dataStartPos_X = (devicePosX * numberOfDevicesAlong_X * chunk_X * chunk_Y) + (devicePosY * chunk_X);
	// Global index of the chunk's first element: skip devicePosY full GPU-rows
	// (each dim * chunk_Y elements), then devicePosX chunks within the row.
	int dataStartPos_X = (devicePosY * dim * chunk_Y) + (devicePosX * chunk_X);
	int dataEndPos_X = dataStartPos_X + chunk_X;
	//One complete row across all GPU is dim in order to get the next element above an element we add (currentPosition + dim )
	int rowStartPos = dataStartPos_X;
	int rowEndPos = dataEndPos_X;
	int indexCounter = 0;  // read cursor into the device's flat chunk buffer
	//cout << endl;
	for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
	{
		//Get one complete row for the GPU
		for (int pos = rowStartPos; pos < rowEndPos; pos++)
		{
			result[pos] = partial_result[indexCounter];
			indexCounter++;
		}
		//cout << endl;
		// Advance to the same chunk's next row in the global vector.
		rowStartPos += dim;
		rowEndPos = rowStartPos + chunk_X;
	}
	//Printing when the last device computation is done: Remove the check to check computation for each device
	int deviceID = getDeviceIDfromCoord(devicePosition_X, devicePosition_Y, numberOfDevicesAlong_X);
	if ((deviceID == (numDevices - 1)) && (currentIteration == (numberOfTotalIterations - 1)))
	{
		ofstream myfile;
		myfile.open("data2.txt");
		//Printing the values here
		// Walk the global vector backwards so the topmost row appears first;
		// a newline is emitted at every row boundary.
		for (int i = totalSize; i > 0; i--) {
			if (i%dim == 0) {
				myfile << endl;
			}
			myfile << result[i - 1] << " ";
		}
		myfile.close();
	}
}
cudaError_t performMultiGPUJacobi(unsigned int val_dim, unsigned int numJacobiIt, float* val_A0, float* val_A1, float* val_A2, float* val_A3, float* val_A4, float* val_rhs, float* val_x_in)
{
//Fixed value changed later
int dim = 8;
if (val_dim != 0) {
dim = val_dim;
}
//TODO: write a 2D domain decomposition method for more than 2 GPUs
int size = dim * dim;
//auto result = make_unique<float[]>(size);
//Create Diagonal Vectors
std::vector<float> a0(size);
std::vector<float> a1(size);
std::vector<float> a2(size);
std::vector<float> a3(size);
std::vector<float> a4(size);
std::vector<float> vec_in(size);
std::vector<float> vec_out(size);
std::vector<float> rhs(size);
std::vector<float> result(size);
//Get the total number of devices
int numDevices = -1;
cudaGetDeviceCount(&numDevices);
//numDevices = 2;
//Set Decomposition dimension 1D or 2D: when decomposition is 0. Computation happens on a single GPU
int decom_Dim = 2;
//Set Values for Domain Decompostion type 1D or 2D
int domainDecom_Dim = decom_Dim;
//Read the custom config defined in file "multiGPUConfig.xml"
getConfiguration("multiGPUConfig.xml", numDevices, domainDecom_Dim);
cout << endl << "Total number of Devices in the System are : " << numDevices << endl;
getAllDeviceProperties();
//Creating a Default topology mapping (without optimization) with the number of Devices specified in the configuration
map<int, int> gpuTopology;
for (int dev = 0; dev<numDevices; dev++)
{
gpuTopology[dev] = dev;
}
//Force Topology Testing Set 1
/* gpuTopology[0] = 3;
gpuTopology[1] = 1;
gpuTopology[2] = 2;
gpuTopology[3] = 0; */
//Force Topology Testing Set 2
/*gpuTopology[0] = 3;
gpuTopology[1] = 0;
gpuTopology[2] = 2;
gpuTopology[3] = 1; */
//Force Topology Testing Set 3 -- Gives the worst Configuration
gpuTopology[0] = 1;
gpuTopology[1] = 3;
gpuTopology[2] = 2;
gpuTopology[3] = 0;
//Force Topology Testing Set 4 --Good Config-- as GPUs on same chip are neighbours
/* gpuTopology[0] = 3;
gpuTopology[1] = 2;
gpuTopology[2] = 1;
gpuTopology[3] = 0; */
//Configuring the number of GPU's manually
//numDevices=2;
copyValues(&a0[0], &a1[0], &a2[0], &a3[0], &a4[0], &rhs[0], &vec_in[0], &vec_out[0], dim, &val_A0[0], &val_A1[0], &val_A2[0], &val_A3[0], &val_A4[0], &val_rhs[0], &val_x_in[0]);
vector<create_Device> deviceArray;
/* Distributed Compuation using Halos: Algorithm
1. Init Halos.
1.a) In 1D decomposition nhalo and shalo intialized from vector x_in
1.b) In 2D decompsition nhalo,shalo, ehalo and whalo initialozed from vector x_in
2. Pass the halos to Jacobi_kernal.
3. Store the result computed at the boundary into the halo boundary positions.
4. Swap nhalo and shalo pairs in 1D decompostion. Swap (nhalo,shalo) and (ehalo,whalo) in 2D.
*/
//=================================Domain Decomposition Logic Starts=================================================================
/*Generating a GPU Grid with multiple GPUs and creating a Topology*/
int numberOfDevicesAlong_X = 1;
int numberOfDevicesAlong_Y = 1;
generateGPUGRID(numDevices, numberOfDevicesAlong_X, numberOfDevicesAlong_Y, domainDecom_Dim);
cout << "GPU grid structure is : " << numberOfDevicesAlong_X << " X " << numberOfDevicesAlong_Y << endl;
//Total elements along each dim in 2D
int chunk_X = dim / numberOfDevicesAlong_X;
int chunk_Y = dim / numberOfDevicesAlong_Y;
/* Creating a GPU topology with multiple devices*/
createTopology(numDevices, deviceArray, numberOfDevicesAlong_X, numberOfDevicesAlong_Y);
/*Optimize topology 1D or 2D. Depending upon the required domain division */
if (domainDecom_Dim == 1)
{
gpuTopology = outputLatencyMatrix(numDevices, false, numberOfDevicesAlong_X, numberOfDevicesAlong_Y, domainDecom_Dim);
}
else//for 2D domain decomposition or higher
{
gpuTopology = outputLatencyMatrix(numDevices, false, numberOfDevicesAlong_X, numberOfDevicesAlong_Y, domainDecom_Dim);
}
//Enable Peer-to-Peer access across all GPUs : Done on phase 2 of development
bool p2penabled = false;
p2penabled = enableP2P(numDevices);
//Let the total number of GPU be 2 : has to be changed later
//Computation divided into (size/2) on first and size-(size/2) on second
std::vector<int> domainDivision(numDevices);
//Logic for total chunk per device (Domain distribution)
for (int i = 0; i < numDevices; i++) {
//Chunk per GPU will be same irrepective of 1D or 2D decomposition
domainDivision[i] = size / numDevices;
}
//For use on Device
std::vector<float*>d_A0(numDevices);
std::vector<float*>d_A1(numDevices);
std::vector<float*>d_A2(numDevices);
std::vector<float*>d_A3(numDevices);
std::vector<float*>d_A4(numDevices);
std::vector<float*>d_Vec_In(numDevices);
std::vector<float*>d_Vec_Out(numDevices);
std::vector<float*>d_nhalos(numDevices);
std::vector<float*>d_shalos(numDevices);
std::vector<float*>d_ehalos(numDevices);
std::vector<float*>d_whalos(numDevices);
std::vector<float*>d_Rhs(numDevices);
//Device Buffers for parallel communication using streams: Concept of Front and Back Buffer Oct 30, 2017
std::vector<float*>x_buffer_north(numDevices);
std::vector<float*>x_buffer_south(numDevices);
std::vector<float*>y_buffer_west(numDevices);
std::vector<float*>y_buffer_east(numDevices);
//Note: Using Pinned memory on Host for Halos -> Performance Approach 1
vector<float*>nHalo_pinned(numDevices);
vector<float*>sHalo_pinned(numDevices);
vector<float*>wHalo_pinned(numDevices);
vector<float*>eHalo_pinned(numDevices);
for (int dev = 0; dev < numDevices; dev++)
{
cudaSetDevice(gpuTopology[dev]);
cudaMallocHost((void**)&nHalo_pinned[gpuTopology[dev]], (chunk_X) * sizeof(float));
cudaMallocHost((void**)&sHalo_pinned[gpuTopology[dev]], (chunk_X) * sizeof(float));
cudaMallocHost((void**)&wHalo_pinned[gpuTopology[dev]], (chunk_Y) * sizeof(float));
cudaMallocHost((void**)&eHalo_pinned[gpuTopology[dev]], (chunk_Y) * sizeof(float));
}
for (int dev = 0; dev < numDevices; dev++)
{
//Setting the device before allocation
cudaSetDevice(gpuTopology[dev]);
//cudamalloc the Diagonals
cudaMalloc((void**)&d_A0[gpuTopology[dev]], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_A1[gpuTopology[dev]], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_A2[gpuTopology[dev]], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_A3[gpuTopology[dev]], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_A4[gpuTopology[dev]], domainDivision[dev] * sizeof(float));
//Using pinned memory as part of performance upgrade- Phase 2 of development
//cudamalloc the Input Vector and Result vector
cudaMalloc((void**)&d_Vec_In[gpuTopology[dev]], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_Vec_Out[gpuTopology[dev]], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_Rhs[gpuTopology[dev]], domainDivision[dev] * sizeof(float));
//cudaMalloc Halos: North and South--1D. TODO: East and West for 2D
cudaMalloc((void**)&d_nhalos[gpuTopology[dev]], chunk_X * sizeof(float));
cudaMalloc((void**)&d_shalos[gpuTopology[dev]], chunk_X * sizeof(float));
cudaMalloc((void**)&d_ehalos[gpuTopology[dev]], chunk_Y * sizeof(float));
cudaMalloc((void**)&d_whalos[gpuTopology[dev]], chunk_Y * sizeof(float));
//Buffer memory used for p2p exchange
cudaMalloc((void**)&x_buffer_north[gpuTopology[dev]], chunk_X * sizeof(float));
cudaMalloc((void**)&x_buffer_south[gpuTopology[dev]], chunk_X * sizeof(float));
cudaMalloc((void**)&y_buffer_west[gpuTopology[dev]], chunk_Y * sizeof(float));
cudaMalloc((void**)&y_buffer_east[gpuTopology[dev]], chunk_Y * sizeof(float));
}
/* The transfer of Data from Host to Device : Domain Decomposition in 2D*/
if (decom_Dim == 2) {
//Create Partial Diagonal Vectors
//Size per GPU will be
int chunkSize = chunk_X * chunk_Y;
std::vector<float> partial_a0(chunkSize);
std::vector<float> partial_a1(chunkSize);
std::vector<float> partial_a2(chunkSize);
std::vector<float> partial_a3(chunkSize);
std::vector<float> partial_a4(chunkSize);
std::vector<float> partial_vec_in(chunkSize);
std::vector<float> partial_vec_out(chunkSize);
std::vector<float> partial_rhs(chunkSize);
std::vector<float> partial_result(chunkSize);
for (int dev = 0; dev < numDevices; dev++)
{
//Test the properties of the device assigned
//cout << endl << "New Logical Device created " << deviceArray[dev].deviceID;
//cout << endl << "New Logical Device (X,Y) coord (" << deviceArray[dev].devicePosition_X << "," << deviceArray[dev].devicePosition_Y << ")";
//==========Important: Logic for creation of Chunks to be allocated to GPUs==========================================
//Important : Mention about the correlation between the topology and data position in the thesis
int devicePosX = deviceArray[dev].devicePosition_X;
int devicePosY = deviceArray[dev].devicePosition_Y;
//cout << endl << "For Device ID " << deviceArray[dev].deviceID << endl;
//cout << endl << "Device pos X " << devicePosX << endl;
//cout << endl << "Device pos Y " << devicePosY << endl;
//cout << endl << "Chunk X " << chunk_X << endl;
//cout << endl << "Chunk Y " << chunk_Y << endl;
//cout << endl << "Number of device along X " << numberOfDevicesAlong_X << endl;
//cout << endl << "Number of device along Y " << numberOfDevicesAlong_Y << endl;
//Calculating data position based on device coords
//numberOfDevicesAlong_X * Chunk_X * Chunk_Y : finds out the total data per row of GPUs allocated
//int dataStartPos_X = (devicePosX * numberOfDevicesAlong_X * chunk_X * chunk_Y) + (devicePosY * chunk_X);
int dataStartPos_X = (devicePosY * dim * chunk_Y) + (devicePosX * chunk_X);
int dataEndPos_X = dataStartPos_X + chunk_X;
//cout << endl << "Data Start Pos is " << dataStartPos_X << endl;
//cout << endl << "Data End Pos is " << dataEndPos_X << endl;
//One complete row across all GPU is dim in order to get the next element above an element we add (currentPosition + dim )
int rowStartPos = dataStartPos_X;
int rowEndPos = dataEndPos_X;
int indexCounter = 0;
//Initialize Halos
initHalos2D(deviceArray[dev], chunk_X, chunk_Y, &vec_in[0], numberOfDevicesAlong_X, numberOfDevicesAlong_Y, rowStartPos, rowEndPos - 1, dim);
for (int rowNum = 0; rowNum < chunk_Y; rowNum++)
{
//cout << endl << "Data Start Pos is " << rowStartPos << endl;
//Get one complete row for the GPU
for (int pos = rowStartPos; pos < rowEndPos; pos++)
{
partial_a0[indexCounter] = a0[pos];
partial_a1[indexCounter] = a1[pos];
partial_a2[indexCounter] = a2[pos];
partial_a3[indexCounter] = a3[pos];
partial_a4[indexCounter] = a4[pos];
partial_vec_in[indexCounter] = vec_in[pos];
partial_vec_out[indexCounter] = vec_out[pos];
partial_rhs[indexCounter] = rhs[pos];
partial_result[indexCounter] = result[pos];
indexCounter++;
}
//cout << endl << "Data End Pos is " << rowEndPos << endl;
rowStartPos += dim;
rowEndPos = rowStartPos + chunk_X;
}
//==========Important: Logic for creation of Chunks to be allocated to GPUs Ends ==========================================
//Setting Cuda device
cudaSetDevice(gpuTopology[dev]);
//Copy the diagonals from host to device : calling all at once instead of putting inside the for loop
cudaMemcpy(d_A0[gpuTopology[dev]], &partial_a0[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_A1[gpuTopology[dev]], &partial_a1[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_A2[gpuTopology[dev]], &partial_a2[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_A3[gpuTopology[dev]], &partial_a3[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_A4[gpuTopology[dev]], &partial_a4[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
//Copy in and out vectors and RHS
cudaMemcpy(d_Vec_In[gpuTopology[dev]], &partial_vec_in[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Vec_Out[gpuTopology[dev]], &partial_vec_out[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Rhs[gpuTopology[dev]], &partial_rhs[0], domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
}
if (auto err = cudaGetLastError())
{
cout << "Data copy failed 1: " << cudaGetErrorString(err) << endl;
return err;
}
//Copy intial Halos in 2D
//Initial Exchange Halos: Then do intial cudaMemcopies
exchangehalos_onHost(numDevices, deviceArray, numberOfDevicesAlong_X);
for (int dev = 0; dev < numDevices; dev++)
{
cudaSetDevice(gpuTopology[dev]);
//Copying Halos to the device
if (deviceArray[dev].nHalo_flag == 1)
{
cudaMemcpy(d_nhalos[gpuTopology[dev]], &deviceArray[dev].nHalo[0], chunk_X * sizeof(float), cudaMemcpyHostToDevice);
}
if (deviceArray[dev].sHalo_flag == 1)
{
cudaMemcpy(d_shalos[gpuTopology[dev]], &deviceArray[dev].sHalo[0], chunk_X * sizeof(float), cudaMemcpyHostToDevice);
}
if (deviceArray[dev].eHalo_flag == 1)
{
cudaMemcpy(d_ehalos[gpuTopology[dev]], &deviceArray[dev].eHalo[0], chunk_Y * sizeof(float), cudaMemcpyHostToDevice);
}
if (deviceArray[dev].wHalo_flag == 1)
{
cudaMemcpy(d_whalos[gpuTopology[dev]], &deviceArray[dev].wHalo[0], chunk_Y * sizeof(float), cudaMemcpyHostToDevice);
}
}
if (auto err = cudaGetLastError())
{
cout << "Halo Copy Failed " << cudaGetErrorString(err) << endl;
return err;
}
//Development phase 2 changes : For p2p operation communication initialize buffers
for (int dev = 0; dev < numDevices; dev++)
{
cudaSetDevice(gpuTopology[dev]);
//Copying Halos to the device
if (deviceArray[dev].nHalo_flag == 1)
{
//cout << "Device ID for nHaloFlag is : " << deviceArray[dev].deviceID<<endl;
cudaMemcpy(x_buffer_north[gpuTopology[dev]], &deviceArray[dev].nHalo[0], chunk_X * sizeof(float), cudaMemcpyHostToDevice);
}
if (deviceArray[dev].sHalo_flag == 1)
{
//cout << "Device ID for sHaloFlag is : " << deviceArray[dev].deviceID << endl;
cudaMemcpy(x_buffer_south[gpuTopology[dev]], &deviceArray[dev].sHalo[0], chunk_X * sizeof(float), cudaMemcpyHostToDevice);
}
if (deviceArray[dev].eHalo_flag == 1)
{
//cout << "Device ID for eHaloFlag is : " << deviceArray[dev].deviceID << endl;
cudaMemcpy(y_buffer_east[gpuTopology[dev]], &deviceArray[dev].eHalo[0], chunk_Y * sizeof(float), cudaMemcpyHostToDevice);
}
if (deviceArray[dev].wHalo_flag == 1)
{
//cout << "Device ID for wHaloFlag is : " << deviceArray[dev].deviceID << endl;
cudaMemcpy(y_buffer_west[gpuTopology[dev]], &deviceArray[dev].wHalo[0], chunk_Y * sizeof(float), cudaMemcpyHostToDevice);
}
}
}
//=================================Domain Decomposition Logic Ends =================================================================
//=================================Setting up the grids and blocks for kernel launch================================================
//int blocksize = -1;
//int threads = -1;
int2 myDim;
myDim.x = chunk_X;
myDim.y = chunk_Y;
dim3 block(BLOCKSIZE_X, BLOCKSIZE_Y);
dim3 grid(DIVRND(myDim.x, BLOCKSIZE_X), DIVRND(myDim.y, BLOCKSIZE_Y));
//==================================================================================================================================
//Call to kernal
int iterations = 0;
if (numJacobiIt != 0) {
iterations = numJacobiIt;
}
else
{
cout << endl << " No. of iterations is zero exiting... ";
//return;
}
//===========================================CUDA Stream implementation for performance. Phase 2 of Development ====================================================
//===========Algorithm Improvement: Identify the neighbours so that they could be launched together and the exchange can take place. Without having to wait for computation across all devices============================
//cudaStream_t streams[4];//Possible to declare it dynamically ? Yes. Using Vectors.
vector<cudaStream_t> streams(numDevices);
//vector<cudaStream_t> streamsComm(numDevices);
//Create seperate streams for each Halo Exchange
vector<cudaStream_t> nHaloExchange(numDevices);
vector<cudaStream_t> sHaloExchange(numDevices);
vector<cudaStream_t> eHaloExchange(numDevices);
vector<cudaStream_t> wHaloExchange(numDevices);
//cudaStream_t nHaloExchange[4];
//cudaStream_t sHaloExchange[4];
//cudaStream_t eHaloExchange[4];
//cudaStream_t wHaloExchange[4];
//Note: Default stream for a device is always syncronizing so creating seperate streams for each device
for (int i = 0; i < numDevices; i++)
{
cudaSetDevice(gpuTopology[i]);
cudaStreamCreate(&streams[gpuTopology[i]]);
//cudaStreamCreate(&streamsComm[i]);
if (p2penabled) {
cudaStreamCreate(&nHaloExchange[gpuTopology[i]]);
cudaStreamCreate(&sHaloExchange[gpuTopology[i]]);
cudaStreamCreate(&eHaloExchange[gpuTopology[i]]);
cudaStreamCreate(&wHaloExchange[gpuTopology[i]]);
}
}
//For explicit synchornizing p2p transfers and async memcopies
//cudaEvent_t events[4];
vector<cudaEvent_t> events(numDevices);
vector<cudaEvent_t> nHaloEvent(numDevices);
vector<cudaEvent_t> sHaloEvent(numDevices);
vector<cudaEvent_t> eHaloEvent(numDevices);
vector<cudaEvent_t> wHaloEvent(numDevices);
//cudaEvent_t nHaloEvent[4];
//cudaEvent_t sHaloEvent[4];
//cudaEvent_t eHaloEvent[4];
//cudaEvent_t wHaloEvent[4];
for (int i = 0; i < numDevices; i++)
{
cudaSetDevice(gpuTopology[i]);
cudaEventCreate(&events[gpuTopology[i]]);
if (p2penabled) {
cudaEventCreate(&nHaloEvent[gpuTopology[i]]);
cudaEventCreate(&sHaloEvent[gpuTopology[i]]);
cudaEventCreate(&eHaloEvent[gpuTopology[i]]);
cudaEventCreate(&wHaloEvent[gpuTopology[i]]);
}
}
/*Using a pagable memory first*/
//std::vector<float> partial_resultOnHost(chunk_X * chunk_Y);
/*Using a pinned(page locked) memory for performance*/
vector<float*>partial_resultOnHost(numDevices);
for (int dev = 0; dev < numDevices; dev++)
{
cudaSetDevice(gpuTopology[dev]);
cudaMallocHost((void**)&partial_resultOnHost[dev], (chunk_X * chunk_Y) * sizeof(float));
}
//==============================================================
//Check performance
cudaError_t status = cudaGetLastError();
high_resolution_clock::time_point t1 = high_resolution_clock::now();
#pragma omp parallel num_threads(numDevices)
{
int dev = omp_get_thread_num();
//cudaSetDevice(omp_get_thread_num());
for (int i = 0; i <= iterations; i++)
{
cudaSetDevice(gpuTopology[dev]);
#pragma omp barrier
if ((i>0))
{
//As this is not a run on a single host thread race conditions occurs. So have to manage the pointer swapping by creating a copy
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1)
{
swap(x_buffer_north[gpuTopology[dev]], d_nhalos[gpuTopology[dev]]);
}
//Check if device is having a south Halo buffer
if (deviceArray[dev].sHalo_flag == 1)
{
swap(x_buffer_south[gpuTopology[dev]], d_shalos[gpuTopology[dev]]);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1)
{
swap(y_buffer_east[gpuTopology[dev]], d_ehalos[gpuTopology[dev]]);
}
//Check if device is having a west Halo buffer
if (deviceArray[dev].wHalo_flag == 1)
{
swap(y_buffer_west[gpuTopology[dev]], d_whalos[gpuTopology[dev]]);
}
}
jacobi_Simple << <grid, block, 0, streams[gpuTopology[dev]] >> >(d_A0[gpuTopology[dev]], d_A1[gpuTopology[dev]], d_A2[gpuTopology[dev]], d_A3[gpuTopology[dev]], d_A4[gpuTopology[dev]], d_Vec_In[gpuTopology[dev]], d_Vec_Out[gpuTopology[dev]], d_Rhs[gpuTopology[dev]], deviceArray[dev].eHalo_flag, deviceArray[dev].wHalo_flag, deviceArray[dev].nHalo_flag, deviceArray[dev].sHalo_flag, d_ehalos[gpuTopology[dev]], d_whalos[gpuTopology[dev]], d_nhalos[gpuTopology[dev]], d_shalos[gpuTopology[dev]], deviceArray[dev].deviceID, numDevices, decom_Dim, myDim);
//jacobi_Comm << <grid, block, 0, streamsComm[dev] >> >(d_A0[dev], d_A1[dev], d_A2[dev], d_A3[dev], d_A4[dev], d_Vec_In[dev], d_Vec_Out[dev], d_Rhs[dev], deviceArray[dev].eHalo_flag, deviceArray[dev].wHalo_flag, deviceArray[dev].nHalo_flag, deviceArray[dev].sHalo_flag, d_ehalos[dev], d_whalos[dev], d_nhalos[dev], d_shalos[dev], deviceArray[dev].deviceID, numDevices, decom_Dim, myDim);
//For Synchronizing while Halo Exchange start
cudaEventRecord(events[gpuTopology[dev]], streams[gpuTopology[dev]]);
swap(d_Vec_In[gpuTopology[dev]], d_Vec_Out[gpuTopology[dev]]);
if (auto err = cudaGetLastError())
{
cout << "Data copy failed 2: " << cudaGetErrorString(err) << endl;
//return err;
}
//Exchange Halos after each iteration except the last iteration
if ((i < (iterations - 1)))
{
//============Important: Before copying to buffers make sure the kernel on the respective GPU(s) finished execution using cudaStreamWaitEvent=======================
int getDevCoord_X = deviceArray[dev].devicePosition_X;
int getDevCoord_Y = deviceArray[dev].devicePosition_Y;
#pragma omp barrier // Important: To make sure all threads assign proper values to duplicate pointers before Halo Exchange Begins
//Check if device is having a north Halo buffer
if (deviceArray[dev].nHalo_flag == 1)
{
//cudaSetDevice(gpuTopology[dev]);
int devIDtoNorth = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y + 1, numberOfDevicesAlong_X);
//Exchange Halos
//Send to the device
cudaStreamWaitEvent(nHaloExchange[gpuTopology[dev]], events[gpuTopology[dev]], 0);
cudaMemcpyPeerAsync(x_buffer_south[gpuTopology[devIDtoNorth]], gpuTopology[devIDtoNorth], d_nhalos[gpuTopology[dev]], gpuTopology[dev], chunk_X * sizeof(float), nHaloExchange[gpuTopology[dev]]);
cudaEventRecord(nHaloEvent[gpuTopology[dev]], nHaloExchange[gpuTopology[dev]]);
//Postpone the next iteration kernel execution till the p2p transfers complete
//cudaSetDevice(gpuTopology[devIDtoNorth]);
cudaStreamWaitEvent(streams[gpuTopology[devIDtoNorth]], nHaloEvent[gpuTopology[dev]], 0);
}
//Check if device is having a south Halo buffer
if (deviceArray[dev].sHalo_flag == 1)
{
//cudaSetDevice(gpuTopology[dev]);
int devIDtoSouth = getDeviceIDfromCoord(getDevCoord_X, getDevCoord_Y - 1, numberOfDevicesAlong_X);
//Exchange Halos
//Send to the device
cudaStreamWaitEvent(sHaloExchange[gpuTopology[dev]], events[gpuTopology[dev]], 0);
cudaMemcpyPeerAsync(x_buffer_north[gpuTopology[devIDtoSouth]], gpuTopology[devIDtoSouth], d_shalos[gpuTopology[dev]], gpuTopology[dev], chunk_X * sizeof(float), sHaloExchange[gpuTopology[dev]]);
cudaEventRecord(sHaloEvent[gpuTopology[dev]], sHaloExchange[gpuTopology[dev]]);
//Postpone the next iteration kernel execution till the p2p transfers complete
//cudaSetDevice(gpuTopology[devIDtoSouth]);
cudaStreamWaitEvent(streams[gpuTopology[devIDtoSouth]], sHaloEvent[gpuTopology[dev]], 0);
}
//Check if device is having a east Halo buffer
if (deviceArray[dev].eHalo_flag == 1)
{
//cudaSetDevice(gpuTopology[dev]);
int devIDtoEast = getDeviceIDfromCoord(getDevCoord_X + 1, getDevCoord_Y, numberOfDevicesAlong_Y);
//Exchange Halos
//Send to the device
cudaStreamWaitEvent(eHaloExchange[gpuTopology[dev]], events[gpuTopology[dev]], 0);
cudaMemcpyPeerAsync(y_buffer_west[gpuTopology[devIDtoEast]], gpuTopology[devIDtoEast], d_ehalos[gpuTopology[dev]], gpuTopology[dev], chunk_Y * sizeof(float), eHaloExchange[gpuTopology[dev]]);
cudaEventRecord(eHaloEvent[gpuTopology[dev]], eHaloExchange[gpuTopology[dev]]);
//Postpone the next iteration kernel execution till the p2p transfers complete
//cudaSetDevice(gpuTopology[devIDtoEast]);
cudaStreamWaitEvent(streams[gpuTopology[devIDtoEast]], eHaloEvent[gpuTopology[dev]], 0);
}
//Check if device is having a west Halo buffer
if (deviceArray[dev].wHalo_flag == 1)
{
//cudaSetDevice(gpuTopology[dev]);
int devIDtoWest = getDeviceIDfromCoord(getDevCoord_X - 1, getDevCoord_Y, numberOfDevicesAlong_Y);
//Exchange Halos
//Send to the device
cudaStreamWaitEvent(wHaloExchange[gpuTopology[dev]], events[gpuTopology[dev]], 0);
cudaMemcpyPeerAsync(y_buffer_east[gpuTopology[devIDtoWest]], gpuTopology[devIDtoWest], d_whalos[gpuTopology[dev]], gpuTopology[dev], chunk_Y * sizeof(float), wHaloExchange[gpuTopology[dev]]);
cudaEventRecord(wHaloEvent[gpuTopology[dev]], wHaloExchange[gpuTopology[dev]]);
//Postpone the next iteration kernel execution till the p2p transfers complete
//cudaSetDevice(gpuTopology[devIDtoWest]);
cudaStreamWaitEvent(streams[gpuTopology[devIDtoWest]], wHaloEvent[gpuTopology[dev]], 0);
}
/*if (auto err = cudaGetLastError())
{
cout << "Halo Exchange Error: " << cudaGetErrorString(err) << endl;
}*/
}
}
}
for (int dev = 0; dev < numDevices; dev++)
{
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
high_resolution_clock::time_point t2 = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(t2 - t1).count();
cout << endl << "Iterations successful. Time taken in microseconds :" << duration << endl;
//Copying the final results
for (int dev = 0; dev < numDevices; dev++)
{
cudaSetDevice(gpuTopology[dev]);
cudaMemcpyAsync(&partial_resultOnHost[dev][0], d_Vec_Out[gpuTopology[dev]], domainDivision[dev] * sizeof(float), cudaMemcpyDeviceToHost, streams[gpuTopology[dev]]);
}
//Sync and Destroy streams and events
for (int i = 0; i < numDevices; ++i)
{
cudaSetDevice(i);
//Destroy Events
cudaEventDestroy(events[i]);
cudaEventDestroy(nHaloEvent[i]);
cudaEventDestroy(sHaloEvent[i]);
cudaEventDestroy(eHaloEvent[i]);
cudaEventDestroy(wHaloEvent[i]);
//Synchro the streams
cudaStreamSynchronize(streams[i]);
cudaStreamDestroy(streams[i]);
//cudaStreamSynchronize(streamsComm[i]);
//cudaStreamDestroy(streamsComm[i]);
cudaStreamSynchronize(nHaloExchange[i]);
cudaStreamDestroy(nHaloExchange[i]);
cudaStreamSynchronize(sHaloExchange[i]);
cudaStreamDestroy(sHaloExchange[i]);
cudaStreamSynchronize(eHaloExchange[i]);
cudaStreamDestroy(eHaloExchange[i]);
cudaStreamSynchronize(wHaloExchange[i]);
cudaStreamDestroy(wHaloExchange[i]);
}
//Results copied to disk
for (int dev = 0; dev < numDevices; dev++)
{
sendToPrint(&partial_resultOnHost[dev][0], deviceArray[dev].devicePosition_X, deviceArray[dev].devicePosition_Y, numberOfDevicesAlong_X, chunk_X, chunk_Y, dim, size, result, numDevices, iterations - 1, iterations);
}
//==========================================Performance using CUDA stream ends===========================================================================
//Done in phase 2 of development: Disble P2P across devices
if (p2penabled) {
disableP2P(numDevices);
}
//Free memory on device
for (int dev = 0; dev < numDevices; dev++)
{
cudaSetDevice(dev);
cudaFree(d_A0[dev]);
cudaFree(d_A1[dev]);
cudaFree(d_A2[dev]);
cudaFree(d_A3[dev]);
cudaFree(d_A4[dev]);
cudaFree(d_Vec_In[dev]);
cudaFree(d_Vec_Out[dev]);
cudaFree(d_nhalos[dev]);
cudaFree(d_shalos[dev]);
cudaFree(d_ehalos[dev]);
cudaFree(d_whalos[dev]);
cudaFree(d_Rhs[dev]);
cudaFree(x_buffer_south[dev]);
cudaFree(x_buffer_north[dev]);
cudaFree(y_buffer_west[dev]);
cudaFree(y_buffer_east[dev]);
cudaFreeHost(partial_resultOnHost[dev]);
cudaFreeHost(nHalo_pinned[dev]);
cudaFreeHost(sHalo_pinned[dev]);
cudaFreeHost(wHalo_pinned[dev]);
cudaFreeHost(eHalo_pinned[dev]);
cudaDeviceReset();
}
cout << endl << "Device Memory free successful." << endl;
//Take care of dynamic mem location
//delete[] domainDivision;
return cudaSuccess;
}
// Public entry point for the 2D-decomposed multi-GPU Jacobi solver.
// Forwards the diagonals (A0..A4), right-hand side and initial guess to
// performMultiGPUJacobi and maps its CUDA status to a process-style code.
// Returns 0 on success, 1 on any CUDA failure (unchanged caller contract).
int performJacobi_MultiGPU2D_Decom(unsigned int dim, unsigned int numJacobiIt, float* A0, float* A1, float* A2, float* A3, float* A4, float* rhs, float* x_in)
{
    cudaError_t cudaStatus = performMultiGPUJacobi(dim, numJacobiIt, &A0[0], &A1[0], &A2[0], &A3[0], &A4[0], &rhs[0], &x_in[0]);
    if (cudaStatus != cudaSuccess) {
        // Include the concrete CUDA error text; the original printed an empty message.
        cout << "Computation failed: " << cudaGetErrorString(cudaStatus) << endl;
        return 1;
    }
    // NOTE: a second, byte-identical status check ("Cuda Device Reset failed")
    // used to follow here. It could never fire — the first check already
    // returned on any error — so the dead duplicate branch has been removed.
    // (Device reset actually happens inside performMultiGPUJacobi.)
    return 0;
}
|
f8127f9c8be417793916034d87e089c3666ce6d6.hip | // !!! This is a file automatically generated by hipify!!!
#include <THHUNN/THHUNN.h>
#include <TH/THHalf.h>
#include <THHUNN/THHHalfAutoNumerics.cuh>
#include <THH/THHApply.cuh>
// Element-wise leaky ReLU forward functor (out-of-place):
// out = in when in > 0, otherwise out = in * negval_.
template <typename T>
struct LeakyReLUUpdateOutput
{
  const T negval_;   // slope applied to non-positive inputs

  LeakyReLUUpdateOutput(T negval)
    : negval_(negval)
  {}

  __device__ __forceinline__ void operator()(T *out, T *in)
  {
    const T v = *in;
    if (v > 0) {
      *out = v;
    } else {
      *out = v * negval_;
    }
  }
};
// in-place variant
// Element-wise leaky ReLU forward functor, in-place variant:
// x is left unchanged when positive, otherwise scaled by negval_.
template <typename T>
struct LeakyReLUUpdateOutputIP
{
  const T negval_;   // slope applied to non-positive inputs

  LeakyReLUUpdateOutputIP(T negval)
    : negval_(negval)
  {}

  __device__ __forceinline__ void operator()(T *x)
  {
    const T v = *x;
    if (v > 0) {
      *x = v;
    } else {
      *x = negval_ * v;
    }
  }
};
// Leaky ReLU backward functor (out-of-place):
// passes the upstream gradient through unchanged on the positive side,
// and scales it by negval_ on the non-positive side.
template <typename T>
struct LeakyReLUUpdateGradInput
{
  const T negval_;   // slope applied to non-positive inputs

  LeakyReLUUpdateGradInput(T negval)
    : negval_(negval)
  {}

  __device__ __forceinline__ void operator()(
    T* gradInput,
    T* input,
    T* gradOutput) const
  {
    const T g = *gradOutput;
    if (*input > 0) {
      *gradInput = g;
    } else {
      *gradInput = g * negval_;
    }
  }
};
// Leaky ReLU backward functor, in-place variant:
// rewrites gradOutput with the gradient w.r.t. the input.
template <typename T>
struct LeakyReLUUpdateGradInputIP
{
  const T negval_;   // slope applied to non-positive inputs

  LeakyReLUUpdateGradInputIP(T negval)
    : negval_(negval)
  {}

  __device__ __forceinline__ void operator()(
    T* gradOutput,
    T* input) const
  {
    const T g = *gradOutput;
    if (*input > 0) {
      *gradOutput = g;
    } else {
      *gradOutput = g * negval_;
    }
  }
};
#include <THHUNN/generic/LeakyReLU.hip>
#include <THH/THHGenerateFloatTypes.h>
| f8127f9c8be417793916034d87e089c3666ce6d6.cu | #include <THCUNN/THCUNN.h>
#include <TH/THHalf.h>
#include <THCUNN/THCHalfAutoNumerics.cuh>
#include <THC/THCApply.cuh>
// Element-wise leaky ReLU forward functor (out-of-place):
// out = in when in > 0, otherwise out = in * negval_.
template <typename T>
struct LeakyReLUUpdateOutput
{
  const T negval_;   // slope applied to non-positive inputs

  LeakyReLUUpdateOutput(T negval)
    : negval_(negval)
  {}

  __device__ __forceinline__ void operator()(T *out, T *in)
  {
    const T v = *in;
    if (v > 0) {
      *out = v;
    } else {
      *out = v * negval_;
    }
  }
};
// in-place variant
// Element-wise leaky ReLU forward functor, in-place variant:
// x is left unchanged when positive, otherwise scaled by negval_.
template <typename T>
struct LeakyReLUUpdateOutputIP
{
  const T negval_;   // slope applied to non-positive inputs

  LeakyReLUUpdateOutputIP(T negval)
    : negval_(negval)
  {}

  __device__ __forceinline__ void operator()(T *x)
  {
    const T v = *x;
    if (v > 0) {
      *x = v;
    } else {
      *x = negval_ * v;
    }
  }
};
// Leaky ReLU backward functor (out-of-place):
// passes the upstream gradient through unchanged on the positive side,
// and scales it by negval_ on the non-positive side.
template <typename T>
struct LeakyReLUUpdateGradInput
{
  const T negval_;   // slope applied to non-positive inputs

  LeakyReLUUpdateGradInput(T negval)
    : negval_(negval)
  {}

  __device__ __forceinline__ void operator()(
    T* gradInput,
    T* input,
    T* gradOutput) const
  {
    const T g = *gradOutput;
    if (*input > 0) {
      *gradInput = g;
    } else {
      *gradInput = g * negval_;
    }
  }
};
// Leaky ReLU backward functor, in-place variant:
// rewrites gradOutput with the gradient w.r.t. the input.
template <typename T>
struct LeakyReLUUpdateGradInputIP
{
  const T negval_;   // slope applied to non-positive inputs

  LeakyReLUUpdateGradInputIP(T negval)
    : negval_(negval)
  {}

  __device__ __forceinline__ void operator()(
    T* gradOutput,
    T* input) const
  {
    const T g = *gradOutput;
    if (*input > 0) {
      *gradOutput = g;
    } else {
      *gradOutput = g * negval_;
    }
  }
};
};
#include <THCUNN/generic/LeakyReLU.cu>
#include <THC/THCGenerateFloatTypes.h>
|
8fc701787a7f2aab4d34168df7a3b4cdfa564429.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "CudaObject.h"
#include "CudaCommon.cuh"
namespace gpu_cuda {
// Derivative of the logistic sigmoid: d/dx sigmoid(x) = s * (1 - s),
// with s = sigmoid(x). Used by the backward kernel to scale incoming gradients.
__device__ float activator_derivative( float x )
{
  // expf keeps the whole computation in single precision; the original
  // double-precision exp() forced a float->double->float round trip on
  // every call inside the kernel.
  float sig = 1.0f / (1.0f + expf( -x ));
  return sig * (1.0f - sig);
}
// Forward activation kernel for the object-detection head.
// `in`/`out` are flat buffers holding one slab of (in_size_x * in_size_y *
// in_size_z) floats per batch element; one thread processes one whole element.
// Each bounding-box record is (4 + max_classes) consecutive floats:
//   [0] x offset -> sigmoid, [1] y offset -> sigmoid,
//   [2] width -> exp, [3] height -> exp, [4..] class confidences -> sigmoid.
// NOTE(review): the host launcher sizes the grid from batch_size but there is
// no `id < batch_size` guard here, so surplus threads would index past the
// buffers — confirm cudaGridSize() never over-provisions threads.
__global__ void calcDetectObjectsForwardGPU(float *in, float *out, int in_size_x, int in_size_y, int in_size_z, int max_bounding_boxes, int max_classes )
{
  // Flatten a (gridDim.x x gridDim.y) grid of 1-D blocks into a batch index.
  int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
  // Walk box records; `i` advances one full record per iteration.
  // NOTE(review): the bound is `i < max_bounding_boxes` although the step is
  // (4 + max_classes) — presumably max_bounding_boxes is the record stream
  // length in floats rather than a box count; verify against the caller.
  for( int i = 0; i < max_bounding_boxes; i=i+(4+max_classes)){
    // Start of this record inside the current batch element's slab.
    int index = id * (in_size_x * in_size_y * in_size_z) + i;
    out[index ] = 1.0f / (1.0f + exp( -in[index ] )); // x: sigmoid
    out[index+1] = 1.0f / (1.0f + exp( -in[index+1] )); // y: sigmoid
    out[index+2] = exp( in[index+2] ); // w: exp
    out[index+3] = exp( in[index+3] ); // h: exp
    for( int c = 0; c < max_classes; ++c){
      int index2 = id * (in_size_x * in_size_y * in_size_z) + i+4+c;
      out[index2] = 1.0f / (1.0f + exp( -in[index2] )); // id: sigmoid
    }
  }
  /* original CPU reference kept for comparison:
  for(int b = 0; b < in.size.b; ++b ){
    for( int i = 0; i < _max_bounding_boxes; i=i+(4+_max_classes)){
      out( b, i  , 0, 0 ) = 1.0f / (1.0f + exp( -in( b, i  , 0, 0 ) )); // x: sigmoid
      out( b, i+1, 0, 0 ) = 1.0f / (1.0f + exp( -in( b, i+1, 0, 0 ) )); // y: sigmoid
      out( b, i+2, 0, 0 ) = exp( in( b, i+2, 0, 0 ) ); // w: exp
      out( b, i+3, 0, 0 ) = exp( in( b, i+3, 0, 0 ) ); // h: exp
      for( int c = 0; c < _max_classes; ++c){
        out( b, i+4+c, 0, 0 ) = 1.0f / (1.0f + exp( -in( b, i+4+c , 0, 0 ) )); // id: sigmoid
      }
    }
  }
  */
}
// Backward kernel for the object-detection head: dz = f'(in) * dz_in,
// where f' matches the forward activation of each record field
// (sigmoid derivative for x/y/class scores, exp for w/h — note exp' = exp).
// One thread processes one whole batch element (same layout as the forward
// kernel: records of (4 + max_classes) floats inside a per-batch slab).
// NOTE(review): batch_size is passed but never used for an `id < batch_size`
// guard — surplus threads would write past the buffers; confirm
// cudaGridSize() never over-provisions threads.
__global__ void calcDetectObjectsBackwardGPU( float *dz_in, float *dz, float *in, int batch_size, int in_size_x, int in_size_y, int in_size_z, int max_bounding_boxes, int max_classes )
{
  // Flatten a (gridDim.x x gridDim.y) grid of 1-D blocks into a batch index.
  int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
  // NOTE(review): loop bound vs. step mismatch — see the forward kernel;
  // presumably max_bounding_boxes is a float count, not a box count.
  for( int i = 0; i < max_bounding_boxes; i=i+(4+max_classes)){
    int index = id * (in_size_x * in_size_y * in_size_z) + i;
    dz[index ] = activator_derivative( in[index ] ) * dz_in[index ]; // x: sigmoid derivative * grads
    dz[index+1] = activator_derivative( in[index+1] ) * dz_in[index+1]; // y: sigmoid derivative * grads
    dz[index+2] = exp( in[index+2] ) * dz_in[index+2]; // w: exp * grads
    dz[index+3] = exp( in[index+3] ) * dz_in[index+3]; // h: exp * grads
    for( int c = 0; c <max_classes; ++c){
      int index2 = id * (in_size_x * in_size_y * in_size_z) + i+4+c;
      dz[index2] = activator_derivative( in[index2] ) * dz_in[index2]; // id: sigmoid derivative * grads
    }
  }
  /* original CPU reference kept for comparison:
  for(int b = 0; b < dz_in.size.b; ++b ){
    for( int i = 0; i < _max_bounding_boxes; i=i+(4+_max_classes)){
      dz( b, i  , 0, 0 ) = activator_derivative( in( b, i   , 0, 0 ) ) * dz_in( b, i  , 0, 0 ); // x: sigmoid derivative * grads
      dz( b, i+1, 0, 0 ) = activator_derivative( in( b, i+1 , 0, 0 ) ) * dz_in( b, i+1, 0, 0 ); // y: sigmoid derivative * grads
      dz( b, i+2, 0, 0 ) = exp( in( b, i+2, 0, 0 ) ) * dz_in( b, i+2, 0, 0 ); // w: exp * grads
      dz( b, i+3, 0, 0 ) = exp( in( b, i+3, 0, 0 ) ) * dz_in( b, i+3, 0, 0 ); // h: exp * grads
      for( int c = 0; c <_max_classes; ++c){
        dz( b, i+4+c, 0, 0 ) = activator_derivative( in( b, i+4+c , 0, 0 ) ) * dz_in( b, i+4+c , 0, 0 ); // id: sigmoid derivative * grads
      }
    }
  }
  */
}
// Host-side launcher for the forward activation kernel.
// Grid is sized for one thread per batch element; block size is the
// project-wide BLOCK constant.
// NOTE(review): batch_size only sizes the launch — the kernel itself
// receives no bound to guard against over-provisioned threads.
void detectObjectsForwardGPU(float *in, float *out, int batch_size, int in_size_x, int in_size_y, int in_size_z, int max_bounding_boxes, int max_classes )
{
  CudaObject helper;
  const dim3 launchGrid = helper.cudaGridSize(batch_size);
  hipLaunchKernelGGL(( calcDetectObjectsForwardGPU), dim3(launchGrid), dim3(BLOCK), 0, 0, in, out, in_size_x, in_size_y, in_size_z, max_bounding_boxes, max_classes );
}
// Accumulates upstream gradients: per the helper kernel's name, adds the
// first array element-wise into the second (dz_in += dz_next_layer) over N floats.
void detectObjectsBackwardAddFirstArrayToSecondArrayGPU( float *dz_next_layer, float *dz_in, int N )
{
  CudaObject helper;
  const dim3 launchGrid = helper.cudaGridSize(N);
  hipLaunchKernelGGL(( cudaAddFirstArrayToSecondArray), dim3(launchGrid), dim3(BLOCK), 0, 0, dz_next_layer, dz_in, N );
}
// Host-side launcher for the backward kernel; one thread per batch element.
void detectObjectsBackwardGPU( float *dz_in, float *dz, float *in, int batch_size, int in_size_x, int in_size_y, int in_size_z, int max_bounding_boxes, int max_classes )
{
  CudaObject helper;
  const dim3 launchGrid = helper.cudaGridSize(batch_size);
  hipLaunchKernelGGL(( calcDetectObjectsBackwardGPU), dim3(launchGrid), dim3(BLOCK), 0, 0, dz_in, dz, in, batch_size, in_size_x, in_size_y, in_size_z, max_bounding_boxes, max_classes );
}
} // namespace gpu
| 8fc701787a7f2aab4d34168df7a3b4cdfa564429.cu | #include <stdio.h>
#include "CudaObject.h"
#include "CudaCommon.cuh"
namespace gpu_cuda {
// Derivative of the logistic sigmoid: d/dx sigmoid(x) = s * (1 - s),
// with s = sigmoid(x). Used by the backward kernel to scale incoming gradients.
__device__ float activator_derivative( float x )
{
  // expf keeps the whole computation in single precision; the original
  // double-precision exp() forced a float->double->float round trip on
  // every call inside the kernel.
  float sig = 1.0f / (1.0f + expf( -x ));
  return sig * (1.0f - sig);
}
// Forward activation kernel for the object-detection head.
// `in`/`out` are flat buffers holding one slab of (in_size_x * in_size_y *
// in_size_z) floats per batch element; one thread processes one whole element.
// Each bounding-box record is (4 + max_classes) consecutive floats:
//   [0] x offset -> sigmoid, [1] y offset -> sigmoid,
//   [2] width -> exp, [3] height -> exp, [4..] class confidences -> sigmoid.
// NOTE(review): the host launcher sizes the grid from batch_size but there is
// no `id < batch_size` guard here, so surplus threads would index past the
// buffers — confirm cudaGridSize() never over-provisions threads.
__global__ void calcDetectObjectsForwardGPU(float *in, float *out, int in_size_x, int in_size_y, int in_size_z, int max_bounding_boxes, int max_classes )
{
  // Flatten a (gridDim.x x gridDim.y) grid of 1-D blocks into a batch index.
  int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
  // Walk box records; `i` advances one full record per iteration.
  // NOTE(review): the bound is `i < max_bounding_boxes` although the step is
  // (4 + max_classes) — presumably max_bounding_boxes is the record stream
  // length in floats rather than a box count; verify against the caller.
  for( int i = 0; i < max_bounding_boxes; i=i+(4+max_classes)){
    // Start of this record inside the current batch element's slab.
    int index = id * (in_size_x * in_size_y * in_size_z) + i;
    out[index ] = 1.0f / (1.0f + exp( -in[index ] )); // x: sigmoid
    out[index+1] = 1.0f / (1.0f + exp( -in[index+1] )); // y: sigmoid
    out[index+2] = exp( in[index+2] ); // w: exp
    out[index+3] = exp( in[index+3] ); // h: exp
    for( int c = 0; c < max_classes; ++c){
      int index2 = id * (in_size_x * in_size_y * in_size_z) + i+4+c;
      out[index2] = 1.0f / (1.0f + exp( -in[index2] )); // id: sigmoid
    }
  }
  /* original CPU reference kept for comparison:
  for(int b = 0; b < in.size.b; ++b ){
    for( int i = 0; i < _max_bounding_boxes; i=i+(4+_max_classes)){
      out( b, i  , 0, 0 ) = 1.0f / (1.0f + exp( -in( b, i  , 0, 0 ) )); // x: sigmoid
      out( b, i+1, 0, 0 ) = 1.0f / (1.0f + exp( -in( b, i+1, 0, 0 ) )); // y: sigmoid
      out( b, i+2, 0, 0 ) = exp( in( b, i+2, 0, 0 ) ); // w: exp
      out( b, i+3, 0, 0 ) = exp( in( b, i+3, 0, 0 ) ); // h: exp
      for( int c = 0; c < _max_classes; ++c){
        out( b, i+4+c, 0, 0 ) = 1.0f / (1.0f + exp( -in( b, i+4+c , 0, 0 ) )); // id: sigmoid
      }
    }
  }
  */
}
// Backward kernel for the object-detection head: dz = f'(in) * dz_in,
// where f' matches the forward activation of each record field
// (sigmoid derivative for x/y/class scores, exp for w/h — note exp' = exp).
// One thread processes one whole batch element (same layout as the forward
// kernel: records of (4 + max_classes) floats inside a per-batch slab).
// NOTE(review): batch_size is passed but never used for an `id < batch_size`
// guard — surplus threads would write past the buffers; confirm
// cudaGridSize() never over-provisions threads.
__global__ void calcDetectObjectsBackwardGPU( float *dz_in, float *dz, float *in, int batch_size, int in_size_x, int in_size_y, int in_size_z, int max_bounding_boxes, int max_classes )
{
  // Flatten a (gridDim.x x gridDim.y) grid of 1-D blocks into a batch index.
  int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
  // NOTE(review): loop bound vs. step mismatch — see the forward kernel;
  // presumably max_bounding_boxes is a float count, not a box count.
  for( int i = 0; i < max_bounding_boxes; i=i+(4+max_classes)){
    int index = id * (in_size_x * in_size_y * in_size_z) + i;
    dz[index ] = activator_derivative( in[index ] ) * dz_in[index ]; // x: sigmoid derivative * grads
    dz[index+1] = activator_derivative( in[index+1] ) * dz_in[index+1]; // y: sigmoid derivative * grads
    dz[index+2] = exp( in[index+2] ) * dz_in[index+2]; // w: exp * grads
    dz[index+3] = exp( in[index+3] ) * dz_in[index+3]; // h: exp * grads
    for( int c = 0; c <max_classes; ++c){
      int index2 = id * (in_size_x * in_size_y * in_size_z) + i+4+c;
      dz[index2] = activator_derivative( in[index2] ) * dz_in[index2]; // id: sigmoid derivative * grads
    }
  }
  /* original CPU reference kept for comparison:
  for(int b = 0; b < dz_in.size.b; ++b ){
    for( int i = 0; i < _max_bounding_boxes; i=i+(4+_max_classes)){
      dz( b, i  , 0, 0 ) = activator_derivative( in( b, i   , 0, 0 ) ) * dz_in( b, i  , 0, 0 ); // x: sigmoid derivative * grads
      dz( b, i+1, 0, 0 ) = activator_derivative( in( b, i+1 , 0, 0 ) ) * dz_in( b, i+1, 0, 0 ); // y: sigmoid derivative * grads
      dz( b, i+2, 0, 0 ) = exp( in( b, i+2, 0, 0 ) ) * dz_in( b, i+2, 0, 0 ); // w: exp * grads
      dz( b, i+3, 0, 0 ) = exp( in( b, i+3, 0, 0 ) ) * dz_in( b, i+3, 0, 0 ); // h: exp * grads
      for( int c = 0; c <_max_classes; ++c){
        dz( b, i+4+c, 0, 0 ) = activator_derivative( in( b, i+4+c , 0, 0 ) ) * dz_in( b, i+4+c , 0, 0 ); // id: sigmoid derivative * grads
      }
    }
  }
  */
}
// Host-side launcher for the forward activation kernel.
// Grid is sized for one thread per batch element; block size is the
// project-wide BLOCK constant.
// NOTE(review): batch_size only sizes the launch — the kernel itself
// receives no bound to guard against over-provisioned threads.
void detectObjectsForwardGPU(float *in, float *out, int batch_size, int in_size_x, int in_size_y, int in_size_z, int max_bounding_boxes, int max_classes )
{
  CudaObject helper;
  const dim3 launchGrid = helper.cudaGridSize(batch_size);
  calcDetectObjectsForwardGPU<<<launchGrid, BLOCK>>>( in, out, in_size_x, in_size_y, in_size_z, max_bounding_boxes, max_classes );
}
// Accumulates upstream gradients: per the helper kernel's name, adds the
// first array element-wise into the second (dz_in += dz_next_layer) over N floats.
void detectObjectsBackwardAddFirstArrayToSecondArrayGPU( float *dz_next_layer, float *dz_in, int N )
{
  CudaObject helper;
  const dim3 launchGrid = helper.cudaGridSize(N);
  cudaAddFirstArrayToSecondArray<<<launchGrid, BLOCK>>>( dz_next_layer, dz_in, N );
}
// Host-side launcher for the backward kernel; one thread per batch element.
void detectObjectsBackwardGPU( float *dz_in, float *dz, float *in, int batch_size, int in_size_x, int in_size_y, int in_size_z, int max_bounding_boxes, int max_classes )
{
  CudaObject helper;
  const dim3 launchGrid = helper.cudaGridSize(batch_size);
  calcDetectObjectsBackwardGPU<<<launchGrid, BLOCK>>>( dz_in, dz, in, batch_size, in_size_x, in_size_y, in_size_z, max_bounding_boxes, max_classes );
}
} // namespace gpu
|
398241d403b078837ea6887558dcba7caa1a54eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2015 by Contributors
* \file roi_pooling.cu
* \brief roi pooling operator
* \author Ross Girshick, Kye-Hyeon Kim, Jian Guo
*/
#include "./roi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
namespace mshadow {
namespace cuda {
// ROI max-pooling forward kernel (Fast R-CNN style).
// Grid-stride loop: each iteration produces one pooled output element
// (n, c, ph, pw) of shape (num_rois, channels, pooled_height, pooled_width).
// bottom_rois holds one 5-tuple per ROI: [batch_index, x1, y1, x2, y2] in
// image coordinates; spatial_scale maps them onto the feature map.
// argmax_data records the flat in-channel offset of the max for the backward
// pass (-1 when the pooling window is empty; a negative batch index marks a
// padded ROI and yields zeros).
template<typename Dtype>
__global__ void ROIPoolForwardKernel(const int count, const Dtype* bottom_data,
                                     const float spatial_scale, const int channels,
                                     const int height, const int width,
                                     const int pooled_height, const int pooled_width,
                                     const Dtype* bottom_rois, Dtype* top_data,
                                     Dtype* argmax_data) {
  for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
       index < count;
       index += blockDim.x * gridDim.x * gridDim.y) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    // FIX: use per-iteration offset pointers instead of advancing the kernel
    // parameters in place (`bottom_rois += n * 5;` / `bottom_data += ...`).
    // The in-place advance compounded across grid-stride iterations, so any
    // thread handling more than one output element read ROI and feature data
    // from garbage addresses.
    const Dtype* offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    if (roi_batch_ind < 0) {
      top_data[index] = 0;
      argmax_data[index] = 0;
      continue;
    }
    int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
    int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
    int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
    int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
    // Force malformed ROIs to be 1x1
    int roi_width = max(roi_end_w - roi_start_w + 1, 1);
    int roi_height = max(roi_end_h - roi_start_h + 1, 1);
    Dtype bin_size_h = static_cast<Dtype>(roi_height)
                       / static_cast<Dtype>(pooled_height);
    Dtype bin_size_w = static_cast<Dtype>(roi_width)
                       / static_cast<Dtype>(pooled_width);
    // Map this pooled cell onto its [hstart, hend) x [wstart, wend) window.
    int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
                                        * bin_size_h));
    int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
                                        * bin_size_w));
    int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
                                     * bin_size_h));
    int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
                                     * bin_size_w));
    // Add roi offsets and clip to input boundaries
    hstart = min(max(hstart + roi_start_h, 0), height);
    hend = min(max(hend + roi_start_h, 0), height);
    wstart = min(max(wstart + roi_start_w, 0), width);
    wend = min(max(wend + roi_start_w, 0), width);
    bool is_empty = (hend <= hstart) || (wend <= wstart);
    // Define an empty pooling region to be zero
    Dtype maxval = is_empty ? 0 : -FLT_MAX;
    // If nothing is pooled, argmax = -1 causes nothing to be backprop'd
    int maxidx = -1;
    // Base of the (roi_batch_ind, c) input channel plane.
    const Dtype* offset_bottom_data =
        bottom_data + (roi_batch_ind * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        int bottom_index = h * width + w;
        if (offset_bottom_data[bottom_index] > maxval) {
          maxval = offset_bottom_data[bottom_index];
          maxidx = bottom_index;
        }
      }
    }
    top_data[index] = maxval;
    argmax_data[index] = (Dtype)maxidx;
  }
}
// Host-side launcher for ROIPoolForwardKernel (HIP build).
// out:     pooled output tensor; its total size determines the thread count.
// data:    input feature map, indexed as (batch, channels, height, width).
// bbox:    2-D ROI tensor; the kernel reads 5 values per ROI
//          (batch_index, x1, y1, x2, y2).
// max_idx: same shape as `out`; receives the argmax index of each pooled cell
//          for use in the backward pass.
// spatial_scale: scales ROI coordinates down to feature-map coordinates.
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
                           const Tensor<gpu, 4, Dtype> &data,
                           const Tensor<gpu, 2, Dtype> &bbox,
                           const Tensor<gpu, 4, Dtype> &max_idx,
                           const float spatial_scale) {
  const Dtype *bottom_data = data.dptr_;
  const Dtype *bottom_rois = bbox.dptr_;
  Dtype *top_data = out.dptr_;
  Dtype *argmax_data = max_idx.dptr_;
  // One thread per pooled output element.
  const int count = out.shape_.Size();
  const int channels = data.size(1);
  const int height = data.size(2);
  const int width = data.size(3);
  const int pooled_height = out.size(2);
  const int pooled_width = out.size(3);
  // Fold the required block count into a 2-D grid so counts needing more
  // than kMaxGridDim blocks in one dimension can still be covered.
  const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
  dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
  dim3 dimBlock(kMaxThreadsPerBlock);
  CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Forward");
  // Launch on the stream owned by the output tensor.
  hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
  hipLaunchKernelGGL(( ROIPoolForwardKernel<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream,
      count, bottom_data, spatial_scale, channels, height, width,
      pooled_height, pooled_width, bottom_rois, top_data, argmax_data);
}
// Backward pass for ROI max pooling: for each input element (n, c, h, w),
// accumulates the gradient from every pooled output cell whose recorded
// argmax points at this element. Gradients are ADDED into bottom_diff
// (accumulate semantics — hence the "Acc" suffix); the caller is responsible
// for zeroing bottom_diff when overwrite semantics are wanted.
template<typename Dtype>
__global__ void ROIPoolBackwardAccKernel(const int count, const Dtype* top_diff,
                                         const Dtype* argmax_data, const int num_rois,
                                         const float spatial_scale, const int channels,
                                         const int height, const int width,
                                         const int pooled_height, const int pooled_width,
                                         Dtype* bottom_diff, const Dtype* bottom_rois) {
  // Grid-stride loop over all elements of the input gradient.
  for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
       index < count;
       index += blockDim.x * gridDim.x * gridDim.y) {
    // (n, c, h, w) coords in bottom data
    int w = index % width;
    int h = (index / width) % height;
    int c = (index / width / height) % channels;
    int n = index / width / height / channels;
    Dtype gradient = 0;
    // Accumulate gradient over all ROIs that pooled this element
    for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
      // ROI layout: (batch_index, x1, y1, x2, y2), 5 values per ROI.
      const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
      int roi_batch_ind = offset_bottom_rois[0];
      // Skip if ROI's batch index doesn't match n
      if (n != roi_batch_ind) {
        continue;
      }
      int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
      int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
      int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
      int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
      // Skip if ROI doesn't include (h, w)
      const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
                           h >= roi_start_h && h <= roi_end_h);
      if (!in_roi) {
        continue;
      }
      int offset = (roi_n * channels + c) * pooled_height * pooled_width;
      const Dtype* offset_top_diff = top_diff + offset;
      const Dtype* offset_argmax_data = argmax_data + offset;
      // Compute feasible set of pooled units that could have pooled
      // this bottom unit
      // Force malformed ROIs to be 1x1
      int roi_width = max(roi_end_w - roi_start_w + 1, 1);
      int roi_height = max(roi_end_h - roi_start_h + 1, 1);
      Dtype bin_size_h = static_cast<Dtype>(roi_height)
                         / static_cast<Dtype>(pooled_height);
      Dtype bin_size_w = static_cast<Dtype>(roi_width)
                         / static_cast<Dtype>(pooled_width);
      // Inverse of the forward binning: range of pooled cells whose bin
      // covers position (h, w).
      int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
      int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
      int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
      int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
      phstart = min(max(phstart, 0), pooled_height);
      phend = min(max(phend, 0), pooled_height);
      pwstart = min(max(pwstart, 0), pooled_width);
      pwend = min(max(pwend, 0), pooled_width);
      // Only cells whose stored argmax equals this flat (h, w) index actually
      // took their forward value from this element.
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          if (static_cast<int>(offset_argmax_data[ph * pooled_width + pw]) == (h * width + w)) {
            gradient += offset_top_diff[ph * pooled_width + pw];
          }
        }
      }
    }
    bottom_diff[index] += gradient;
  }
}
// Host-side launcher for ROIPoolBackwardAccKernel (HIP build).
// in_grad:  gradient w.r.t. the input feature map; the kernel ACCUMULATES
//           (+=) into it rather than overwriting.
// out_grad: gradient w.r.t. the pooled output.
// bbox:     2-D ROI tensor; the kernel reads 5 values per ROI.
// max_idx:  argmax indices recorded by the forward pass.
// One thread per input-gradient element; each thread scans all ROIs.
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
                               const Tensor<gpu, 4, Dtype> &out_grad,
                               const Tensor<gpu, 2, Dtype> &bbox,
                               const Tensor<gpu, 4, Dtype> &max_idx,
                               const float spatial_scale) {
  const Dtype *top_diff = out_grad.dptr_;
  const Dtype *bottom_rois = bbox.dptr_;
  Dtype *bottom_diff = in_grad.dptr_;
  Dtype *argmax_data = max_idx.dptr_;
  const int count = in_grad.shape_.Size();
  const int num_rois = bbox.size(0);
  const int channels = in_grad.size(1);
  const int height = in_grad.size(2);
  const int width = in_grad.size(3);
  const int pooled_height = out_grad.size(2);
  const int pooled_width = out_grad.size(3);
  // Fold the required block count into a 2-D grid (see forward launcher).
  const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
  dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
  dim3 dimBlock(kMaxThreadsPerBlock);
  CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Backward");
  // Launch on the stream owned by the input-gradient tensor.
  hipStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
  hipLaunchKernelGGL(( ROIPoolBackwardAccKernel<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream,
      count, top_diff, argmax_data, num_rois, spatial_scale, channels, height, width,
      pooled_height, pooled_width, bottom_diff, bottom_rois);
}
} // namespace cuda
// Thin dispatcher: forwards to the cuda-namespace implementation so callers
// in namespace mshadow can use an unqualified ROIPoolForward.
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
                           const Tensor<gpu, 4, Dtype> &data,
                           const Tensor<gpu, 2, Dtype> &bbox,
                           const Tensor<gpu, 4, Dtype> &max_idx,
                           const float spatial_scale) {
  cuda::ROIPoolForward(out, data, bbox, max_idx, spatial_scale);
}
// Thin dispatcher: forwards to the cuda-namespace implementation.
// Note: accumulates gradients into in_grad (see cuda::ROIPoolBackwardAcc).
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
                               const Tensor<gpu, 4, Dtype> &out_grad,
                               const Tensor<gpu, 2, Dtype> &bbox,
                               const Tensor<gpu, 4, Dtype> &max_idx,
                               const float spatial_scale) {
  cuda::ROIPoolBackwardAcc(in_grad, out_grad, bbox, max_idx, spatial_scale);
}
} // namespace mshadow
namespace mxnet {
namespace op {
// GPU factory specialization: instantiates ROIPoolingOp for the runtime dtype.
// Caller takes ownership of the returned operator.
template<>
Operator* CreateOp<gpu>(ROIPoolingParam param, int dtype) {
  Operator* op = NULL;
  // Expands to a switch over mshadow's supported real types, binding DType.
  MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
    op = new ROIPoolingOp<gpu, DType>(param);
  });
  return op;
}
} // namespace op
} // namespace mxnet
| 398241d403b078837ea6887558dcba7caa1a54eb.cu | /*!
* Copyright (c) 2015 by Contributors
* \file roi_pooling.cu
* \brief roi pooling operator
* \author Ross Girshick, Kye-Hyeon Kim, Jian Guo
*/
#include "./roi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
namespace mshadow {
namespace cuda {
// Forward ROI max pooling: each thread computes one pooled output element
// (n, c, ph, pw) by max-pooling the corresponding bin of the input feature
// map, and records the argmax index (flat h * width + w within the
// (roi_batch_ind, c) plane) in argmax_data for use by the backward pass.
// ROIs are rows of 5: (batch_index, x1, y1, x2, y2); batch_index < 0 marks
// a padding ROI whose output is defined as zero.
template<typename Dtype>
__global__ void ROIPoolForwardKernel(const int count, const Dtype* bottom_data,
                                     const float spatial_scale, const int channels,
                                     const int height, const int width,
                                     const int pooled_height, const int pooled_width,
                                     const Dtype* bottom_rois, Dtype* top_data,
                                     Dtype* argmax_data) {
  // Grid-stride loop over all pooled output elements.
  for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
       index < count;
       index += blockDim.x * gridDim.x * gridDim.y) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    // Fix: use local offset pointers rather than mutating the kernel
    // arguments (`bottom_rois += n * 5`, `bottom_data += ...` in the
    // original). Mutating the arguments corrupts the base pointers on the
    // second and later iterations of the grid-stride loop above.
    const Dtype* offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    if (roi_batch_ind < 0) {
      top_data[index] = 0;
      argmax_data[index] = 0;
      continue;
    }
    // Map ROI corners from image coordinates to feature-map coordinates.
    int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
    int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
    int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
    int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
    // Force malformed ROIs to be 1x1
    int roi_width = max(roi_end_w - roi_start_w + 1, 1);
    int roi_height = max(roi_end_h - roi_start_h + 1, 1);
    Dtype bin_size_h = static_cast<Dtype>(roi_height)
                       / static_cast<Dtype>(pooled_height);
    Dtype bin_size_w = static_cast<Dtype>(roi_width)
                       / static_cast<Dtype>(pooled_width);
    // Bin (ph, pw) covers [hstart, hend) x [wstart, wend) in the feature map.
    int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
                                        * bin_size_h));
    int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
                                        * bin_size_w));
    int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
                                     * bin_size_h));
    int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
                                     * bin_size_w));
    // Add roi offsets and clip to input boundaries
    hstart = min(max(hstart + roi_start_h, 0), height);
    hend = min(max(hend + roi_start_h, 0), height);
    wstart = min(max(wstart + roi_start_w, 0), width);
    wend = min(max(wend + roi_start_w, 0), width);
    bool is_empty = (hend <= hstart) || (wend <= wstart);
    // Define an empty pooling region to be zero
    Dtype maxval = is_empty ? 0 : -FLT_MAX;
    // If nothing is pooled, argmax = -1 causes nothing to be backprop'd
    int maxidx = -1;
    const Dtype* offset_bottom_data =
        bottom_data + (roi_batch_ind * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        int bottom_index = h * width + w;
        if (offset_bottom_data[bottom_index] > maxval) {
          maxval = offset_bottom_data[bottom_index];
          maxidx = bottom_index;
        }
      }
    }
    top_data[index] = maxval;
    argmax_data[index] = (Dtype)maxidx;
  }
}
// Host-side launcher for ROIPoolForwardKernel: one thread per pooled output
// element, launched on the stream owned by the output tensor.
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
                           const Tensor<gpu, 4, Dtype> &data,
                           const Tensor<gpu, 2, Dtype> &bbox,
                           const Tensor<gpu, 4, Dtype> &max_idx,
                           const float spatial_scale) {
  const int num_elems = out.shape_.Size();
  const int channels = data.size(1);
  const int height = data.size(2);
  const int width = data.size(3);
  const int pooled_h = out.size(2);
  const int pooled_w = out.size(3);
  // Fold the required block count into a 2-D grid so element counts needing
  // more than kMaxGridDim blocks in one dimension can still be covered.
  const int blocks_needed =
      (num_elems + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
  dim3 grid(kMaxGridDim, (blocks_needed + kMaxGridDim - 1) / kMaxGridDim);
  dim3 block(kMaxThreadsPerBlock);
  CheckLaunchParam(grid, block, "ROIPooling Forward");
  cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
  ROIPoolForwardKernel<Dtype><<<grid, block, 0, stream>>>(
      num_elems, data.dptr_, spatial_scale, channels, height, width,
      pooled_h, pooled_w, bbox.dptr_, out.dptr_, max_idx.dptr_);
}
// Backward pass for ROI max pooling: for each input element (n, c, h, w),
// accumulates the gradient from every pooled output cell whose recorded
// argmax points at this element. Gradients are ADDED into bottom_diff
// (accumulate semantics — hence the "Acc" suffix); the caller is responsible
// for zeroing bottom_diff when overwrite semantics are wanted.
template<typename Dtype>
__global__ void ROIPoolBackwardAccKernel(const int count, const Dtype* top_diff,
                                         const Dtype* argmax_data, const int num_rois,
                                         const float spatial_scale, const int channels,
                                         const int height, const int width,
                                         const int pooled_height, const int pooled_width,
                                         Dtype* bottom_diff, const Dtype* bottom_rois) {
  // Grid-stride loop over all elements of the input gradient.
  for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
       index < count;
       index += blockDim.x * gridDim.x * gridDim.y) {
    // (n, c, h, w) coords in bottom data
    int w = index % width;
    int h = (index / width) % height;
    int c = (index / width / height) % channels;
    int n = index / width / height / channels;
    Dtype gradient = 0;
    // Accumulate gradient over all ROIs that pooled this element
    for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
      // ROI layout: (batch_index, x1, y1, x2, y2), 5 values per ROI.
      const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
      int roi_batch_ind = offset_bottom_rois[0];
      // Skip if ROI's batch index doesn't match n
      if (n != roi_batch_ind) {
        continue;
      }
      int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
      int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
      int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
      int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
      // Skip if ROI doesn't include (h, w)
      const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
                           h >= roi_start_h && h <= roi_end_h);
      if (!in_roi) {
        continue;
      }
      int offset = (roi_n * channels + c) * pooled_height * pooled_width;
      const Dtype* offset_top_diff = top_diff + offset;
      const Dtype* offset_argmax_data = argmax_data + offset;
      // Compute feasible set of pooled units that could have pooled
      // this bottom unit
      // Force malformed ROIs to be 1x1
      int roi_width = max(roi_end_w - roi_start_w + 1, 1);
      int roi_height = max(roi_end_h - roi_start_h + 1, 1);
      Dtype bin_size_h = static_cast<Dtype>(roi_height)
                         / static_cast<Dtype>(pooled_height);
      Dtype bin_size_w = static_cast<Dtype>(roi_width)
                         / static_cast<Dtype>(pooled_width);
      // Inverse of the forward binning: range of pooled cells whose bin
      // covers position (h, w).
      int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
      int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
      int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
      int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
      phstart = min(max(phstart, 0), pooled_height);
      phend = min(max(phend, 0), pooled_height);
      pwstart = min(max(pwstart, 0), pooled_width);
      pwend = min(max(pwend, 0), pooled_width);
      // Only cells whose stored argmax equals this flat (h, w) index actually
      // took their forward value from this element.
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          if (static_cast<int>(offset_argmax_data[ph * pooled_width + pw]) == (h * width + w)) {
            gradient += offset_top_diff[ph * pooled_width + pw];
          }
        }
      }
    }
    bottom_diff[index] += gradient;
  }
}
// Host-side launcher for ROIPoolBackwardAccKernel: one thread per element of
// the input gradient; gradients are accumulated (+=) into in_grad.
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
                               const Tensor<gpu, 4, Dtype> &out_grad,
                               const Tensor<gpu, 2, Dtype> &bbox,
                               const Tensor<gpu, 4, Dtype> &max_idx,
                               const float spatial_scale) {
  const int num_elems = in_grad.shape_.Size();
  const int num_rois = bbox.size(0);
  const int channels = in_grad.size(1);
  const int height = in_grad.size(2);
  const int width = in_grad.size(3);
  const int pooled_h = out_grad.size(2);
  const int pooled_w = out_grad.size(3);
  // Fold the required block count into a 2-D grid (see forward launcher).
  const int blocks_needed =
      (num_elems + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
  dim3 grid(kMaxGridDim, (blocks_needed + kMaxGridDim - 1) / kMaxGridDim);
  dim3 block(kMaxThreadsPerBlock);
  CheckLaunchParam(grid, block, "ROIPooling Backward");
  cudaStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
  ROIPoolBackwardAccKernel<Dtype><<<grid, block, 0, stream>>>(
      num_elems, out_grad.dptr_, max_idx.dptr_, num_rois, spatial_scale,
      channels, height, width, pooled_h, pooled_w, in_grad.dptr_, bbox.dptr_);
}
} // namespace cuda
// Thin dispatcher: forwards to the cuda-namespace implementation so callers
// in namespace mshadow can use an unqualified ROIPoolForward.
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
                           const Tensor<gpu, 4, Dtype> &data,
                           const Tensor<gpu, 2, Dtype> &bbox,
                           const Tensor<gpu, 4, Dtype> &max_idx,
                           const float spatial_scale) {
  cuda::ROIPoolForward(out, data, bbox, max_idx, spatial_scale);
}
// Thin dispatcher: forwards to the cuda-namespace implementation.
// Note: accumulates gradients into in_grad (see cuda::ROIPoolBackwardAcc).
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
                               const Tensor<gpu, 4, Dtype> &out_grad,
                               const Tensor<gpu, 2, Dtype> &bbox,
                               const Tensor<gpu, 4, Dtype> &max_idx,
                               const float spatial_scale) {
  cuda::ROIPoolBackwardAcc(in_grad, out_grad, bbox, max_idx, spatial_scale);
}
} // namespace mshadow
namespace mxnet {
namespace op {
// GPU factory specialization: instantiates ROIPoolingOp for the runtime dtype.
// Caller takes ownership of the returned operator.
template<>
Operator* CreateOp<gpu>(ROIPoolingParam param, int dtype) {
  Operator* op = NULL;
  // Expands to a switch over mshadow's supported real types, binding DType.
  MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
    op = new ROIPoolingOp<gpu, DType>(param);
  });
  return op;
}
} // namespace op
} // namespace mxnet
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.